Mirror of https://github.com/microsoft/lisa.git

Pylint: Enable redefined-builtin

Parent: c08a0974cd
Commit: ee1ee590f2
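This commit enables pylint's redefined-builtin check (W0622), which flags parameters and local variables that shadow Python builtins such as type, id, filter, format, input, set, and help. The hunks below rename those names, either to something more descriptive (disk_type, fs_type, file_type, pid, flag_set) or, where no clearer word exists, to the same name with a trailing underscore (type_, id_, filter_, format_). A minimal sketch of the pattern follows; the function and variable names here are hypothetical, not code from the repository:

    from typing import List


    def add_disks_shadowing(count: int, type: str = "standard") -> List[str]:
        # `type` shadows the builtin type(); with redefined-builtin enabled,
        # pylint reports W0622 on this parameter.
        return [f"disk-{type}-{i}" for i in range(count)]


    def add_disks(count: int, disk_type: str = "standard") -> List[str]:
        # The renamed parameter keeps call sites readable and satisfies the check.
        return [f"disk-{disk_type}-{i}" for i in range(count)]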
@@ -25,7 +25,7 @@ from tools import update_file, update_summary # type: ignore # noqa: E402
 # -- Project information -----------------------------------------------------

 project = "Linux Integration Services Automation (LISA)"
-copyright = "Microsoft Corporation"
+copyright = "Microsoft Corporation"  # pylint: disable=redefined-builtin
 author = "Microsoft"

 release = ""

@@ -72,10 +72,10 @@ def _get_environment_id() -> int:
     global _global_environment_id

     with _global_environment_id_lock:
-        id = _global_environment_id
+        env_id = _global_environment_id
         _global_environment_id += 1

-    return id
+    return env_id


 @dataclass

@@ -50,7 +50,7 @@ class Disk(Feature):
     def add_data_disk(
         self,
         count: int,
-        type: schema.DiskType = schema.DiskType.StandardHDDLRS,
+        disk_type: schema.DiskType = schema.DiskType.StandardHDDLRS,
         size_in_gb: int = 20,
     ) -> List[str]:
         raise NotImplementedError

@@ -153,8 +153,8 @@ class Gpu(Feature):
         lsvmbus_tool = self._node.tools[Lsvmbus]
         device_list = lsvmbus_tool.get_device_channels()
         for device in device_list:
-            for name, id, bridge_count in NvidiaSmi.gpu_devices:
-                if id in device.device_id:
+            for name, id_, bridge_count in NvidiaSmi.gpu_devices:
+                if id_ in device.device_id:
                     lsvmbus_device_count += 1
                     bridge_device_count = bridge_count
                     self._log.debug(f"GPU device {name} found!")

@@ -312,7 +312,7 @@ class Node(subclasses.BaseClassWithRunbookMixin, ContextMixin, InitializableMixi
             partition_name = partition.name
             if not partition.is_mounted:
                 mountpoint = f"{PATH_REMOTE_ROOT}/{partition_name}"
-                mount.mount(partition.device_name, mountpoint, format=True)
+                mount.mount(partition.device_name, mountpoint, format_=True)
             else:
                 mountpoint = partition.mountpoint

@@ -330,7 +330,7 @@ class Node(subclasses.BaseClassWithRunbookMixin, ContextMixin, InitializableMixi
             if not disk.is_mounted:
                 mountpoint = f"{PATH_REMOTE_ROOT}/{disk_name}"
                 self.tools[Mkfs].format_disk(disk.device_name, FileSystem.ext4)
-                mount.mount(disk.device_name, mountpoint, format=True)
+                mount.mount(disk.device_name, mountpoint, format_=True)
             else:
                 mountpoint = disk.mountpoint

@@ -387,7 +387,7 @@ class Posix(OperatingSystem, BaseClassMixin):
         find_tool = self._node.tools[Find]
         file_list = find_tool.find_files(
             self._node.get_pure_path("/var/log/azure/"),
-            type="f",
+            file_type="f",
             sudo=True,
             ignore_not_exist=True,
         )

@@ -28,8 +28,7 @@ def parse_testcase_filters(raw_filters: List[Any]) -> List[schema.BaseTestCaseFi
         for raw_filter in raw_filters:
             if constants.TYPE not in raw_filter:
                 raw_filter[constants.TYPE] = constants.TESTCASE_TYPE_LISA
-            filter = factory.load_typed_runbook(raw_filter)
-            filters.append(filter)
+            filters.append(factory.load_typed_runbook(raw_filter))
     else:
         filters = [schema.TestCase(name="test", criteria=schema.Criteria(area="demo"))]
     return filters

@@ -293,13 +292,13 @@ class RootRunner(Action):
         runner_filters: Dict[str, List[schema.BaseTestCaseFilter]] = {}
         for raw_filter in runbook.testcase_raw:
             # by default run all filtered cases unless 'enable' is specified as false
-            filter = schema.load_by_type(schema.BaseTestCaseFilter, raw_filter)
-            if filter.enabled:
+            filter_ = schema.load_by_type(schema.BaseTestCaseFilter, raw_filter)
+            if filter_.enabled:
                 raw_filters: List[schema.BaseTestCaseFilter] = runner_filters.get(
-                    filter.type, []
+                    filter_.type, []
                 )
                 if not raw_filters:
-                    runner_filters[filter.type] = raw_filters
+                    runner_filters[filter_.type] = raw_filters
                 raw_filters.append(raw_filter)
             else:
                 self._log.debug(f"Skip disabled filter: {raw_filter}.")
@@ -356,13 +356,13 @@ class FeatureSettings(

     @staticmethod
     def create(
-        type: str, extended_schemas: Optional[Dict[Any, Any]] = None
+        type_: str, extended_schemas: Optional[Dict[Any, Any]] = None
     ) -> "FeatureSettings":
         # If a feature has no setting, it will return the default settings.
         if extended_schemas:
-            feature = FeatureSettings(type=type, extended_schemas=extended_schemas)
+            feature = FeatureSettings(type=type_, extended_schemas=extended_schemas)
         else:
-            feature = FeatureSettings(type=type)
+            feature = FeatureSettings(type=type_)
         return feature

     def check(self, capability: Any) -> search_space.ResultReason:

@@ -76,8 +76,8 @@ def add_secret(
     _secret_list = sorted(_secret_list, reverse=True, key=lambda x: len(x[0]))


-def mask(input: str) -> str:
+def mask(text: str) -> str:
     for secret in _secret_list:
-        if secret[0] in input:
-            input = input.replace(secret[0], secret[1])
-    return input
+        if secret[0] in text:
+            text = text.replace(secret[0], secret[1])
+    return text

@@ -759,11 +759,11 @@ def get_resource_management_client(


 def get_storage_account_name(
-    subscription_id: str, location: str, type: str = "s"
+    subscription_id: str, location: str, type_: str = "s"
 ) -> str:
     subscription_id_postfix = subscription_id[-8:]
     # name should be shorter than 24 character
-    return f"lisa{type}{location[0:11]}{subscription_id_postfix}"
+    return f"lisa{type_}{location[:11]}{subscription_id_postfix}"


 def get_marketplace_ordering_client(

@@ -307,14 +307,14 @@ class SerialConsole(AzureFeatureMixin, features.SerialConsole):
         return self._ws

     def _write(self, cmd: str) -> None:
-        self._initialize_serial_console(id=self.DEFAULT_SERIAL_PORT_ID)
+        self._initialize_serial_console(port_id=self.DEFAULT_SERIAL_PORT_ID)

         # connect to websocket and send command
         ws = self._get_connection()
         self._get_event_loop().run_until_complete(ws.send(cmd))

     def _read(self) -> str:
-        self._initialize_serial_console(id=self.DEFAULT_SERIAL_PORT_ID)
+        self._initialize_serial_console(port_id=self.DEFAULT_SERIAL_PORT_ID)

         # connect to websocket
         ws = self._get_connection()

@@ -371,7 +371,7 @@ class SerialConsole(AzureFeatureMixin, features.SerialConsole):

         return serial_port_connection_str

-    def _initialize_serial_console(self, id: int) -> None:
+    def _initialize_serial_console(self, port_id: int) -> None:
         if self._serial_console_initialized:
             return

@@ -400,20 +400,20 @@ class SerialConsole(AzureFeatureMixin, features.SerialConsole):
         )
         serial_port_ids = [int(port.name) for port in serial_ports.value]

-        if id not in serial_port_ids:
+        if port_id not in serial_port_ids:
             self._serial_port: SerialPort = self._serial_port_operations.create(
                 resource_group_name=self._resource_group_name,
                 resource_provider_namespace=self.RESOURCE_PROVIDER_NAMESPACE,
                 parent_resource_type=self.PARENT_RESOURCE_TYPE,
                 parent_resource=self._vm_name,
-                serial_port=id,
+                serial_port=port_id,
                 parameters=SerialPort(state=SerialPortState.ENABLED),
             )
         else:
             self._serial_port = [
                 serialport
                 for serialport in serial_ports.value
-                if int(serialport.name) == id
+                if int(serialport.name) == port_id
             ][0]

         # setup shared web socket connection variable
@@ -1125,10 +1125,10 @@ class Disk(AzureFeatureMixin, features.Disk):
     def add_data_disk(
         self,
         count: int,
-        type: schema.DiskType = schema.DiskType.StandardHDDLRS,
+        disk_type: schema.DiskType = schema.DiskType.StandardHDDLRS,
         size_in_gb: int = 20,
     ) -> List[str]:
-        disk_sku = _disk_type_mapping.get(type, None)
+        disk_sku = _disk_type_mapping.get(disk_type, None)
         assert disk_sku
         assert self._node.capability.disk
         assert isinstance(self._node.capability.disk.data_disk_count, int)

@@ -1827,7 +1827,7 @@ class AzureExtension(AzureFeatureMixin, Feature):

     def create_or_update(
         self,
-        type: str,
+        type_: str,
         name: str = "",
         tags: Optional[Dict[str, str]] = None,
         publisher: str = "Microsoft.Azure.Extensions",

@@ -1851,7 +1851,7 @@ class AzureExtension(AzureFeatureMixin, Feature):
             force_update_tag=force_update_tag,
             publisher=publisher,
             auto_upgrade_minor_version=auto_upgrade_minor_version,
-            type_properties_type=type,
+            type_properties_type=type_,
             type_handler_version=type_handler_version,
             enable_automatic_upgrade=enable_automatic_upgrade,
             settings=settings,

@@ -1948,7 +1948,7 @@ class AzurePlatform(Platform):
         )  # type: ignore

         storage_name = get_storage_account_name(
-            subscription_id=self.subscription_id, location=location, type="t"
+            subscription_id=self.subscription_id, location=location, type_="t"
         )

         check_or_create_storage_account(

@@ -178,7 +178,7 @@ class VhdTransformer(Transformer):
         # get vhd container
         if not runbook.storage_account_name:
             runbook.storage_account_name = get_storage_account_name(
-                subscription_id=platform.subscription_id, location=location, type="t"
+                subscription_id=platform.subscription_id, location=location, type_="t"
             )

         check_or_create_storage_account(

@@ -31,9 +31,9 @@ def select_testcases(
     selected: Dict[str, TestCaseRuntimeData] = {}
     force_included: Set[str] = set()
     force_excluded: Set[str] = set()
-    for filter in filters:
+    for filter_ in filters:
         selected = _apply_filter(
-            filter, selected, force_included, force_excluded, full_list
+            filter_, selected, force_included, force_excluded, full_list
         )
     results: List[TestCaseRuntimeData] = []
     for case in selected.values():

@@ -678,11 +678,11 @@ class Ethtool(Tool):
         return msg_level_settings

     def set_unset_device_message_flag_by_name(
-        self, interface: str, msg_flag: List[str], set: bool
+        self, interface: str, msg_flag: List[str], flag_set: bool
     ) -> DeviceMessageLevel:
-        if set:
+        if flag_set:
             result = self.run(
-                f"-s {interface} msglvl {' on '.join(flag for flag in msg_flag)} on",
+                f"-s {interface} msglvl {' on '.join(msg_flag)} on",
                 sudo=True,
                 force_run=True,
             )
@@ -691,7 +691,7 @@ class Ethtool(Tool):
             )
         else:
             result = self.run(
-                f"-s {interface} msglvl {' off '.join(flag for flag in msg_flag)} off",
+                f"-s {interface} msglvl {' off '.join(msg_flag)} off",
                 sudo=True,
                 force_run=True,
             )

@@ -26,7 +26,7 @@ class Fdisk(Tool):
         self,
         disk_name: str,
         file_system: FileSystem = FileSystem.ext4,
-        format: bool = True,
+        format_: bool = True,
     ) -> str:
         """
         disk_name: make a partition against the disk.

@@ -67,7 +67,7 @@ class Fdisk(Tool):
             raise LisaException(
                 f"fail to find partition(s) after formatting disk {disk_name}"
             )
-        if format:
+        if format_:
             mkfs.format_disk(partition_disk[0], file_system)
         return partition_disk[0]

@@ -24,7 +24,7 @@ class Find(Tool):
         start_path: PurePath,
         name_pattern: str = "",
         path_pattern: str = "",
-        type: str = "",
+        file_type: str = "",
         ignore_case: bool = False,
         sudo: bool = False,
         ignore_not_exist: bool = False,

@@ -46,8 +46,8 @@ class Find(Tool):
                 cmd += f" -ipath '{path_pattern}'"
             else:
                 cmd += f" -path '{path_pattern}'"
-        if type:
-            cmd += f" -type '{type}'"
+        if file_type:
+            cmd += f" -type '{file_type}'"

         # for possibility of newline character in the file/folder name.
         cmd += " -print0"

@@ -216,7 +216,7 @@ class Git(Tool):
         sort_by: str = "v:refname",
         contains: str = "",
         return_last: bool = True,
-        filter: str = "",
+        filter_: str = "",
     ) -> str:
         sort_arg = ""
         contains_arg = ""

@@ -248,14 +248,14 @@ class Git(Tool):
                 "check sort and commit arguments are correct"
             ),
         ).stdout.splitlines()
-        if filter:
-            filter_re = re.compile(filter)
+        if filter_:
+            filter_re = re.compile(filter_)
             tags = [x for x in tags if filter_re.search(x)]

         # build some nice error info for failure cases
         error_info = f"sortby:{sort_by} contains:{contains}"
-        if filter:
-            error_info += f" filter:{filter}"
+        if filter_:
+            error_info += f" filter:{filter_}"
         assert_that(len(tags)).described_as(
             "Error: could not find any tags with this sort or "
             f"filter setting: {error_info}"
@@ -4,6 +4,7 @@
 from __future__ import annotations

 import re
+from collections import Counter
 from typing import Dict, List, Optional

 from lisa.executable import Tool

@@ -67,21 +68,16 @@ class InterruptInspector(Tool):
         # Note : Some IRQ numbers have single entry because they're not actually
         # CPU stats, but events count belonging to the IO-APIC controller. For
         # example, `ERR` is incremented in the case of errors in the IO-APIC bus.
-        result = (
-            self.node.tools[Cat]
-            .run("/proc/interrupts", sudo=True, force_run=True)
-            .stdout
-        )
-        mappings_with_header = result.splitlines(keepends=False)
-        mappings = mappings_with_header[1:]
-        assert len(mappings) > 0
+        result = self.node.tools[Cat].run("/proc/interrupts", sudo=True, force_run=True)
+        mappings = result.stdout.splitlines(keepends=False)[1:]
+        assert mappings

         interrupts = []
         for line in mappings:
             matched = self._interrupt_regex.fullmatch(line)
             assert matched
             cpu_counter = [int(count) for count in matched.group("cpu_counter").split()]
-            counter_sum = sum([int(x) for x in cpu_counter])
+            counter_sum = sum(int(x) for x in cpu_counter)
             interrupts.append(
                 Interrupt(
                     irq_number=matched.group("irq_number"),

@@ -108,24 +104,24 @@ class InterruptInspector(Tool):
             if pci_slot in x.metadata
             and all(y not in x.metadata for y in exclude_key_words)
         ]
-        for interrupt in matched_interrupts:
-            interrupts_sum_by_irqs.append({interrupt.irq_number: interrupt.counter_sum})
+        interrupts_sum_by_irqs.extend(
+            {interrupt.irq_number: interrupt.counter_sum}
+            for interrupt in matched_interrupts
+        )
         return interrupts_sum_by_irqs

     def sum_cpu_counter_by_index(self, pci_slot: str) -> Dict[int, int]:
-        interrupts_sum_by_cpus: Dict[int, int] = {}
-        interrupts = self.get_interrupt_data()
-        matched_interrupts = [x for x in interrupts if pci_slot in x.metadata]
-        for cpu_index in range(0, len(matched_interrupts[0].cpu_counter)):
-            interrupts_sum_by_cpus[cpu_index] = self._get_sum_of_interrupt_data_per_cpu(
-                matched_interrupts, cpu_index
-            )
-        return interrupts_sum_by_cpus
-
-    def _get_sum_of_interrupt_data_per_cpu(
-        self, interrupts: List[Interrupt], index: int
-    ) -> int:
-        sum = 0
-        for interrupt in interrupts:
-            sum += interrupt.cpu_counter[index]
-        return sum
+        interrupts_by_cpu: Counter[int] = Counter()
+        for interrupt in self.get_interrupt_data():
+
+            # Ignore unrelated entries
+            if pci_slot not in interrupt.metadata:
+                continue
+
+            # For each CPU, add count to totals
+            for cpu_index, count in enumerate(interrupt.cpu_counter):
+                interrupts_by_cpu[cpu_index] += count
+
+        # Return a standard dictionary
+        return dict(interrupts_by_cpu)
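A note on the sum_cpu_counter_by_index rewrite above: besides removing the local named sum, it folds the helper method into a collections.Counter accumulator. A self-contained sketch of the same pattern, using made-up per-CPU counts rather than data from the repository:

    from __future__ import annotations

    from collections import Counter

    # Hypothetical per-CPU interrupt counts, one row per IRQ line.
    rows = [
        [5, 0, 2],
        [1, 3, 0],
    ]

    interrupts_by_cpu: Counter[int] = Counter()
    for cpu_counter in rows:
        for cpu_index, count in enumerate(cpu_counter):
            interrupts_by_cpu[cpu_index] += count

    # Counter converts cleanly to the plain Dict[int, int] the method returns.
    assert dict(interrupts_by_cpu) == {0: 6, 1: 3, 2: 2}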
@@ -102,8 +102,7 @@ class Iperf3(Tool):
         posix_os.install_packages("iperf3")
         install_from_src = False
         if self._check_exists():
-            help = self.help()
-            if "--logfile" not in help.stdout:
+            if "--logfile" not in self.help().stdout:
                 install_from_src = True
         else:
             install_from_src = True

@@ -235,8 +234,7 @@ class Iperf3(Tool):
         if log_file:
             if self.node.shell.exists(self.node.get_pure_path(log_file)):
                 self.node.shell.remove(self.node.get_pure_path(log_file))
-            help = self.help()
-            if "--logfile" not in help.stdout:
+            if "--logfile" not in self.help().stdout:
                 self._install_from_src()
             cmd += f" --logfile {log_file}"

@@ -99,7 +99,7 @@ class Kexec(Tool):
         tar.extract(kexec_tar, str(tool_path))
         find_tool = self.node.tools[Find]
         kexec_source_folder = find_tool.find_files(
-            tool_path, name_pattern="kexec-tools*", type="d"
+            tool_path, name_pattern="kexec-tools*", file_type="d"
         )
         code_path = tool_path.joinpath(kexec_source_folder[0])
         self.node.tools[Gcc]

@@ -38,7 +38,7 @@ class PartitionInfo(object):
         name: str,
         mountpoint: str,
         size: int = 0,
-        type: str = "",
+        dev_type: str = "",
         available_blocks: int = 0,
         used_blocks: int = 0,
         total_blocks: int = 0,

@@ -48,7 +48,7 @@ class PartitionInfo(object):
         self.name = name
         self.mountpoint = mountpoint
         self.size_in_gb = int(size / (1024 * 1024 * 1024))
-        self.type = type
+        self.type = dev_type
         self.available_blocks = available_blocks
         self.used_blocks = used_blocks
         self.total_blocks = total_blocks

@@ -66,12 +66,9 @@ class DiskInfo(object):

     @property
     def is_os_disk(self) -> bool:
-        # check if the disk contains boot partition
-        # boot partitions start with /boot/{id}
-        for partition in self.partitions:
-            if partition.mountpoint.startswith("/boot"):
-                return True
-        return False
+        return any(
+            partition.mountpoint.startswith("/boot") for partition in self.partitions
+        )

     @property
     def is_mounted(self) -> bool:

@@ -79,11 +76,7 @@ class DiskInfo(object):
         if self.mountpoint:
             return True

-        for partition in self.partitions:
-            if partition.mountpoint:
-                return True
-
-        return False
+        return any(partition.mountpoint for partition in self.partitions)

     @property
     def device_name(self) -> str:

@@ -94,13 +87,13 @@ class DiskInfo(object):
         name: str,
         mountpoint: str,
         size: int = 0,
-        type: str = "",
+        dev_type: str = "",
         partitions: Optional[List[PartitionInfo]] = None,
     ):
         self.name = name
         self.mountpoint = mountpoint
         self.size_in_gb = int(size / (1024 * 1024 * 1024))
-        self.type = type
+        self.type = dev_type
         self.partitions = partitions if partitions is not None else []
@@ -152,7 +145,7 @@ class Lsblk(Tool):
                 PartitionInfo(
                     name=lsblk_entry["name"],
                     size=int(lsblk_entry["size"]),
-                    type=lsblk_entry["type"],
+                    dev_type=lsblk_entry["type"],
                     mountpoint=lsblk_entry["mountpoint"],
                     fstype=lsblk_entry["fstype"],
                 )

@@ -170,7 +163,7 @@ class Lsblk(Tool):
                     name=lsblk_entry["name"],
                     mountpoint=lsblk_entry["mountpoint"],
                     size=int(lsblk_entry["size"]),
-                    type=lsblk_entry["type"],
+                    dev_type=lsblk_entry["type"],
                     partitions=disk_partition_map.get(lsblk_entry["name"], []),
                 )
             )

@@ -27,10 +27,10 @@ class PartitionInfo(object):
     # /dev/sdc
     _disk_regex = re.compile(r"\s*\/dev\/(?P<disk>\D+).*")

-    def __init__(self, name: str, mount_point: str, type: str) -> None:
+    def __init__(self, name: str, mount_point: str, fs_type: str) -> None:
         self.name = name
         self.mount_point = mount_point
-        self.type = type
+        self.type = fs_type
         matched = self._disk_regex.fullmatch(name)
         assert matched
         self.disk = matched.group("disk")

@@ -73,29 +73,31 @@ class Mount(Tool):
         self,
         name: str,
         point: str,
-        type: Optional[FileSystem] = None,
+        fs_type: Optional[FileSystem] = None,
         options: str = "",
-        format: bool = False,
+        format_: bool = False,
     ) -> None:
         self.node.shell.mkdir(PurePosixPath(point), exist_ok=True)
         runline = [self.command]
-        if type:
-            runline.append(f"-t {type.name}")
+        if fs_type:
+            runline.append(f"-t {fs_type.name}")
         if options:
             runline.append(f"-o {options}")
-        if format:
-            format_type = type if type else self._DEFAULT_TYPE
+        if format_:
+            format_type = fs_type or self._DEFAULT_TYPE
             self.node.tools[Mkfs].format_disk(name, format_type)
         runline.append(f"{name} {point}")
         cmd_result = self.node.execute(" ".join(runline), shell=True, sudo=True)
         cmd_result.assert_exit_code()

     def umount(
-        self, disk_name: str, point: str, erase: bool = True, type: str = ""
+        self, disk_name: str, point: str, erase: bool = True, fs_type: str = ""
     ) -> None:
-        if type:
-            type = f"-t {type}"
-        cmd_result = self.node.execute(f"umount {type} {point}", shell=True, sudo=True)
+        if fs_type:
+            fs_type = f"-t {fs_type}"
+        cmd_result = self.node.execute(
+            f"umount {fs_type} {point}", shell=True, sudo=True
+        )
         if erase:
             fdisk = self.node.tools[Fdisk]
             fdisk.delete_partitions(disk_name)
@@ -44,7 +44,7 @@ class NFSClient(Tool):
         self.node.tools[Mount].mount(
             name=f"{server_ip}:{server_shared_dir}",
             point=mount_dir,
-            type=FileSystem.nfs,
+            fs_type=FileSystem.nfs,
             options=options,
         )

@@ -51,11 +51,11 @@ class Nvmecli(Tool):
         )
         return cmd_result

-    def delete_namespace(self, namespace: str, id: int) -> ExecutableResult:
-        return self.run(f"delete-ns -n {id} {namespace}", shell=True, sudo=True)
+    def delete_namespace(self, namespace: str, id_: int) -> ExecutableResult:
+        return self.run(f"delete-ns -n {id_} {namespace}", shell=True, sudo=True)

-    def detach_namespace(self, namespace: str, id: int) -> ExecutableResult:
-        return self.run(f"detach-ns -n {id} {namespace}", shell=True, sudo=True)
+    def detach_namespace(self, namespace: str, id_: int) -> ExecutableResult:
+        return self.run(f"detach-ns -n {id_} {namespace}", shell=True, sudo=True)

     def format_namespace(self, namespace: str) -> ExecutableResult:
         return self.run(f"format {namespace}", shell=True, sudo=True)

@@ -29,9 +29,9 @@ class Parted(Tool):
         )
         cmd_result.assert_exit_code()

-    def make_label(self, disk_name: str, type: str = "gpt") -> None:
+    def make_label(self, disk_name: str, disk_type: str = "gpt") -> None:
         cmd_result = self.run(
-            f"-s -- {disk_name} mklabel {type}",
+            f"-s -- {disk_name} mklabel {disk_type}",
             shell=True,
             sudo=True,
             force_run=True,

@@ -9,9 +9,9 @@ from lisa.util import find_patterns_in_lines


 class ProcessInfo(object):
-    def __init__(self, name: str, id: str) -> None:
+    def __init__(self, name: str, pid: str) -> None:
         self.name = name
-        self.id = id
+        self.id = pid

     def __repr__(self) -> str:
         return f"name: {self.name}, id: {self.id}"

@@ -38,7 +38,7 @@ class Pgrep(Tool):
             f'-l "{process_identifier}"', sudo=True, force_run=True
         ).stdout
         found_processes = find_patterns_in_lines(output, [self._process_map_regex])
-        for item in found_processes[0]:
-            running_process.append(ProcessInfo(name=item[1], id=item[0]))
-
+        running_process.extend(
+            ProcessInfo(name=item[1], pid=item[0]) for item in found_processes[0]
+        )
         return running_process

@@ -44,15 +44,12 @@ class Swap(Tool):
         # example output:
        # sdb2   8:18   0   7.6G  0 part [SWAP]
         lsblk = self.node.tools[Lsblk].run().stdout
-        if "SWAP" in lsblk:
-            return True
-
-        return False
+        return "SWAP" in lsblk

     def create_swap(
-        self, path: str = "/tmp/swap", bytes: str = "1M", count: int = 1024
+        self, path: str = "/tmp/swap", size: str = "1M", count: int = 1024
     ) -> None:
-        self.node.execute(f"dd if=/dev/zero of={path} bs={bytes} count={count}")
+        self.node.execute(f"dd if=/dev/zero of={path} bs={size} count={count}")
         self.node.tools[MkSwap].run(path, sudo=True, force_run=True)
         self.node.tools[SwapOn].run(path, sudo=True, force_run=True)
@@ -44,7 +44,7 @@ class TcpDump(Tool):
     def dump_async(
         self,
         nic_name: str = "",
-        filter: str = "",
+        expression: str = "",
         timeout: int = 0,
         packet_filename: str = "tcp_dump.pcap",
     ) -> Process:

@@ -52,11 +52,10 @@ class TcpDump(Tool):
         # -n not resolve address to domain name.
         # -i specify the nic name
         # -w write to pcap file.
-        command = f"{self.command} -n -i {nic_name} {filter} -w {full_name}"
+        command = f"{self.command} -n -i {nic_name} {expression} -w {full_name}"
         if timeout > 0:
             command = f"timeout {timeout} {command}"
-        process = self.node.execute_async(cmd=command, shell=True, sudo=True)
-        return process
+        return self.node.execute_async(cmd=command, shell=True, sudo=True)

     # It may be called too fast, and the capture file may not be ready.
     # Use retry to wait it completed.

@@ -73,22 +73,22 @@ class Transformer(subclasses.BaseClassWithRunbookMixin, InitializableMixin):

 def _sort(transformers: List[schema.Transformer]) -> List[schema.Transformer]:
     visited: Set[str] = set()
-    all: Dict[str, schema.Transformer] = {}
+    transformers_by_name: Dict[str, schema.Transformer] = {}
     sorted_transformers: List[schema.Transformer] = []

     # construct full set and check duplicates
     for transformer in transformers:
-        if transformer.name in all:
+        if transformer.name in transformers_by_name:
             raise LisaException(
                 f"found duplicate transformers: '{transformer.name}', "
                 f"use different names for them."
             )
-        all[transformer.name] = transformer
+        transformers_by_name[transformer.name] = transformer

     # build new sorted results
     for transformer in transformers:
         if transformer.name not in visited:
-            _sort_dfs(all, transformer, visited, sorted_transformers)
+            _sort_dfs(transformers_by_name, transformer, visited, sorted_transformers)

     # sort by phase: init, expanded.
     init_transformers: List[schema.Transformer] = []

@@ -651,10 +651,10 @@ def generate_random_chars(
     return "".join(random.choices(candidates, k=length))


-def strip_strs(object: Any, fields: List[str]) -> Any:
+def strip_strs(obj: Any, fields: List[str]) -> Any:
     for field in fields:
-        if hasattr(object, field):
-            value = getattr(object, field)
+        if hasattr(obj, field):
+            value = getattr(obj, field)
             value = value.strip() if isinstance(value, str) else value
-            setattr(object, field, value)
-    return object
+            setattr(obj, field, value)
+    return obj
@@ -95,10 +95,10 @@ class Factory(InitializableMixin, Generic[T_BASECLASS], SubClassTypeDict):
         return cast(T_BASECLASS, sub_object)

     def _get_subclasses(
-        self, type: Type[BaseClassMixin]
+        self, cls: Type[BaseClassMixin]
     ) -> Iterable[Type[BaseClassMixin]]:
         # recursive loop subclasses of subclasses
-        for subclass_type in type.__subclasses__():
+        for subclass_type in cls.__subclasses__():
             yield subclass_type
             yield from self._get_subclasses(subclass_type)

@@ -719,9 +719,9 @@ class AzureImageStandard(TestSuite):
             expected_repo_list += ["mariner-official-update"]
         elif 2 == node.os.information.version.major:
             expected_repo_list += ["mariner-official-extras"]
-        for id in expected_repo_list:
+        for id_ in expected_repo_list:
             is_repository_present = any(
-                [id in repository.id for repository in mariner_repositories]
+                id_ in repository.id for repository in mariner_repositories
             )
             assert_that(
                 is_repository_present,

@@ -398,7 +398,7 @@ class Storage(TestSuite):
             raise BadEnvironmentStateException

     def _hot_add_disk_serial(
-        self, log: Logger, node: Node, type: DiskType, size: int
+        self, log: Logger, node: Node, disk_type: DiskType, size: int
     ) -> None:
         disk = node.features[Disk]
         lsblk = node.tools[Lsblk]

@@ -423,7 +423,7 @@ class Storage(TestSuite):
         for _ in range(disks_to_add):
             # add data disk
             log.debug("Adding 1 managed disk")
-            disks_added = disk.add_data_disk(1, type, size)
+            disks_added = disk.add_data_disk(1, disk_type, size)

             # verify that partition count is increased by 1
             # and the size of partition is correct

@@ -457,7 +457,7 @@ class Storage(TestSuite):
         )

     def _hot_add_disk_parallel(
-        self, log: Logger, node: Node, type: DiskType, size: int
+        self, log: Logger, node: Node, disk_type: DiskType, size: int
     ) -> None:
         disk = node.features[Disk]
         lsblk = node.tools[Lsblk]

@@ -481,7 +481,7 @@ class Storage(TestSuite):

         # add data disks
         log.debug(f"Adding {disks_to_add} managed disks")
-        disks_added = disk.add_data_disk(disks_to_add, type, size)
+        disks_added = disk.add_data_disk(disks_to_add, disk_type, size)

         # verify that partition count is increased by disks_to_add
         # and the size of partition is correct
@@ -146,7 +146,7 @@ class DpdkOvs(Tool):
         version_major_and_minor = "\\.".join(minimum_version.split(".")[:2])

         # get the tags, picks the latest with that prefix
-        tag = git.get_tag(cwd=self.repo_dir, filter=f"^v{version_major_and_minor}.*")
+        tag = git.get_tag(cwd=self.repo_dir, filter_=f"^v{version_major_and_minor}.*")

         # checkout the revision into a local branch
         git.checkout(tag, cwd=self.repo_dir, checkout_branch=f"local-{tag}")

@@ -592,7 +592,7 @@ class DpdkTestpmd(Tool):
             # dpdk stopped using a default branch
             # if a branch is not specified, get latest version tag.
             self._dpdk_branch = git_tool.get_tag(
-                self.dpdk_path, filter=r"^v.*"  # starts w 'v'
+                self.dpdk_path, filter_=r"^v.*"  # starts w 'v'
             )

             git_tool.checkout(self._dpdk_branch, cwd=self.dpdk_path)

@@ -85,11 +85,11 @@ class DpdkTestResources:

 def init_hugepages(node: Node) -> None:
     mount = node.tools[Mount]
-    mount.mount(name="nodev", point="/mnt/huge", type=FileSystem.hugetlbfs)
+    mount.mount(name="nodev", point="/mnt/huge", fs_type=FileSystem.hugetlbfs)
     mount.mount(
         name="nodev",
         point="/mnt/huge-1G",
-        type=FileSystem.hugetlbfs,
+        fs_type=FileSystem.hugetlbfs,
         options="pagesize=1G",
     )
     _enable_hugepages(node)

@@ -487,7 +487,7 @@ class NetworkSettings(TestSuite):
             number_test_flag += int(msg_value, 16)

         # variable to indicate set or unset
-        set = True
+        flag_set = True

         # if test message flags are already set, pick first test flag in list.
         # validate change by first unsetting the flag and then unsetting

@@ -495,13 +495,13 @@ class NetworkSettings(TestSuite):
             first_pair = list(msg_types.items())[0]
             name_test_flag.append(first_pair[0])
             number_test_flag = int(first_pair[1], 16)
-            set = False
+            flag_set = False

         # Testing set/unset message level by name
         new_settings = ethtool.set_unset_device_message_flag_by_name(
-            interface, name_test_flag, set
+            interface, name_test_flag, flag_set
         )
-        if set:
+        if flag_set:
             assert_that(
                 new_settings.msg_level_name,
                 f"Setting msg flags - {' '.join(name_test_flag)} didn't"

@@ -515,9 +515,9 @@ class NetworkSettings(TestSuite):
             ).does_not_contain(" ".join(name_test_flag))

         reverted_settings = ethtool.set_unset_device_message_flag_by_name(
-            interface, name_test_flag, not set
+            interface, name_test_flag, not flag_set
         )
-        if not set:
+        if not flag_set:
             assert_that(
                 reverted_settings.msg_level_name,
                 f"Setting msg flags by name - {' '.join(name_test_flag)} didn't"
@@ -123,7 +123,7 @@ def reset_partitions(
     partition_disks: List[str] = []
     for data_disk in disk_names:
        fdisk.delete_partitions(data_disk)
-        partition_disks.append(fdisk.make_partition(data_disk, format=False))
+        partition_disks.append(fdisk.make_partition(data_disk, format_=False))
     return partition_disks

@@ -251,7 +251,7 @@ class StoragePerformance(TestSuite):
         ]
         failed_test_cases = []
         for testcase in testcases:
-            id = str(uuid.uuid4())

             try:
                 start_iodepth = testcase.get("start_iodepth", 1)
                 max_iodepth = testcase.get("max_iodepth", 1)

@@ -274,7 +274,7 @@ class StoragePerformance(TestSuite):
                 node=node,
                 start_iodepth=start_iodepth,
                 max_iodepth=max_iodepth,
-                filename=f"{size_mb}_MB_FIO_{id}",
+                filename=f"{size_mb}_MB_FIO_{uuid.uuid4()}",
                 test_result=result,
                 test_name=test_name,
                 num_jobs=num_jobs,

@@ -27,7 +27,7 @@ def _format_disk(
     partition_disks: List[str] = []
     for data_disk in disk_list:
         fdisk.delete_partitions(data_disk)
-        partition_disks.append(fdisk.make_partition(data_disk, format=False))
+        partition_disks.append(fdisk.make_partition(data_disk, format_=False))
     return partition_disks

@@ -49,7 +49,7 @@ class WaAgentBvt(TestSuite):
         result = extension.create_or_update(
             name="CustomScript",
             publisher="Microsoft.Azure.Extensions",
-            type="CustomScript",
+            type_="CustomScript",
             type_handler_version="2.0",
             auto_upgrade_minor_version=True,
             settings=settings,

@@ -64,7 +64,7 @@ def set_hugepage(node: Node) -> None:
     mount = node.tools[Mount]
     for point, options in _huge_page_disks.items():
         mount.mount(
-            name="nodev", point=point, type=FileSystem.hugetlbfs, options=options
+            name="nodev", point=point, fs_type=FileSystem.hugetlbfs, options=options
         )
     echo = node.tools[Echo]
     echo.write_to_file(

@@ -102,7 +102,7 @@ def remove_hugepage(node: Node) -> None:

     mount = node.tools[Mount]
     for point in _huge_page_disks:
-        mount.umount(disk_name="nodev", point=point, type="hugetlbfs", erase=False)
+        mount.umount(disk_name="nodev", point=point, fs_type="hugetlbfs", erase=False)
         pure_path = node.get_pure_path(point)
         node.execute(f"rm -rf {pure_path}", sudo=True)

@@ -455,7 +455,7 @@ class XdpFunctional(TestSuite):
         pcap_filename = f"{case_name}.pcap"
         tcpdump.dump_async(
             ping_source_node.nics.default_nic,
-            filter=f'"icmp and host {ping_address}"',
+            expression=f'"icmp and host {ping_address}"',
             packet_filename=pcap_filename,
         )
         xdpdump.test_by_ping(
pylintrc

@@ -46,7 +46,6 @@ disable=
     pointless-statement,
     pointless-string-statement,
     redefined-argument-from-local,
-    redefined-builtin,
     redefined-outer-name,
     super-init-not-called,
     unbalanced-tuple-unpacking,
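Removing redefined-builtin from the disable list above turns the check on for the whole repository. The one shadowing kept on purpose is the module-level copyright variable that Sphinx expects in what appears to be the documentation config (first hunk of this diff); it is silenced with a line-scoped pragma instead of a rename:

    # Sphinx reads a module-level `copyright` from the docs configuration, so the
    # name stays and the warning is suppressed on that single line.
    copyright = "Microsoft Corporation"  # pylint: disable=redefined-builtin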
@@ -131,10 +131,13 @@ class ResultStateManagerTestCase(TestCase):
         completed_count: int,
         expected_statuses: List[TestStatus],
     ) -> None:
-        all = self._create_information(all_count, TestStatus.QUEUED)
-        running = self._create_information(running_count, TestStatus.RUNNING)
-        completed = self._create_information(completed_count, TestStatus.PASSED)
-        state.set_states(all, running, completed)
+
+        state.set_states(
+            self._create_information(all_count, TestStatus.QUEUED),
+            self._create_information(running_count, TestStatus.RUNNING),
+            self._create_information(completed_count, TestStatus.PASSED),
+        )
+
         self.assertListEqual([x.status for x in state._results], expected_statuses)

     def _create_information(

@@ -49,8 +49,7 @@ class MockItem(RequirementMixin):
 class SearchSpaceTestCase(unittest.TestCase):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
-        id = f"{'.'.join(self.id().split('.')[-2:])}"
-        self._log = get_logger(id)
+        self._log = get_logger(f"{'.'.join(self.id().split('.')[-2:])}")

     def test_supported_intrange(self) -> None:
         self._verify_matrix(