Add unit tests for lisarunner, and bug fixes (#1019)

* Add unit tests for lisarunner, and fix bugs

1. Add unit tests for the LISA runner.
2. Fix bugs found by the unit tests.
3. Remove some unused code.
4. Improve testability.
5. Move EnvironmentSpace from schema to environment, because it was confusing
  in the schema folder: it is not a schema (see the import sketch below).
6. Other minor fixes.
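A minimal sketch of the import-path change from item 5. This is illustrative only and not part of the commit; the default topology constant is taken from the diff below:

# Hypothetical usage sketch, not part of this commit.
# Before: the class lived in the schema module.
#   from lisa.schema import EnvironmentSpace
# After: it lives alongside the environment code.
from lisa.environment import EnvironmentSpace

space = EnvironmentSpace()  # topology defaults to constants.ENVIRONMENTS_SUBNET
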
This commit is contained in:
Chi Song 2020-09-18 11:31:02 +08:00 committed by GitHub
Parent 9319ce0a3d
Commit 13d9be733f
No known key found for this signature
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 694 additions and 163 deletions

View File

@@ -36,11 +36,6 @@ class Action(metaclass=ABCMeta):
def config(self, key: str, value: object) -> None:
pass
@property
@abstractmethod
def typename(self) -> str:
raise NotImplementedError()
@abstractmethod
async def start(self) -> None:
self.__is_started = True

View File

@@ -4,7 +4,7 @@ from argparse import Namespace
from typing import Iterable, Optional, cast
from lisa.parameter_parser.runbook import load as load_runbook
from lisa.test_runner.lisarunner import LISARunner
from lisa.test_runner.lisarunner import LisaRunner
from lisa.testselector import select_testcases
from lisa.testsuite import TestCaseRuntimeData
from lisa.util import LisaException, constants
@@ -16,7 +16,7 @@ _get_init_logger = functools.partial(get_logger, "init")
def run(args: Namespace) -> None:
runbook = load_runbook(args)
runner = LISARunner()
runner = LisaRunner()
runner.config(constants.CONFIG_RUNBOOK, runbook)
awaitable = runner.start()
asyncio.run(awaitable)

View File

@@ -2,12 +2,16 @@ from __future__ import annotations
import copy
from collections import UserDict
from dataclasses import dataclass, field
from functools import partial
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING, Any, List, Optional
from lisa import schema
from dataclasses_json import LetterCase, dataclass_json # type: ignore
from marshmallow import validate
from lisa import schema, search_space
from lisa.node import Nodes
from lisa.util import ContextMixin, InitializableMixin, LisaException
from lisa.util import ContextMixin, InitializableMixin, LisaException, constants
from lisa.util.logger import get_logger
if TYPE_CHECKING:
@@ -18,6 +22,89 @@ if TYPE_CHECKING:
_get_init_logger = partial(get_logger, "init", "env")
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class EnvironmentSpace(search_space.RequirementMixin):
"""
Search space of an environment. It is used to
1. specify a test suite requirement, see TestCaseRequirement
2. describe the capability of an environment, see Environment.capability
"""
topology: str = field(
default=constants.ENVIRONMENTS_SUBNET,
metadata=schema.metadata(
validate=validate.OneOf([constants.ENVIRONMENTS_SUBNET])
),
)
nodes: List[schema.NodeSpace] = field(default_factory=list)
def __post_init__(self, *args: Any, **kwargs: Any) -> None:
self._expand_node_space()
def __eq__(self, o: object) -> bool:
assert isinstance(o, EnvironmentSpace), f"actual: {type(o)}"
return self.topology == o.topology and search_space.equal_list(
self.nodes, o.nodes
)
def check(self, capability: Any) -> search_space.ResultReason:
assert isinstance(capability, EnvironmentSpace), f"actual: {type(capability)}"
result = search_space.ResultReason()
if not capability.nodes:
result.add_reason("no node instance found")
elif len(self.nodes) > len(capability.nodes):
result.add_reason(
f"no enough nodes, "
f"capability: {len(capability.nodes)}, "
f"requirement: {len(self.nodes)}"
)
else:
if self.nodes:
for index, current_req in enumerate(self.nodes):
current_cap = capability.nodes[index]
result.merge(
search_space.check(current_req, current_cap), str(index),
)
if not result.result:
break
return result
@classmethod
def from_value(cls, value: Any) -> Any:
assert isinstance(value, EnvironmentSpace), f"actual: {type(value)}"
env = EnvironmentSpace()
env.nodes = value.nodes
if value.nodes:
env.nodes = list()
for value_capability in value.nodes:
env.nodes.append(schema.NodeSpace.from_value(value_capability))
return env
def _generate_min_capability(self, capability: Any) -> Any:
env = EnvironmentSpace(topology=self.topology)
assert isinstance(capability, EnvironmentSpace), f"actual: {type(capability)}"
assert capability.nodes
for index, current_req in enumerate(self.nodes):
if len(capability.nodes) == 1:
current_cap = capability.nodes[0]
else:
current_cap = capability.nodes[index]
env.nodes.append(current_req.generate_min_capability(current_cap))
return env
def _expand_node_space(self) -> None:
if self.nodes:
expanded_requirements: List[schema.NodeSpace] = []
for node in self.nodes:
expanded_requirements.extend(node.expand_by_node_count())
self.nodes = expanded_requirements
class Environment(ContextMixin, InitializableMixin):
def __init__(self, is_predefined: bool, warn_as_error: bool) -> None:
super().__init__()
@@ -35,7 +122,7 @@ class Environment(ContextMixin, InitializableMixin):
self.cost: int = 0
# original runbook, or one generated from the test case that this environment supports
self.runbook: schema.Environment
self._capability: Optional[schema.EnvironmentSpace] = None
self._capability: Optional[EnvironmentSpace] = None
self.warn_as_error = warn_as_error
self._default_node: Optional[Node] = None
self._log = get_logger("env", self.name)
@@ -85,10 +172,10 @@ class Environment(ContextMixin, InitializableMixin):
self.nodes.close()
@property
def capability(self) -> schema.EnvironmentSpace:
def capability(self) -> EnvironmentSpace:
# merge existing node to capability
if self._capability is None:
result = schema.EnvironmentSpace(topology=self.runbook.topology)
result = EnvironmentSpace(topology=self.runbook.topology)
for node in self.nodes.list():
result.nodes.append(node.capability)
if not self.is_ready and self.runbook.nodes_requirement:
@@ -124,9 +211,7 @@ class Environments(EnvironmentsDict):
self.max_concurrency = max_concurrency
self.allow_create = allow_create
def get_or_create(
self, requirement: schema.EnvironmentSpace
) -> Optional[Environment]:
def get_or_create(self, requirement: EnvironmentSpace) -> Optional[Environment]:
result: Optional[Environment] = None
for environment in self.values():
# find exact match, or create a new one.
@@ -137,9 +222,7 @@ class Environments(EnvironmentsDict):
result = self.from_requirement(requirement)
return result
def from_requirement(
self, requirement: schema.EnvironmentSpace
) -> Optional[Environment]:
def from_requirement(self, requirement: EnvironmentSpace) -> Optional[Environment]:
runbook = schema.Environment(
topology=requirement.topology, nodes_requirement=requirement.nodes,
)
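A hedged sketch (not part of the commit) of how the moved requirement/capability pattern is exercised; check and generate_min_capability appear in the diff above, while the node values here are illustrative:

# Illustrative only: exercise EnvironmentSpace as a search space.
from lisa import schema, search_space
from lisa.environment import EnvironmentSpace

requirement = EnvironmentSpace(nodes=[schema.NodeSpace(node_count=1, core_count=4)])
capability = EnvironmentSpace(
    nodes=[schema.NodeSpace(node_count=1, core_count=search_space.IntRange(4, 8))]
)

result = requirement.check(capability)  # ResultReason with .result and .reasons
if result.result:
    # shrink the capability to the smallest environment meeting the requirement
    minimal = requirement.generate_min_capability(capability)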

View File

@@ -132,12 +132,17 @@ class Platforms(PlatformsDict):
def register_platform(self, platform: Type[Platform]) -> None:
platform_type = platform.platform_type()
if self.get(platform_type) is None:
self[platform_type] = platform()
else:
raise LisaException(
f"platform '{platform_type}' exists, cannot be registered again"
exist_platform = self.get(platform_type)
if exist_platform:
# so far, this happens in ut only. As global variables are used in ut, it's
# important to use the first registered platform.
log = _get_init_logger()
log.warning(
f"ignore to register [{platform_type}] platform again. "
f"new: [{platform}], exist: [{exist_platform}]"
)
else:
self[platform_type] = platform()
def _load_sub_platforms() -> Platforms:
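For clarity, the behavioral change above in a hedged example; MockPlatform stands in for any Platform subclass, and the call sequence is illustrative rather than taken from the commit:

# Illustrative only: duplicate registration now logs a warning and keeps
# the first registered platform instead of raising LisaException.
platforms = _load_sub_platforms()
platforms.register_platform(MockPlatform)  # registered
platforms.register_platform(MockPlatform)  # warns, keeps the existing one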

View File

@@ -361,6 +361,20 @@ class NodeSpace(search_space.RequirementMixin, ExtendableSchemaMixin):
if self.excluded_features is not None:
self.excluded_features.is_allow_set = False
def __eq__(self, o: object) -> bool:
assert isinstance(o, NodeSpace), f"actual: {type(o)}"
return (
self.type == o.type
and self.node_count == o.node_count
and self.core_count == o.core_count
and self.memory_mb == o.memory_mb
and self.disk_count == o.disk_count
and self.nic_count == o.nic_count
and self.gpu_count == o.gpu_count
and self.features == o.features
and self.excluded_features == o.excluded_features
)
def __repr__(self) -> str:
"""
override it for shorter text
@@ -550,8 +564,11 @@ class NodeSpace(search_space.RequirementMixin, ExtendableSchemaMixin):
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class Capability(NodeSpace):
type: str = constants.ENVIRONMENTS_NODES_CAPABILITY
node_count = 1
type: str = constants.ENVIRONMENTS_NODES_REQUIREMENT
def __post_init__(self, *args: Any, **kwargs: Any) -> None:
super().__post_init__(*args, **kwargs)
self.node_count = 1
@dataclass_json(letter_case=LetterCase.CAMEL)
@@ -626,75 +643,6 @@ class RemoteNode:
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class EnvironmentSpace(search_space.RequirementMixin):
topology: str = field(
default=constants.ENVIRONMENTS_SUBNET,
metadata=metadata(validate=validate.OneOf([constants.ENVIRONMENTS_SUBNET])),
)
nodes: List[NodeSpace] = field(default_factory=list)
def __post_init__(self, *args: Any, **kwargs: Any) -> None:
self._expand_node_space()
def check(self, capability: Any) -> search_space.ResultReason:
assert isinstance(capability, EnvironmentSpace), f"actual: {type(capability)}"
result = search_space.ResultReason()
if not capability.nodes:
result.add_reason("no node instance found")
elif len(self.nodes) > len(capability.nodes):
result.add_reason(
f"no enough nodes, "
f"capability: {len(capability.nodes)}, "
f"requirement: {len(self.nodes)}"
)
else:
if self.nodes:
for index, current_req in enumerate(self.nodes):
current_cap = capability.nodes[index]
result.merge(
search_space.check(current_req, current_cap), str(index),
)
if not result.result:
break
return result
@classmethod
def from_value(cls, value: Any) -> Any:
assert isinstance(value, EnvironmentSpace), f"actual: {type(value)}"
env = EnvironmentSpace()
env.nodes = value.nodes
if value.nodes:
env.nodes = list()
for value_capability in value.nodes:
env.nodes.append(NodeSpace.from_value(value_capability))
return env
def _generate_min_capability(self, capability: Any) -> Any:
env = EnvironmentSpace(topology=self.topology)
assert isinstance(capability, EnvironmentSpace), f"actual: {type(capability)}"
assert capability.nodes
for index, current_req in enumerate(self.nodes):
if len(capability.nodes) == 1:
current_cap = capability.nodes[0]
else:
current_cap = capability.nodes[index]
env.nodes.append(current_req.generate_min_capability(current_cap))
return env
def _expand_node_space(self) -> None:
if self.nodes:
expanded_requirements: List[NodeSpace] = []
for node in self.nodes:
expanded_requirements.extend(node.expand_by_node_count())
self.nodes = expanded_requirements
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class Environment:

View File

@@ -1,6 +1,6 @@
from typing import Any, Dict, List, Optional
from lisa import schema
from lisa import schema, search_space
from lisa.action import Action, ActionStatus
from lisa.environment import Environment, Environments, load_environments
from lisa.platform_ import WaitMoreResourceError, load_platform
@@ -17,17 +17,13 @@ from lisa.util import constants
from lisa.util.logger import get_logger
class LISARunner(Action):
class LisaRunner(Action):
def __init__(self) -> None:
super().__init__()
self.exitCode = None
self._log = get_logger("runner")
@property
def typename(self) -> str:
return "LISA"
def config(self, key: str, value: Any) -> None:
if key == constants.CONFIG_RUNBOOK:
self._runbook: schema.Runbook = value
@@ -91,14 +87,14 @@ class LISARunner(Action):
try:
platform.deploy_environment(environment)
except WaitMoreResourceError as identifier:
self._log.warn(
self._log.warning(
f"[{environment.name}] waiting for more resource: "
f"{identifier}, skip assiging case"
)
continue
if not environment.is_ready:
self._log.warn(
self._log.warning(
f"[{environment.name}] is not deployed successfully, "
f"skip assiging case"
)
@@ -147,11 +143,11 @@ class LISARunner(Action):
# not run as there is no fit environment.
for case in can_run_results:
if case.can_run:
assert case.check_results
case.set_status(
TestStatus.SKIPPED,
f"no environment meet requirement: {case.check_results.reasons}",
)
reasons = "no available environment"
if case.check_results and case.check_results.reasons:
reasons = f"{reasons}: {case.check_results.reasons}"
case.set_status(TestStatus.SKIPPED, reasons)
result_count_dict: Dict[TestStatus, int] = dict()
for test_result in selected_test_results:
@@ -168,9 +164,11 @@ class LISARunner(Action):
for key in TestStatus:
self._log.info(f" {key.name:<9}: {result_count_dict.get(key, 0)}")
# delete environment after run
self.set_status(ActionStatus.SUCCESS)
# for UT testability
self._latest_test_results = selected_test_results
async def stop(self) -> None:
super().stop()
@@ -188,13 +186,7 @@ class LISARunner(Action):
)
for case in cases:
case.assigned_env = environment.name
try:
await test_suite.start()
except Exception as identifier:
self._log.error(f"suite[{suite_metadata.name}] failed: {identifier}")
for case in cases:
if case.can_run:
case.set_status(TestStatus.SKIPPED, "test suite setup failed")
await test_suite.start()
def _create_test_results(
self, cases: List[TestCaseRuntimeData]
@@ -210,12 +202,16 @@ class LISARunner(Action):
existing_environments: Environments,
platform_type: str,
) -> None:
assert platform_type
platform_type_set = search_space.SetSpace[str](
is_allow_set=True, items=[platform_type]
)
for test_result in test_results:
test_req: TestCaseRequirement = test_result.runtime_data.requirement
# check if there is a platform requirement on the test case
if test_req.platform_type:
check_result = test_req.platform_type.check(platform_type)
if test_req.platform_type and len(test_req.platform_type) > 0:
check_result = test_req.platform_type.check(platform_type_set)
if not check_result.result:
test_result.set_status(TestStatus.SKIPPED, check_result.reasons)

View File

View File

@@ -0,0 +1,444 @@
import asyncio
from typing import List, Optional
from unittest.case import TestCase
from lisa import schema
from lisa.environment import load_environments
from lisa.test_runner.lisarunner import LisaRunner
from lisa.tests import test_platform, test_testsuite
from lisa.tests.test_environment import generate_runbook as generate_env_runbook
from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs
from lisa.tests.test_testsuite import (
cleanup_cases_metadata,
generate_cases_metadata,
generate_cases_result,
)
from lisa.testsuite import TestResult, TestStatus, simple_requirement
from lisa.util import constants
def generate_lisarunner(
env_runbook: Optional[schema.EnvironmentRoot] = None, case_use_new_env: bool = False
) -> LisaRunner:
runbook = schema.Runbook(
platform=[
schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it")
],
testcase=[
schema.TestCase(
criteria=schema.Criteria(priority=[0, 1, 2]),
use_new_environment=case_use_new_env,
)
],
)
if env_runbook:
runbook.environment = env_runbook
runner = LisaRunner()
runner.config(constants.CONFIG_RUNBOOK, runbook)
return runner
class LisaRunnerTestCase(TestCase):
def tearDown(self) -> None:
cleanup_cases_metadata()
test_platform.return_prepared = True
test_platform.deploy_is_ready = True
test_platform.deploy_success = True
test_platform.wait_more_resource_error = False
def test_merge_req_create_on_new(self) -> None:
# if there are no predefined envs, they can be generated from requirements
env_runbook = generate_env_runbook(is_single_env=False)
envs = load_environments(env_runbook)
self.assertListEqual(
[], [x for x in envs],
)
runner = generate_lisarunner(None)
test_results = generate_cases_result()
runner._merge_test_requirements(
test_results=test_results,
existing_environments=envs,
platform_type=constants.PLATFORM_MOCK,
)
# 3 cases create 3 envs
self.assertListEqual(
["req_0", "req_1", "req_2"], [x for x in envs],
)
self.verify_test_results(
expected_envs=["", "", ""],
expected_status=[TestStatus.NOTRUN, TestStatus.NOTRUN, TestStatus.NOTRUN],
expected_message=["", "", ""],
test_results=test_results,
)
def test_merge_req_run_not_create_on_equal(self) -> None:
# when merging requirements from test cases,
# it won't create a new env if a predefined one exactly matches what a case needs
env_runbook = generate_env_runbook(remote=True)
envs = load_environments(env_runbook)
self.assertListEqual(
["runbook_0"], [x for x in envs],
)
runner = generate_lisarunner(env_runbook)
test_results = generate_cases_result()
runner._merge_test_requirements(
test_results=test_results,
existing_environments=envs,
platform_type=constants.PLATFORM_MOCK,
)
# 3 cases created only two reqs, as the simple req is met by runbook_0
self.assertListEqual(
["runbook_0", "req_1", "req_2"], [x for x in envs],
)
self.assertListEqual(
[TestStatus.NOTRUN, TestStatus.NOTRUN, TestStatus.NOTRUN],
[x.status for x in test_results],
)
def test_merge_req_create_on_use_new(self) -> None:
# same runbook as test_merge_req_run_not_create_on_equal,
# but all 3 cases ask for a new env, so 3 envs are created.
# note: when running cases, a predefined env is treated as a new env.
env_runbook = generate_env_runbook(remote=True)
envs = load_environments(env_runbook)
self.assertListEqual(
["runbook_0"], [x for x in envs],
)
runner = generate_lisarunner(env_runbook)
test_results = generate_cases_result()
for test_result in test_results:
test_result.runtime_data.use_new_environment = True
runner._merge_test_requirements(
test_results=test_results,
existing_environments=envs,
platform_type=constants.PLATFORM_MOCK,
)
# every case needs a new environment, so 3 are created
self.assertListEqual(
["runbook_0", "req_1", "req_2", "req_3"], [x for x in envs],
)
self.verify_test_results(
expected_envs=["", "", ""],
expected_status=[TestStatus.NOTRUN, TestStatus.NOTRUN, TestStatus.NOTRUN],
expected_message=["", "", ""],
test_results=test_results,
)
def test_merge_req_not_allow_create(self) -> None:
# force using existing envs instead of creating new ones.
# this case doesn't provide a predefined env, so all cases are skipped.
env_runbook = generate_env_runbook(is_single_env=False)
env_runbook.allow_create = False
envs = load_environments(env_runbook)
self.assertListEqual(
[], [x for x in envs],
)
runner = generate_lisarunner(None)
test_results = generate_cases_result()
runner._merge_test_requirements(
test_results=test_results,
existing_environments=envs,
platform_type=constants.PLATFORM_MOCK,
)
self.assertListEqual(
[], [x for x in envs],
)
not_allow_new_message = (
"not found fit environment, and not allow to create new environment"
)
self.verify_test_results(
expected_envs=["", "", ""],
expected_status=[
TestStatus.SKIPPED,
TestStatus.SKIPPED,
TestStatus.SKIPPED,
],
expected_message=[
not_allow_new_message,
not_allow_new_message,
not_allow_new_message,
],
test_results=test_results,
)
def test_merge_req_platform_type_checked(self) -> None:
# check whether the current platform is supported;
# for example, some cases run on azure only.
# the platform check happens in the runner, so this case lives here.
# a simple check is enough; more is covered by search_space.SetSpace
env_runbook = generate_env_runbook(is_single_env=False)
envs = load_environments(env_runbook)
self.assertListEqual(
[], [x for x in envs],
)
runner = generate_lisarunner(None)
test_results = generate_cases_result()
for test_result in test_results:
metadata = test_result.runtime_data.metadata
metadata.requirement = simple_requirement(
supported_platform_type=["notexists"]
)
runner._merge_test_requirements(
test_results=test_results,
existing_environments=envs,
platform_type=constants.PLATFORM_MOCK,
)
platform_unsupported = "capability cannot support some of requirement"
self.verify_test_results(
expected_envs=["", "", ""],
expected_status=[
TestStatus.SKIPPED,
TestStatus.SKIPPED,
TestStatus.SKIPPED,
],
expected_message=[
platform_unsupported,
platform_unsupported,
platform_unsupported,
],
test_results=test_results,
)
def test_fit_a_predefined_env(self) -> None:
# a predefined env can run cases under the conditions below:
# 1. with a predefined env of 1 simple node, ut2 doesn't need a new env
# 2. ut3 needs 8 cores, and the predefined env is targeted to meet all core
# requirements, so it can run any case with core requirements.
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, remote=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "req_1", "req_2"],
expected_deployed_envs=["runbook_0", "req_1"],
expected_deleted_envs=["runbook_0", "req_1"],
)
self.verify_test_results(
expected_envs=["req_1", "runbook_0", "runbook_0"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
test_results=runner._latest_test_results,
)
def test_fit_a_bigger_env(self) -> None:
# similar to test_fit_a_predefined_env, but 2 nodes are predefined;
# it doesn't equal any case req, but is reusable for all cases.
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
expected_deployed_envs=["runbook_0"],
expected_deleted_envs=["runbook_0"],
)
self.verify_test_results(
expected_envs=["runbook_0", "runbook_0", "runbook_0"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
test_results=runner._latest_test_results,
)
def test_case_new_env_run_only_1_needed(self) -> None:
# same predefined env as test_fit_a_bigger_env,
# but all cases want to run on a new env
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
runner = generate_lisarunner(env_runbook, case_use_new_env=True)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
expected_deployed_envs=["runbook_0", "req_1", "req_2"],
expected_deleted_envs=["runbook_0", "req_1", "req_2"],
)
self.verify_test_results(
expected_envs=["runbook_0", "req_1", "req_2"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
test_results=runner._latest_test_results,
)
def test_no_needed_env(self) -> None:
# two 1-node envs are predefined, but only runbook_0 goes to deploy;
# no cases are assigned to runbook_1, as fitting cases already run on runbook_0
generate_cases_metadata()
env_runbook = generate_env_runbook(local=True, remote=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "runbook_1", "req_2", "req_3"],
expected_deployed_envs=["runbook_0", "req_2"],
expected_deleted_envs=["runbook_0", "req_2"],
)
self.verify_test_results(
expected_envs=["req_2", "runbook_0", "runbook_0"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
test_results=runner._latest_test_results,
)
def test_deploy_no_more_resource(self) -> None:
# the platform may run out of resources, for example no azure quota.
# cases are skipped due to this.
# in the future, a retry will be added to wait for more resources.
test_platform.wait_more_resource_error = True
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
expected_deployed_envs=[],
expected_deleted_envs=[],
)
before_suite_failed = "no available environment"
self.verify_test_results(
expected_envs=["", "", ""],
expected_status=[
TestStatus.SKIPPED,
TestStatus.SKIPPED,
TestStatus.SKIPPED,
],
expected_message=[
before_suite_failed,
before_suite_failed,
before_suite_failed,
],
test_results=runner._latest_test_results,
)
def test_skipped_on_suite_failure(self) -> None:
# the first two cases are skipped because the test suite setup failed
test_testsuite.fail_on_before_suite = True
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
expected_deployed_envs=["runbook_0"],
expected_deleted_envs=["runbook_0"],
)
before_suite_failed = "before_suite: failed"
self.verify_test_results(
expected_envs=["runbook_0", "runbook_0", "runbook_0"],
expected_status=[
TestStatus.SKIPPED,
TestStatus.SKIPPED,
TestStatus.PASSED,
],
expected_message=[before_suite_failed, before_suite_failed, ""],
test_results=runner._latest_test_results,
)
def test_env_skipped_no_prepared_env(self) -> None:
# test envs are not prepared, so test cases cannot find an env to run
test_platform.return_prepared = False
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
expected_deployed_envs=[],
expected_deleted_envs=[],
)
no_available_env = "no available environment"
self.verify_test_results(
expected_envs=["", "", ""],
expected_status=[
TestStatus.SKIPPED,
TestStatus.SKIPPED,
TestStatus.SKIPPED,
],
expected_message=[no_available_env, no_available_env, no_available_env],
test_results=runner._latest_test_results,
)
def test_env_skipped_not_ready(self) -> None:
# envs are prepared, but not deployed to a ready state,
# so no cases can run
test_platform.deploy_is_ready = False
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
self.verify_env_results(
expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
expected_deployed_envs=["runbook_0", "req_1", "req_2", "req_3"],
expected_deleted_envs=[],
)
no_available_env = "no available environment"
self.verify_test_results(
expected_envs=["", "", ""],
expected_status=[
TestStatus.SKIPPED,
TestStatus.SKIPPED,
TestStatus.SKIPPED,
],
expected_message=[no_available_env, no_available_env, no_available_env],
test_results=runner._latest_test_results,
)
def test_env_skipped_no_case(self) -> None:
# no cases are found, as generate_cases_metadata is not called;
# in this case, no env is deployed
env_runbook = generate_env_runbook(is_single_env=True, remote=True)
runner = generate_lisarunner(env_runbook)
asyncio.run(runner.start())
# still prepares the predefined env, but does not deploy it
self.verify_env_results(
expected_prepared=["runbook_0"],
expected_deployed_envs=[],
expected_deleted_envs=[],
)
self.verify_test_results(
expected_envs=[],
expected_status=[],
expected_message=[],
test_results=runner._latest_test_results,
)
def verify_test_results(
self,
expected_envs: List[str],
expected_status: List[TestStatus],
expected_message: List[str],
test_results: List[TestResult],
) -> None:
self.assertListEqual(
expected_envs, [x.assigned_env for x in test_results],
)
self.assertListEqual(
expected_status, [x.status for x in test_results],
)
# compare that each message begins with the expected prefix
actual_messages = [
test_results[index].message[0 : len(expected)]
for index, expected in enumerate(expected_message)
]
self.assertListEqual(
expected_message, actual_messages,
)
def verify_env_results(
self,
expected_prepared: List[str],
expected_deployed_envs: List[str],
expected_deleted_envs: List[str],
) -> None:
self.assertListEqual(
expected_prepared, [x for x in prepared_envs],
)
self.assertListEqual(
expected_deployed_envs, [x for x in deployed_envs],
)
self.assertListEqual(
expected_deleted_envs, [x for x in deleted_envs],
)
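Since the new suite builds on the standard library's unittest.TestCase, it can presumably be run with the stock loader; the module path lisa.tests.test_lisarunner is an assumption, as the diff view does not show file names:

# Assumed module path; adjust to wherever this file actually lives.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("lisa.tests.test_lisarunner")
unittest.TextTestRunner(verbosity=2).run(suite)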

View File

@@ -3,6 +3,7 @@ from functools import partial
from typing import Any, List, Optional, Type
from lisa import schema
from lisa.environment import EnvironmentSpace
from lisa.operating_system import OperatingSystem
from lisa.search_space import (
IntRange,
@@ -19,7 +20,7 @@ from lisa.util import constants
@dataclass
class TestCaseSchema:
environment: schema.EnvironmentSpace
environment: EnvironmentSpace
platform_type: Optional[SetSpace[Type[schema.Platform]]]
operating_system: Optional[SetSpace[Type[OperatingSystem]]]
@@ -127,29 +128,29 @@ class RequirementTestCase(SearchSpaceTestCase):
partial_testcase_schema = partial(
TestCaseSchema, platform_type=None, operating_system=None,
)
s11 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s11 = partial_testcase_schema(environment=EnvironmentSpace())
s11.environment.nodes = [n1]
s14 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s14 = partial_testcase_schema(environment=EnvironmentSpace())
s14.environment.nodes = [n4]
s14g1 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s14g1 = partial_testcase_schema(environment=EnvironmentSpace())
s14g1.environment.nodes = [n4g1]
s24 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s24 = partial_testcase_schema(environment=EnvironmentSpace())
s24.environment.nodes = [n4, n4]
s16 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s16 = partial_testcase_schema(environment=EnvironmentSpace())
s16.environment.nodes = [n6]
s16g2 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s16g2 = partial_testcase_schema(environment=EnvironmentSpace())
s16g2.environment.nodes = [n6g2]
s16g1 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s16g1 = partial_testcase_schema(environment=EnvironmentSpace())
s16g1.environment.nodes = [n6g1]
s110 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s110 = partial_testcase_schema(environment=EnvironmentSpace())
s110.environment.nodes = [n10]
s2i6 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s2i6 = partial_testcase_schema(environment=EnvironmentSpace())
s2i6.environment.nodes = [n6, n6]
s266 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s266 = partial_testcase_schema(environment=EnvironmentSpace())
s266.environment.nodes = [n6, n6]
s2610 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s2610 = partial_testcase_schema(environment=EnvironmentSpace())
s2610.environment.nodes = [n6, n10]
s2106 = partial_testcase_schema(environment=schema.EnvironmentSpace())
s2106 = partial_testcase_schema(environment=EnvironmentSpace())
s2106.environment.nodes = [n10, n6]
self._verify_matrix(
@@ -177,7 +178,7 @@ class RequirementTestCase(SearchSpaceTestCase):
min_count=2, node=schema.NodeSpace(core_count=IntRange(4, 8))
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[schema.NodeSpace(core_count=6, node_count=1)]
)
),
@@ -189,14 +190,14 @@ class RequirementTestCase(SearchSpaceTestCase):
capabilities=[
ut_simple_requirement(),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(core_count=6, node_count=1, gpu_count=0)
]
)
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(
node_count=1, core_count=6, gpu_count=IntRange(max=2)
@@ -205,28 +206,28 @@ class RequirementTestCase(SearchSpaceTestCase):
)
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(node_count=1, core_count=6, gpu_count=2)
]
)
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(core_count=10, node_count=1, gpu_count=0)
]
)
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(core_count=6, node_count=2, gpu_count=0)
]
)
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(core_count=6, node_count=1, gpu_count=0),
schema.NodeSpace(core_count=6, node_count=1, gpu_count=0),
@@ -234,7 +235,7 @@ class RequirementTestCase(SearchSpaceTestCase):
)
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(core_count=6, node_count=1, gpu_count=0),
schema.NodeSpace(core_count=10, node_count=1, gpu_count=0),
@@ -242,7 +243,7 @@ class RequirementTestCase(SearchSpaceTestCase):
)
),
UtTestCaseRequirement(
environment=schema.EnvironmentSpace(
environment=EnvironmentSpace(
nodes=[
schema.NodeSpace(core_count=10, node_count=1, gpu_count=0),
schema.NodeSpace(core_count=6, node_count=1, gpu_count=0),

View File

@@ -41,7 +41,7 @@ def generate_runbook(
"nodeCount": 2,
"coreCount": 8,
"diskCount": {"min": 1},
"nicCount": {"max": 1},
"nicCount": {"min": 1, "max": 1},
}
)
if is_single_env:
@@ -106,7 +106,9 @@ class EnvironmentTestCase(TestCase):
self.assertEqual(2, len(env_cap.nodes))
self.assertEqual(8, env_cap.nodes[0].core_count)
self.assertEqual(search_space.IntRange(min=1), env_cap.nodes[0].disk_count)
self.assertEqual(search_space.IntRange(max=1), env_cap.nodes[0].nic_count)
self.assertEqual(
search_space.IntRange(min=1, max=1), env_cap.nodes[0].nic_count
)
def test_create_from_requirement(self) -> None:
requirement = simple_requirement(min_count=2)
@@ -116,6 +118,10 @@ class EnvironmentTestCase(TestCase):
env = envs.get_or_create(requirement=env_requirement)
assert env
self.assertEqual(1, len(envs))
requirement = simple_requirement(min_count=2)
env_requirement = requirement.environment
assert env_requirement
env = envs.get_or_create(requirement=env_requirement)
self.assertEqual(1, len(envs), "get or create again won't create new")
assert env

View File

@@ -1,38 +1,72 @@
from typing import cast
from typing import List
from unittest.case import TestCase
from lisa import schema
from lisa.environment import Environment, Environments, load_environments
from lisa.platform_ import Platform, load_platform
from lisa.platform_ import Platform, WaitMoreResourceError, load_platform
from lisa.tests.test_environment import generate_runbook as generate_env_runbook
from lisa.util import LisaException, constants
from lisa.util.logger import Logger
# for other UTs to set values
return_prepared = True
deploy_success = True
deploy_is_ready = True
wait_more_resource_error = False
prepared_envs: List[str] = []
deployed_envs: List[str] = []
deleted_envs: List[str] = []
class MockPlatform(Platform):
def __init__(self) -> None:
super().__init__()
prepared_envs.clear()
deployed_envs.clear()
deleted_envs.clear()
self.set_test_config(
return_prepared=return_prepared,
deploy_success=deploy_success,
deploy_is_ready=deploy_is_ready,
wait_more_resource_error=wait_more_resource_error,
)
@classmethod
def platform_type(cls) -> str:
return constants.PLATFORM_MOCK
def set_test_config(
self, return_prepared: bool = True, deploy_success: bool = True,
self,
return_prepared: bool = True,
deploy_success: bool = True,
deploy_is_ready: bool = True,
wait_more_resource_error: bool = False,
) -> None:
self.return_prepared = return_prepared
self.deploy_success = deploy_success
self.deploy_is_ready = deploy_is_ready
self.wait_more_resource_error = wait_more_resource_error
def _prepare_environment(self, environment: Environment, log: Logger) -> bool:
prepared_envs.append(environment.name)
return self.return_prepared
def _deploy_environment(self, environment: Environment, log: Logger) -> None:
if self.wait_more_resource_error:
raise WaitMoreResourceError("wait more resource")
if not self.deploy_success:
raise LisaException("mock deploy failed")
if self.return_prepared and environment.runbook.nodes_requirement:
requirements = environment.runbook.nodes_requirement
for node_space in requirements:
environment.nodes.from_requirement(node_requirement=node_space)
environment.is_ready = True
min_value = node_space.generate_min_capability(node_space)
environment.nodes.from_requirement(node_requirement=min_value)
deployed_envs.append(environment.name)
environment._is_initialized = True
environment.is_ready = self.deploy_is_ready
def _delete_environment(self, environment: Environment, log: Logger) -> None:
deleted_envs.append(environment.name)
self.delete_called = True
@@ -49,8 +83,13 @@ def generate_platform(
}
runbook = schema.Platform.schema().load(runbook_data) # type: ignore
platform = load_platform([runbook])
return cast(MockPlatform, platform)
try:
assert isinstance(platform, MockPlatform), f"actual: {type(platform)}"
except AssertionError:
# the UT may be imported from the tests package instead of the lisa.tests
# package; work around it by assigning the type from the current package
platform = MockPlatform()
return platform
def generate_environments() -> Environments:
@@ -145,7 +184,6 @@ class PlatformTestCase(TestCase):
envs = generate_environments()
platform.set_test_config()
for env in envs.values():
env._is_initialized = True
platform.deploy_environment(env)
self.assertEqual(True, env.is_ready)
platform.delete_environment(env)

View File

@@ -1,12 +1,12 @@
from unittest import TestCase
from lisa.tests.test_testsuite import cleanup_metadata, select_and_check
from lisa.tests.test_testsuite import cleanup_cases_metadata, select_and_check
from lisa.util import LisaException, constants
class SelectorTestCase(TestCase):
def setUp(self) -> None:
cleanup_metadata()
cleanup_cases_metadata()
def test_no_case_selected(self) -> None:
runbook = [{constants.TESTCASE_CRITERIA: {"area": "demo"}}]

View File

@@ -21,8 +21,23 @@ from lisa.testsuite import (
)
from lisa.util import LisaException, constants
# for other UTs
fail_on_before_suite = False
fail_on_after_suite = False
fail_on_before_case = False
fail_on_after_case = False
fail_case_count = 0
class MockTestSuite(TestSuite):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.fail_on_before_suite = fail_on_before_suite
self.fail_on_after_suite = fail_on_after_suite
self.fail_on_before_case = fail_on_before_case
self.fail_on_after_case = fail_on_after_case
self.fail_case_count = fail_case_count
def set_fail_phase(
self,
fail_on_before_suite: bool = False,
@@ -67,7 +82,7 @@ class MockTestSuite2(TestSuite):
pass
def cleanup_metadata() -> None:
def cleanup_cases_metadata() -> None:
get_cases_metadata().clear()
get_suites_metadata().clear()
@@ -132,7 +147,7 @@ class TestSuiteTestCase(TestCase):
return test_suite
def setUp(self) -> None:
cleanup_metadata()
cleanup_cases_metadata()
def test_expanded_nodespace(self) -> None:
cases = generate_cases_metadata()

View File

@@ -11,6 +11,7 @@ from retry.api import retry_call # type: ignore
from lisa import schema, search_space
from lisa.action import Action, ActionStatus
from lisa.environment import EnvironmentSpace
from lisa.operating_system import OperatingSystem
from lisa.util import LisaException, constants, set_filtered_fields
from lisa.util.logger import get_logger
@@ -82,7 +83,7 @@ class TestResult:
@dataclass
class TestCaseRequirement:
environment: Optional[schema.EnvironmentSpace] = None
environment: Optional[EnvironmentSpace] = None
platform_type: Optional[search_space.SetSpace[str]] = None
os_type: Optional[search_space.SetSpace[Type[OperatingSystem]]] = None
@@ -118,7 +119,7 @@ def simple_requirement(
os = search_space.create_set_space(supported_os, unsupported_os, "operating system")
return TestCaseRequirement(
environment=schema.EnvironmentSpace(nodes=nodes),
environment=EnvironmentSpace(nodes=nodes),
platform_type=platform_types,
os_type=os,
)
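A quick hedged illustration of the changed return type; min_count and supported_platform_type both appear elsewhere in this diff, while the platform value here is illustrative:

# Illustrative only: simple_requirement now yields an EnvironmentSpace from
# lisa.environment instead of schema.EnvironmentSpace.
from lisa.testsuite import simple_requirement

req = simple_requirement(min_count=2, supported_platform_type=["mock"])
assert req.environment is not None  # the EnvironmentSpace requirement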
@@ -259,10 +260,6 @@ class TestSuite(unittest.TestCase, Action, metaclass=ABCMeta):
def after_case(self) -> None:
pass
@property
def typename(self) -> str:
return "TestSuite"
async def start(self) -> None:
suite_error_message = ""
is_suite_continue = True
@@ -385,7 +382,10 @@ def _add_suite_metadata(metadata: TestSuiteMetadata) -> None:
if exist_metadata is None:
_all_suites[key] = metadata
else:
raise LisaException(f"duplicate test class name: {key}")
raise LisaException(
f"duplicate test class name: {key}, "
f"new: [{metadata}], exists: [{exist_metadata}]"
)
class_prefix = f"{key}."
for test_case in _all_cases.values():

View File

@@ -79,7 +79,7 @@ class Logger(logging.Logger):
if raise_error:
raise LisaException(message)
else:
self.warn(message)
self.warning(message)
class LogWriter(object):

View File

@@ -122,7 +122,7 @@ class Process:
if timeout < timer.elapsed():
if self._process is not None:
self._log.warn(f"timeout in {timeout} sec, and killed")
self._log.warning(f"timeout in {timeout} sec, and killed")
self.kill()
if not isinstance(self._process, ExecutableResult):