environment: remove allow_create flag

If the runbook contains predefined environments, environments are no longer
created automatically. In such usage scenarios it is confusing to see
generated environments alongside the predefined ones, so this change forces
all test cases to use the predefined environments.

Improved UTs; added checks on case names.
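
In effect, selection now follows one rule: with predefined environments, a
case must fit one of them or it is skipped; without any, environments are
generated per requirement. A minimal sketch of that rule, with illustrative
names (Env, pick_env, and the set-based capability check are not LISA's
actual API):

from typing import List, Optional, Set

class Env:
    # stand-in for a LISA environment; capability simplified to a set
    def __init__(self, name: str, capability: Set[str]) -> None:
        self.name = name
        self.capability = capability

def pick_env(predefined: List[Env], requirement: Set[str]) -> Optional[Env]:
    if predefined:
        # predefined environments are authoritative: fit one of them
        # or be skipped with "no available environment"
        return next((e for e in predefined if requirement <= e.capability), None)
    # no predefined environments: generate one per requirement
    return Env("generated_0", set(requirement))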
Chi Song 2021-05-26 18:38:39 -07:00 committed by Chi Song
Parent 80ac8b7ddb
Commit 2b15949653
8 changed files with 145 additions and 86 deletions

View file

@@ -1,7 +1,6 @@
extension:
- "../testsuites"
environment:
allow_create: false
environments:
- nodes:
- type: local

View file

@@ -275,12 +275,10 @@ class Environments(EnvironmentsDict):
self,
warn_as_error: bool = False,
max_concurrency: int = 1,
allow_create: bool = True,
) -> None:
super().__init__()
self.warn_as_error = warn_as_error
self.max_concurrency = max_concurrency
self.allow_create = allow_create
def get_or_create(self, requirement: EnvironmentSpace) -> Optional[Environment]:
result: Optional[Environment] = None
@@ -301,27 +299,28 @@
return self.from_runbook(
runbook=runbook,
name=f"generated_{len(self.keys())}",
is_original_runbook=False,
is_predefined_runbook=False,
)
def from_runbook(
self, runbook: schema.Environment, name: str, is_original_runbook: bool
self, runbook: schema.Environment, name: str, is_predefined_runbook: bool
) -> Optional[Environment]:
assert runbook
assert name
env: Optional[Environment] = None
if is_original_runbook or self.allow_create:
# make a copy, so that modification on env won't impact test case
copied_runbook = copy.copy(runbook)
copied_runbook.name = name
env = Environment.create(
runbook=copied_runbook,
is_predefined=is_original_runbook,
warn_as_error=self.warn_as_error,
)
self[name] = env
log = _get_init_logger()
log.debug(f"created {env.name}: {env.runbook}")
# make a copy, so that modification on env won't impact test case
copied_runbook = copy.copy(runbook)
copied_runbook.name = name
env = Environment.create(
runbook=copied_runbook,
is_predefined=is_predefined_runbook,
warn_as_error=self.warn_as_error,
)
self[name] = env
log = _get_init_logger()
log.debug(f"created {env.name}: {env.runbook}")
return env
@@ -332,7 +331,6 @@ def load_environments(
environments = Environments(
warn_as_error=root_runbook.warn_as_error,
max_concurrency=root_runbook.max_concurrency,
allow_create=root_runbook.allow_create,
)
environments_runbook = root_runbook.environments
@@ -340,7 +338,7 @@ def load_environments(
env = environments.from_runbook(
runbook=environment_runbook,
name=f"customized_{len(environments)}",
is_original_runbook=True,
is_predefined_runbook=True,
)
assert env, "created from runbook shouldn't be None"
else:
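
Read together, the new from_runbook is unconditional: the allow_create gate is
gone and only the rename of is_original_runbook to is_predefined_runbook
remains. Reassembled from the added lines above for readability (indentation
restored, nothing added beyond what the diff shows; relies on the module's
existing imports):

def from_runbook(
    self, runbook: schema.Environment, name: str, is_predefined_runbook: bool
) -> Optional[Environment]:
    assert runbook
    assert name
    env: Optional[Environment] = None
    # make a copy, so that modification on env won't impact test case
    copied_runbook = copy.copy(runbook)
    copied_runbook.name = name
    env = Environment.create(
        runbook=copied_runbook,
        is_predefined=is_predefined_runbook,
        warn_as_error=self.warn_as_error,
    )
    self[name] = env
    log = _get_init_logger()
    log.debug(f"created {env.name}: {env.runbook}")
    return env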

View file

@@ -63,12 +63,13 @@ class LisaRunner(BaseRunner):
platform_message = PlatformMessage(name=platform.type_name())
notifier.notify(platform_message)
# get environment requirements
self._merge_test_requirements(
test_results=test_results,
existing_environments=candidate_environments,
platform_type=platform.type_name(),
)
if not candidate_environments:
# if no runbook environment is defined, generate from requirements
self._merge_test_requirements(
test_results=test_results,
existing_environments=candidate_environments,
platform_type=platform.type_name(),
)
# requirements may not need to be handled if all environments are predefined

View file

@@ -21,6 +21,7 @@ from lisa.util import LisaException, constants
def generate_runner(
env_runbook: Optional[schema.EnvironmentRoot] = None,
case_use_new_env: bool = False,
times: int = 1,
platform_schema: Optional[test_platform.MockPlatformSchema] = None,
) -> LisaRunner:
platform_runbook = schema.Platform(
@@ -37,6 +38,7 @@ def generate_runner(
schema.TestCase(
criteria=schema.Criteria(priority=[0, 1, 2]),
use_new_environment=case_use_new_env,
times=times,
)
]
if env_runbook:
@@ -47,6 +49,8 @@
class RunnerTestCase(TestCase):
__skipped_no_env = "no available environment"
def tearDown(self) -> None:
cleanup_cases_metadata() # Necessary side effects!
@@ -71,6 +75,7 @@ class RunnerTestCase(TestCase):
list(envs),
)
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["", "", ""],
expected_status=[TestStatus.QUEUED, TestStatus.QUEUED, TestStatus.QUEUED],
expected_message=["", "", ""],
@@ -131,17 +136,17 @@ class RunnerTestCase(TestCase):
list(envs),
)
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["", "", ""],
expected_status=[TestStatus.QUEUED, TestStatus.QUEUED, TestStatus.QUEUED],
expected_message=["", "", ""],
test_results=test_results,
)
def test_merge_req_not_allow_create(self) -> None:
def test_merge_req_all_generated(self) -> None:
# force to use existing env, not to create new.
# this case doesn't provide a predefined env, but no case is skipped at this stage.
env_runbook = generate_env_runbook(is_single_env=False)
env_runbook.allow_create = False
envs = load_environments(env_runbook)
self.assertListEqual(
[],
@@ -155,11 +160,12 @@ class RunnerTestCase(TestCase):
platform_type=constants.PLATFORM_MOCK,
)
self.assertListEqual(
[],
["generated_0", "generated_1", "generated_2"],
list(envs),
)
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["", "", ""],
expected_status=[
TestStatus.QUEUED,
@@ -195,6 +201,7 @@ class RunnerTestCase(TestCase):
)
platform_unsupported = "capability cannot support some of requirement"
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["", "", ""],
expected_status=[
TestStatus.SKIPPED,
@@ -220,15 +227,16 @@ class RunnerTestCase(TestCase):
test_results = runner.run("ut")
self.verify_env_results(
expected_prepared=["customized_0", "generated_1", "generated_2"],
expected_deployed_envs=["customized_0", "generated_1"],
expected_deleted_envs=["customized_0", "generated_1"],
expected_prepared=["customized_0"],
expected_deployed_envs=["customized_0"],
expected_deleted_envs=["customized_0"],
runner=runner,
)
self.verify_test_results(
expected_envs=["generated_1", "customized_0", "customized_0"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["", "customized_0", "customized_0"],
expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=[self.__skipped_no_env, "", ""],
test_results=test_results,
)
@@ -242,24 +250,20 @@ class RunnerTestCase(TestCase):
test_results = runner.run("ut")
self.verify_env_results(
expected_prepared=[
"customized_0",
"generated_1",
"generated_2",
"generated_3",
],
expected_prepared=["customized_0"],
expected_deployed_envs=["customized_0"],
expected_deleted_envs=["customized_0"],
runner=runner,
)
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["customized_0", "customized_0", "customized_0"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
test_results=test_results,
)
def test_case_new_env_run_only_1_needed(self) -> None:
def test_case_new_env_run_only_1_needed_customized(self) -> None:
# same predefined env as test_fit_a_bigger_env,
# but all cases want to run on a new env
generate_cases_metadata()
@@ -268,20 +272,80 @@ class RunnerTestCase(TestCase):
test_results = runner.run("ut")
self.verify_env_results(
expected_prepared=[
"customized_0",
"generated_1",
"generated_2",
"generated_3",
],
expected_deployed_envs=["customized_0", "generated_1", "generated_3"],
expected_deleted_envs=["customized_0", "generated_1", "generated_3"],
expected_prepared=["customized_0"],
expected_deployed_envs=["customized_0"],
expected_deleted_envs=["customized_0"],
runner=runner,
)
self.verify_test_results(
expected_envs=["customized_0", "generated_1", "generated_3"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["customized_0", "", ""],
expected_status=[TestStatus.PASSED, TestStatus.SKIPPED, TestStatus.SKIPPED],
expected_message=["", self.__skipped_no_env, self.__skipped_no_env],
test_results=test_results,
)
def test_case_new_env_run_only_1_needed_generated(self) -> None:
# like the customized variant above, but with no predefined env,
# so every case runs on its own generated env
generate_cases_metadata()
env_runbook = generate_env_runbook()
runner = generate_runner(env_runbook, case_use_new_env=True, times=2)
test_results = runner.run("ut")
self.verify_env_results(
expected_prepared=[
"generated_0",
"generated_1",
"generated_2",
"generated_3",
"generated_4",
"generated_5",
],
expected_deployed_envs=[
"generated_0",
"generated_1",
"generated_2",
"generated_3",
"generated_4",
"generated_5",
],
expected_deleted_envs=[
"generated_0",
"generated_1",
"generated_2",
"generated_3",
"generated_4",
"generated_5",
],
runner=runner,
)
self.verify_test_results(
expected_test_order=[
"mock_ut1",
"mock_ut1",
"mock_ut2",
"mock_ut2",
"mock_ut3",
"mock_ut3",
],
expected_envs=[
"generated_0",
"generated_1",
"generated_2",
"generated_3",
"generated_4",
"generated_5",
],
expected_status=[
TestStatus.PASSED,
TestStatus.PASSED,
TestStatus.PASSED,
TestStatus.PASSED,
TestStatus.PASSED,
TestStatus.PASSED,
],
expected_message=["", "", "", "", "", ""],
test_results=test_results,
)
@@ -298,17 +362,16 @@ class RunnerTestCase(TestCase):
expected_prepared=[
"customized_0",
"customized_1",
"generated_2",
"generated_3",
],
expected_deployed_envs=["customized_0", "generated_2"],
expected_deleted_envs=["customized_0", "generated_2"],
expected_deployed_envs=["customized_0"],
expected_deleted_envs=["customized_0"],
runner=runner,
)
self.verify_test_results(
expected_envs=["generated_2", "customized_0", "customized_0"],
expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=["", "", ""],
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["", "customized_0", "customized_0"],
expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
expected_message=[self.__skipped_no_env, "", ""],
test_results=test_results,
)
@@ -324,18 +387,14 @@ class RunnerTestCase(TestCase):
test_results = runner.run("ut")
self.verify_env_results(
expected_prepared=[
"customized_0",
"generated_1",
"generated_2",
"generated_3",
],
expected_prepared=["customized_0"],
expected_deployed_envs=[],
expected_deleted_envs=[],
runner=runner,
)
before_suite_failed = "no available environment"
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["", "", ""],
expected_status=[
TestStatus.SKIPPED,
@@ -343,9 +402,9 @@ class RunnerTestCase(TestCase):
TestStatus.SKIPPED,
],
expected_message=[
before_suite_failed,
before_suite_failed,
before_suite_failed,
self.__skipped_no_env,
self.__skipped_no_env,
self.__skipped_no_env,
],
test_results=test_results,
)
@@ -359,12 +418,7 @@ class RunnerTestCase(TestCase):
test_results = runner.run("ut")
self.verify_env_results(
expected_prepared=[
"customized_0",
"generated_1",
"generated_2",
"generated_3",
],
expected_prepared=["customized_0"],
expected_deployed_envs=["customized_0"],
expected_deleted_envs=["customized_0"],
runner=runner,
@@ -372,6 +426,7 @@ class RunnerTestCase(TestCase):
before_suite_failed = "before_suite: failed"
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["customized_0", "customized_0", "customized_0"],
expected_status=[
TestStatus.SKIPPED,
@@ -404,6 +459,7 @@ class RunnerTestCase(TestCase):
no_available_env = "deployment failed: no capability found for environment: "
self.verify_test_results(
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["generated_0", "generated_1", "generated_2"],
expected_status=[
TestStatus.FAILED,
@@ -418,7 +474,6 @@ class RunnerTestCase(TestCase):
# test env not prepared, so test cases cannot find an env to run
platform_schema = test_platform.MockPlatformSchema()
platform_schema.return_prepared = False
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
runner = generate_runner(env_runbook, platform_schema=platform_schema)
@@ -435,21 +490,20 @@ class RunnerTestCase(TestCase):
platform_schema = test_platform.MockPlatformSchema()
platform_schema.deployed_status = EnvironmentStatus.Prepared
generate_cases_metadata()
env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
env_runbook = generate_env_runbook()
runner = generate_runner(env_runbook, platform_schema=platform_schema)
test_results = runner.run("ut")
self.verify_env_results(
expected_prepared=[
"customized_0",
"generated_0",
"generated_1",
"generated_2",
"generated_3",
],
expected_deployed_envs=[
"customized_0",
"generated_0",
"generated_1",
"generated_3",
"generated_2",
],
expected_deleted_envs=[],
runner=runner,
@@ -458,7 +512,8 @@ class RunnerTestCase(TestCase):
"deployment failed: expected status is EnvironmentStatus.Prepared"
)
self.verify_test_results(
expected_envs=["customized_0", "generated_1", "generated_3"],
expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
expected_envs=["generated_0", "generated_1", "generated_2"],
expected_status=[
TestStatus.FAILED,
TestStatus.FAILED,
@@ -483,6 +538,7 @@ class RunnerTestCase(TestCase):
runner=runner,
)
self.verify_test_results(
expected_test_order=[],
expected_envs=[],
expected_status=[],
expected_message=[],
@@ -491,22 +547,30 @@ class RunnerTestCase(TestCase):
def verify_test_results(
self,
expected_test_order: List[str],
expected_envs: List[str],
expected_status: List[TestStatus],
expected_message: List[str],
test_results: List[TestResult],
) -> None:
self.assertListEqual(
expected_test_order,
[x.runtime_data.metadata.name for x in test_results],
"test order inconsistent",
)
self.assertListEqual(
expected_envs,
[
x.environment.name if x.environment is not None else ""
for x in test_results
],
"test env inconsistent",
)
self.assertListEqual(
expected_status,
[x.status for x in test_results],
"test result inconsistent",
)
# compare only whether each message begins with the expected text
actual_messages = [
@@ -516,6 +580,7 @@ class RunnerTestCase(TestCase):
self.assertListEqual(
expected_message,
actual_messages,
"test message inconsistent",
)
def verify_env_results(

View file

@@ -654,7 +654,6 @@ class EnvironmentRoot:
default=1,
metadata=metadata(validate=validate.Range(min=1)),
)
allow_create: bool = True
warn_as_error: bool = field(default=False)
environments: List[Environment] = field(default_factory=list)
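
For reference, the fields of EnvironmentRoot that remain around the removal,
reassembled from this hunk (the class decorators and the schema's other
fields are not shown in the diff and are elided, so this excerpt is not
standalone-runnable):

class EnvironmentRoot:
    max_concurrency: int = field(
        default=1,
        metadata=metadata(validate=validate.Range(min=1)),
    )
    warn_as_error: bool = field(default=False)
    environments: List[Environment] = field(default_factory=list)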

View file

@@ -176,7 +176,6 @@ class EnvironmentTestCase(TestCase):
self.assertEqual(0, len(envs))
self.assertEqual(False, envs.warn_as_error)
self.assertEqual(1, envs.max_concurrency)
self.assertEqual(True, envs.allow_create)
def test_create_from_runbook_split(self) -> None:
runbook = generate_runbook(local=True, remote=True)

View file

@@ -18,7 +18,6 @@ notifier:
- type: html
environment:
warn_as_error: false
allow_create: false
environments:
- nodes:
- type: requirement

View file

@@ -9,7 +9,6 @@ variable:
notifier:
- type: html
environment:
allow_create: false
environments:
- nodes:
- type: remote