diff --git a/doc/authoring_command_modules/README.md b/doc/authoring_command_modules/README.md index 14c068a85..c44b04eae 100644 --- a/doc/authoring_command_modules/README.md +++ b/doc/authoring_command_modules/README.md @@ -152,18 +152,29 @@ $ az myfoo create --myfoo-name foo --resource-group-name myrg Testing ------- +Discover tests + +``` +azdev test --discover +``` + Run all tests in a module: ``` -azdev test --modules [--live] [--series] [--discover] [--dest-file FILENAME] +azdev test MODULE [--live] [--series] [--discover] [--dest-file FILENAME] ``` Run an individual test: ``` -azdev test --tests TEST [TEST ...] [--live] [--series] [--discover] [--dest-file FILENAME] +azdev test TEST [TEST ...] [--live] [--series] [--discover] [--dest-file FILENAME] +``` +For example `azdev test test_myfoo` + +Run a test when there is a conflict (for example, both 'azure-cli-core' and 'azure-cli-network' have 'test_foo'): +``` +azdev test MODULE.TEST [--live] ``` -For example `azdev test --tests test_myfoo` The list of failed tests are displayed at the end of a run and dumped to the file specified with `--dest-file` or `test_failures.txt` if nothing is provided. This allows for conveniently replaying failed tests: diff --git a/doc/authoring_tests.md b/doc/authoring_tests.md index 4e9e87144..194377f0c 100644 --- a/doc/authoring_tests.md +++ b/doc/authoring_tests.md @@ -33,7 +33,7 @@ Azure CLI translates user inputs into Azure Python SDK calls which communicate w ### Recording tests for the first time -After the test is executed, a recording file will be generated at `recording//.yaml`. The recording file will be created no matter the test pass or not. The behavior makes it easy for you to find issues when a test fails. To re-record the test, you can either delete the existing recording and re-run the test, or simply re-run the test using the `--live` flag (ex: `azdev test --module example --live`. 
+After the test is executed, a recording file will be generated at `recording//.yaml`. The recording file will be created whether the test passes or not. The behavior makes it easy for you to find issues when a test fails. To re-record the test, you can either delete the existing recording and re-run the test, or simply re-run the test using the `--live` flag (ex: `azdev test example_test --live`). It is a good practice to add recording file to the local git cache, which makes it easy to diff the different versions of recording to detect issues or changes. diff --git a/src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/tests/latest/test_monitor_log_profile.py b/src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/tests/latest/test_monitor_log_profile.py index 1051c3884..fcaff4417 100644 --- a/src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/tests/latest/test_monitor_log_profile.py +++ b/src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/tests/latest/test_monitor_log_profile.py @@ -6,7 +6,7 @@ from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer -class TestActionGroupScenarios(ScenarioTest): +class TestLogProfileScenarios(ScenarioTest): @ResourceGroupPreparer(location='southcentralus') @StorageAccountPreparer(location='southcentralus') def test_monitor_create_log_profile(self, resource_group, storage_account): diff --git a/tools/automation/tests/__init__.py b/tools/automation/tests/__init__.py index 74bfb6107..37975b48b 100644 --- a/tools/automation/tests/__init__.py +++ b/tools/automation/tests/__init__.py @@ -21,25 +21,36 @@ IS_WINDOWS = sys.platform.lower() in ['windows', 'win32'] TEST_INDEX_FILE = 'testIndex.json' +def extract_module_name(path): + mod_name_regex = re.compile(r'azure-cli-([^/\\]*)') + ext_name_regex = re.compile(r'.*(azext_[^/\\]+).*') + + try: + return re.search(mod_name_regex, path).group(1) + except AttributeError: + return 
re.search(ext_name_regex, path).group(1) + + def execute(args): from .main import run_tests, collect_test validate_usage(args) current_profile = get_current_profile(args) test_index = get_test_index(args) + modules = [] if args.ci: # CI Mode runs specific modules selected_modules = [('CI mode', 'azure.cli', 'azure.cli')] elif not (args.tests or args.src_file): # Default is to run with modules (possibly via environment variable) - if not args.modules and os.environ.get('AZURE_CLI_TEST_MODULES', None): + if os.environ.get('AZURE_CLI_TEST_MODULES', None): display('Test modules list is parsed from environment variable AZURE_CLI_TEST_MODULES.') - args.modules = [m.strip() for m in os.environ.get('AZURE_CLI_TEST_MODULES').split(',')] + modules = [m.strip() for m in os.environ.get('AZURE_CLI_TEST_MODULES').split(',')] - selected_modules = filter_user_selected_modules_with_tests(args.modules, args.profile) + selected_modules = filter_user_selected_modules_with_tests(modules, args.profile) if not selected_modules: - display('No module is selected.') + display('\nNo tests selected.') sys.exit(1) else: # Otherwise run specific tests @@ -54,11 +65,10 @@ def execute(args): args.tests.append(line) test_paths = [] selected_modules = [] - regex = re.compile(r'azure-cli-([^/\\]*)[/\\]') for t in args.tests: try: test_path = os.path.normpath(test_index[t]) - mod_name = regex.findall(test_path)[0] + mod_name = extract_module_name(test_path) test_paths.append(test_path) if mod_name not in selected_modules: selected_modules.append(mod_name) @@ -79,14 +89,11 @@ def execute(args): def validate_usage(args): """ Ensure conflicting options aren't specified. 
""" test_usage = '[--test TESTS [TESTS ...]] [--src-file FILENAME]' - module_usage = '--modules MODULES [MODULES ...]' ci_usage = '--ci' usages = [] if args.tests or args.src_file: usages.append(test_usage) - if args.modules: - usages.append(module_usage) if args.ci: usages.append(ci_usage) @@ -134,6 +141,27 @@ def get_test_index(args): return test_index +def get_extension_modules(): + from importlib import import_module + import os + import pkgutil + from azure.cli.core.extension import get_extensions, get_extension_path, get_extension_modname + extension_whls = get_extensions() + ext_modules = [] + if extension_whls: + for ext_name in [e.name for e in extension_whls]: + ext_dir = get_extension_path(ext_name) + sys.path.append(ext_dir) + try: + ext_mod = get_extension_modname(ext_name, ext_dir=ext_dir) + module = import_module(ext_mod) + setattr(module, 'path', module.__path__[0]) + ext_modules.append((module, ext_mod)) + except Exception as ex: + display("Error importing '{}' extension: {}".format(ext_mod, ex)) + return ext_modules + + def discover_tests(args): """ Builds an index of tests so that the user can simply supply the name they wish to test instead of the full path. 
@@ -150,7 +178,9 @@ def discover_tests(args): core_ns_pkg = import_module('azure.cli') command_modules = list(pkgutil.iter_modules(mods_ns_pkg.__path__)) core_modules = list(pkgutil.iter_modules(core_ns_pkg.__path__)) - all_modules = command_modules + [x for x in core_modules if x[1] not in CORE_EXCLUSIONS] + extensions = get_extension_modules() + + all_modules = command_modules + [x for x in core_modules if x[1] not in CORE_EXCLUSIONS] + extensions display(""" ================== @@ -167,6 +197,12 @@ 'base_path': 'azure.cli.{}.tests'.format(mod_name), 'files': {} } + elif mod_name.startswith('azext_'): + mod_data = { + 'filepath': os.path.join(mod[0].path, 'tests', profile), + 'base_path': '{}.tests.{}'.format(mod_name, profile), + 'files': {} + } else: mod_data = { 'filepath': os.path.join(mod[0].path, mod_name, 'tests', profile), @@ -207,10 +243,22 @@ module_data[mod_name] = mod_data test_index = {} + conflicted_keys = [] def add_to_index(key, path): + key = key or mod_name if key in test_index: - display("COLLISION: Test '{}' Attempted '{}' Existing '{}'".format(key, test_index[key], path)) + if key not in conflicted_keys: + conflicted_keys.append(key) + mod1 = extract_module_name(path) + mod2 = extract_module_name(test_index[key]) + if mod1 != mod2: + # resolve conflicted keys by prefixing with the module name and a dot (.) + display("\nCOLLISION: Test '{}' exists in both '{}' and '{}'. Resolve using <module>.<test>.".format(key, mod1, mod2)) + test_index['{}.{}'.format(mod1, key)] = path + test_index['{}.{}'.format(mod2, key)] = test_index[key] + else: + display("\nERROR: Test '{}' exists twice in the '{}' module. 
Please rename one or both and re-run --discover.".format(key, mod1)) else: test_index[key] = path @@ -227,18 +275,18 @@ def discover_tests(args): add_to_index(class_name, class_path) add_to_index(file_name, file_path) add_to_index(mod_name, mod_path) + + # remove the conflicted keys since they would arbitrarily point to a random implementation + for key in conflicted_keys: + del test_index[key] + return test_index def setup_arguments(parser): - parser.add_argument('--modules', dest='modules', nargs='+', - help='Space separated list of modules to be run. Accepts short names, except azure-cli and azure-cli-nspkg.' - 'The modules list can also be set through environment ' - 'variable AZURE_CLI_TEST_MODULES.' - 'The environment variable will be overwritten by command line parameters.') parser.add_argument('--series', dest='parallel', action='store_false', default=True, help='Disable test parallelization.') parser.add_argument('--live', action='store_true', help='Run all the tests live.') - parser.add_argument('--tests', dest='tests', nargs='+', + parser.add_argument(dest='tests', nargs='*', help='Space separated list of tests to run. 
Can specify test filenames, class name or individual method names.') parser.add_argument('--src-file', dest='src_file', help='Text file of test names to include in the the test run.') parser.add_argument('--dest-file', dest='dest_file', help='File in which to save the names of any test failures.', default='test_failures.txt') diff --git a/tools/automation/utilities/path.py b/tools/automation/utilities/path.py index 7b8d2d423..b6c862249 100644 --- a/tools/automation/utilities/path.py +++ b/tools/automation/utilities/path.py @@ -139,6 +139,8 @@ def filter_user_selected_modules_with_tests(user_input_modules=None, profile=Non if user_input_modules is not None: selected_modules = set(user_input_modules) extra = selected_modules - set([name for name, _, _ in existing_modules]) + # don't count extensions as extras + extra = [x for x in extra if not x.startswith('azext_')] if any(extra): print('ERROR: These modules do not exist: {}.'.format(', '.join(extra))) return None