diff --git a/taskcluster/taskgraph/transforms/tests.py b/taskcluster/taskgraph/transforms/tests.py
index 5ac6776a717d..b6e297fefe66 100644
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -788,7 +788,8 @@ def split_chunks(config, tests):
     them and assigning 'this-chunk' appropriately and updating the treeherder
     symbol."""
     for test in tests:
-        if test['suite'].startswith('test-verify'):
+        if test['suite'].startswith('test-verify') or \
+           test['suite'].startswith('test-coverage'):
             env = config.params.get('try_task_config', {}) or {}
             env = env.get('templates', {}).get('env', {})
             test['chunks'] = perfile_number_of_chunks(env.get('MOZHARNESS_TEST_PATHS', ''),
diff --git a/taskcluster/taskgraph/util/perfile.py b/taskcluster/taskgraph/util/perfile.py
index f5bc2c5d31fa..c65987c305ef 100644
--- a/taskcluster/taskgraph/util/perfile.py
+++ b/taskcluster/taskgraph/util/perfile.py
@@ -22,16 +22,16 @@ def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
     # TODO: Make this flexible based on coverage vs verify || test type
     tests_per_chunk = 10.0
 
-    if type.startswith('test-verify-wpt'):
+    if type.startswith('test-verify-wpt') or type.startswith('test-coverage-wpt'):
         file_patterns = ['testing/web-platform/tests/**',
                          'testing/web-platform/mozilla/tests/**']
-    elif type.startswith('test-verify-gpu'):
+    elif type.startswith('test-verify-gpu') or type.startswith('test-coverage-gpu'):
         file_patterns = ['**/*webgl*/**/test_*',
                          '**/dom/canvas/**/test_*',
                          '**/gfx/tests/**/test_*',
                          '**/devtools/canvasdebugger/**/browser_*',
                          '**/reftest*/**']
-    elif type.startswith('test-verify'):
+    elif type.startswith('test-verify') or type.startswith('test-coverage'):
         file_patterns = ['**/test_*',
                          '**/browser_*',
                          '**/crashtest*/**',
@@ -69,7 +69,7 @@ def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
             if mozpackmatch(path, pattern):
                 gpu = False
 
-                if type == 'test-verify-e10s':
+                if type == 'test-verify-e10s' or type == 'test-coverage-e10s':
                     # file_patterns for test-verify will pick up some gpu tests, lets ignore
                     # in the case of reftest, we will not have any in the regular case
                     gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
diff --git a/testing/mozbase/mozlog/mozlog/handlers/statushandler.py b/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
index d145f73874e2..c6c3adac542f 100644
--- a/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
@@ -30,12 +30,16 @@ class StatusHandler(object):
         self.action_counts = defaultdict(int)
         # The count of messages logged at each log level
         self.log_level_counts = defaultdict(int)
+        # The count of "No tests ran" error messages seen
+        self.no_tests_run_count = 0
 
     def __call__(self, data):
         action = data['action']
         self.action_counts[action] += 1
 
         if action == 'log':
+            if data['level'] == 'ERROR' and data['message'] == 'No tests ran':
+                self.no_tests_run_count += 1
             self.log_level_counts[data['level']] += 1
 
         if action in ('test_status', 'test_end'):
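For context on the perfile.py hunks above: `perfile_number_of_chunks` counts how many changed files match the suite's glob patterns and derives the task's chunk count from that, at roughly `tests_per_chunk` files per chunk. Below is a minimal sketch of that arithmetic, with `fnmatch` standing in for the in-tree `mozpackmatch` and a made-up `changed_files` list; the rounding and the floor of one chunk are illustrative assumptions, not the function's verified behaviour.

```python
import math
from fnmatch import fnmatch

def estimate_chunks(changed_files, file_patterns, tests_per_chunk=10.0):
    # Count the changed files that the suite's glob patterns pick up.
    matched = sum(
        1 for path in changed_files
        if any(fnmatch(path, pattern) for pattern in file_patterns)
    )
    # Roughly one chunk per ten matched files, rounded up; floor at one chunk.
    return max(1, int(math.ceil(matched / tests_per_chunk)))

# 23 matched test files -> 3 chunks
print(estimate_chunks(['dom/media/test_%d.html' % i for i in range(23)],
                      ['**/test_*']))
```

With this patch, test-coverage tasks are routed through the same sizing as test-verify, so a push touching 23 matching test files would get about 3 coverage chunks as well.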
diff --git a/testing/mozharness/mozharness/mozilla/structuredlog.py b/testing/mozharness/mozharness/mozilla/structuredlog.py
index 98654acff118..4adaeda4a43e 100644
--- a/testing/mozharness/mozharness/mozilla/structuredlog.py
+++ b/testing/mozharness/mozharness/mozilla/structuredlog.py
@@ -68,7 +68,7 @@ class StructuredOutputParser(OutputParser):
         try:
             candidate_data = json.loads(line)
             if (isinstance(candidate_data, dict) and
-                'action' in candidate_data and candidate_data['action'] in self.log_actions):
+                    'action' in candidate_data and candidate_data['action'] in self.log_actions):
                 data = candidate_data
         except ValueError:
             pass
@@ -124,27 +124,37 @@ class StructuredOutputParser(OutputParser):
         1) Remove previous data from the new summary to only look at new data
         2) Build a joined summary to include the previous + new data
         """
+        RunSummary = namedtuple("RunSummary",
+                                ("unexpected_statuses",
+                                 "expected_statuses",
+                                 "log_level_counts",
+                                 "action_counts"))
+        if previous_summary == {}:
+            previous_summary = RunSummary(defaultdict(int),
+                                          defaultdict(int),
+                                          defaultdict(int),
+                                          defaultdict(int))
         if previous_summary:
-            RunSummary = namedtuple("RunSummary",
-                                    ("unexpected_statuses",
-                                     "expected_statuses",
-                                     "log_level_counts",
-                                     "action_counts"))
-
             self.tbpl_status = TBPL_SUCCESS
-            joined_summary = summary
 
             # Remove previously known status messages
-            summary = RunSummary(self._subtract_tuples(previous_summary.unexpected_statuses, summary.unexpected_statuses),
-                                 self._subtract_tuples(previous_summary.expected_statuses, summary.expected_statuses),
-                                 summary.log_level_counts,
+            if 'ERROR' in summary.log_level_counts:
+                summary.log_level_counts['ERROR'] -= self.handler.no_tests_run_count
+
+            summary = RunSummary(self._subtract_tuples(previous_summary.unexpected_statuses,
+                                                       summary.unexpected_statuses),
+                                 self._subtract_tuples(previous_summary.expected_statuses,
+                                                       summary.expected_statuses),
+                                 self._subtract_tuples(previous_summary.log_level_counts,
+                                                       summary.log_level_counts),
                                  summary.action_counts)
 
-            # If we have previous data to ignore, cache it so we don't parse the log multiple times
+            # If we have previous data to ignore,
+            # cache it so we don't parse the log multiple times
             self.summary = summary
         else:
-           joined_summary = summary
+            joined_summary = summary
 
         fail_pair = TBPL_WARNING, WARNING
         error_pair = TBPL_FAILURE, ERROR
diff --git a/testing/mozharness/scripts/android_emulator_unittest.py b/testing/mozharness/scripts/android_emulator_unittest.py
index 118a39a7d26b..858c5de6d9e9 100644
--- a/testing/mozharness/scripts/android_emulator_unittest.py
+++ b/testing/mozharness/scripts/android_emulator_unittest.py
@@ -774,7 +774,7 @@ class AndroidEmulatorTest(TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMi
         env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
         env['RUST_BACKTRACE'] = 'full'
 
-        summary = None
+        summary = {}
        for per_test_args in self.query_args(per_test_suite):
             if (datetime.datetime.now() - self.start_time) > max_per_test_time:
                 # Running tests has run out of time. That is okay! Stop running
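The `summary = {}` changes here and in the two scripts that follow pair with the `previous_summary == {}` branch added to structuredlog.py above: `{}` is falsy, so the old `if previous_summary:` check skipped the subtraction on the first per-test iteration, while the new code promotes `{}` to a zeroed `RunSummary` and sends every iteration through the same path. A rough sketch of that subtract-what-was-already-reported pattern follows; `subtract_counts` is a hypothetical stand-in for the in-tree `_subtract_tuples`.

```python
def subtract_counts(previous, current):
    # Report only new occurrences: cumulative counts minus those
    # already attributed to earlier per-test runs.
    return {key: current[key] - previous.get(key, 0) for key in current}

# Cumulative log-level counts after two per-test runs (made-up numbers):
after_first = {'ERROR': 1, 'WARNING': 2}
after_second = {'ERROR': 1, 'WARNING': 5}

print(subtract_counts({}, after_first))            # first run: every count is new
print(subtract_counts(after_first, after_second))  # second run: only the 3 new warnings
```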
diff --git a/testing/mozharness/scripts/desktop_unittest.py b/testing/mozharness/scripts/desktop_unittest.py
index 67a278332476..10a922bb53cc 100755
--- a/testing/mozharness/scripts/desktop_unittest.py
+++ b/testing/mozharness/scripts/desktop_unittest.py
@@ -848,7 +848,7 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin,
         env = self.query_env(partial_env=env, log_level=INFO)
         cmd_timeout = self.get_timeout_for_category(suite_category)
 
-        summary = None
+        summary = {}
         executed_too_many_tests = False
         for per_test_args in self.query_args(suite):
             # Make sure baseline code coverage tests are never
diff --git a/testing/mozharness/scripts/web_platform_tests.py b/testing/mozharness/scripts/web_platform_tests.py
index 7f3cd409972f..35469ab71df3 100755
--- a/testing/mozharness/scripts/web_platform_tests.py
+++ b/testing/mozharness/scripts/web_platform_tests.py
@@ -335,7 +335,7 @@ class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin):
         if suite:
             test_types = [suite]
 
-        summary = None
+        summary = {}
         executed_too_many_tests = False
         for per_test_args in self.query_args(suite):
             # Make sure baseline code coverage tests are never
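Tying the pieces together: per-file coverage (and verify) chunks frequently select zero tests, in which case mozlog emits an ERROR-level "No tests ran" message that would otherwise fail the job. The new `no_tests_run_count` lets `evaluate_parser` discount exactly those errors from the ERROR tally. A stripped-down sketch of the counting behaviour, not the full mozlog `StatusHandler`:

```python
from collections import defaultdict

class MiniStatusHandler:
    """Counts log levels, tracking benign 'No tests ran' errors separately."""

    def __init__(self):
        self.log_level_counts = defaultdict(int)
        self.no_tests_run_count = 0

    def __call__(self, data):
        if data['action'] == 'log':
            if data['level'] == 'ERROR' and data['message'] == 'No tests ran':
                self.no_tests_run_count += 1
            self.log_level_counts[data['level']] += 1

handler = MiniStatusHandler()
handler({'action': 'log', 'level': 'ERROR', 'message': 'No tests ran'})
handler({'action': 'log', 'level': 'ERROR', 'message': 'leaked window'})

# Only the real error should count against the job status.
print(handler.log_level_counts['ERROR'] - handler.no_tests_run_count)  # 1
```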