Bug 1442790 - fix test-verify to not fail on: 'No checks run.'. r=ahal

Joel Maher 2018-05-28 06:44:24 -04:00
Parent 6bac60aea0
Commit 9e5d9c55f5
7 changed files with 36 additions and 21 deletions

View file

@@ -788,7 +788,8 @@ def split_chunks(config, tests):
     them and assigning 'this-chunk' appropriately and updating the treeherder
     symbol."""
     for test in tests:
-        if test['suite'].startswith('test-verify'):
+        if test['suite'].startswith('test-verify') or \
+           test['suite'].startswith('test-coverage'):
             env = config.params.get('try_task_config', {}) or {}
             env = env.get('templates', {}).get('env', {})
             test['chunks'] = perfile_number_of_chunks(env.get('MOZHARNESS_TEST_PATHS', ''),
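
For context, a minimal sketch of the prefix check this hunk extends (hypothetical helper and suite names, not the real transform): both test-verify and test-coverage suites now opt into per-file chunking.

```python
# Sketch only: illustrates the suite-prefix dispatch, not taskgraph itself.
def wants_perfile_chunks(suite):
    # Both the verify and coverage variants are chunked per modified file.
    return suite.startswith('test-verify') or suite.startswith('test-coverage')

assert wants_perfile_chunks('test-verify-e10s')
assert wants_perfile_chunks('test-coverage-wpt')
assert not wants_perfile_chunks('mochitest-browser-chrome')
```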

View file

@@ -22,16 +22,16 @@ def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
     # TODO: Make this flexible based on coverage vs verify || test type
     tests_per_chunk = 10.0
-    if type.startswith('test-verify-wpt'):
+    if type.startswith('test-verify-wpt') or type.startswith('test-coverage-wpt'):
         file_patterns = ['testing/web-platform/tests/**',
                          'testing/web-platform/mozilla/tests/**']
-    elif type.startswith('test-verify-gpu'):
+    elif type.startswith('test-verify-gpu') or type.startswith('test-coverage-gpu'):
         file_patterns = ['**/*webgl*/**/test_*',
                          '**/dom/canvas/**/test_*',
                          '**/gfx/tests/**/test_*',
                          '**/devtools/canvasdebugger/**/browser_*',
                          '**/reftest*/**']
-    elif type.startswith('test-verify'):
+    elif type.startswith('test-verify') or type.startswith('test-coverage'):
         file_patterns = ['**/test_*',
                          '**/browser_*',
                          '**/crashtest*/**',
@@ -69,7 +69,7 @@ def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
             if mozpackmatch(path, pattern):
                 gpu = False
-                if type == 'test-verify-e10s':
+                if type == 'test-verify-e10s' or type == 'test-coverage-e10s':
                     # file_patterns for test-verify will pick up some gpu tests, lets ignore
                     # in the case of reftest, we will not have any in the regular case
                     gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
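
A rough sketch of what perfile_number_of_chunks does with these patterns: count the changed files that match the suite's patterns, then allocate roughly ten tests per chunk. Here fnmatch stands in for mozpack's match (whose glob semantics differ slightly), and the changed-file list is hypothetical; the real function reads the pushed changesets from hg.

```python
# Illustrative only: fnmatch approximates mozpack.path.match, and the
# changed-file list is made up for the example.
import fnmatch
import math

def number_of_chunks(changed_files, file_patterns, tests_per_chunk=10.0):
    # Keep only the modified files that look like tests for this suite.
    tests = [path for path in changed_files
             if any(fnmatch.fnmatch(path, pattern) for pattern in file_patterns)]
    return int(math.ceil(len(tests) / tests_per_chunk))

changed = ['dom/base/test_foo.html', 'gfx/tests/test_bar.js', 'README.md']
print(number_of_chunks(changed, ['**/test_*']))  # -> 1
```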

View file

@@ -30,12 +30,16 @@ class StatusHandler(object):
         self.action_counts = defaultdict(int)
         # The count of messages logged at each log level
         self.log_level_counts = defaultdict(int)
+        # The count of "No tests run" error messages seen
+        self.no_tests_run_count = 0

     def __call__(self, data):
         action = data['action']
         self.action_counts[action] += 1

         if action == 'log':
+            if data['level'] == 'ERROR' and data['message'] == 'No tests ran':
+                self.no_tests_run_count += 1
             self.log_level_counts[data['level']] += 1

         if action in ('test_status', 'test_end'):
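
A self-contained sketch of the counter added here, fed hand-written structured-log messages (real ones come from mozlog):

```python
from collections import defaultdict

class MiniStatusHandler(object):
    """Stripped-down version of the counting above; not the mozlog class."""

    def __init__(self):
        self.log_level_counts = defaultdict(int)
        self.no_tests_run_count = 0

    def __call__(self, data):
        if data['action'] == 'log':
            # Track "No tests ran" separately so it can be discounted later.
            if data['level'] == 'ERROR' and data['message'] == 'No tests ran':
                self.no_tests_run_count += 1
            self.log_level_counts[data['level']] += 1

handler = MiniStatusHandler()
handler({'action': 'log', 'level': 'ERROR', 'message': 'No tests ran'})
handler({'action': 'log', 'level': 'INFO', 'message': 'suite finished'})
assert handler.no_tests_run_count == 1
assert handler.log_level_counts['ERROR'] == 1
```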

View file

@@ -68,7 +68,7 @@ class StructuredOutputParser(OutputParser):
             try:
                 candidate_data = json.loads(line)
                 if (isinstance(candidate_data, dict) and
-                    'action' in candidate_data and candidate_data['action'] in self.log_actions):
+                        'action' in candidate_data and candidate_data['action'] in self.log_actions):
                     data = candidate_data
             except ValueError:
                 pass
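
For reference, a small sketch of the detection this block performs: each output line is tried as JSON, and only dicts carrying a known 'action' count as structured messages. Here log_actions is a hypothetical stand-in for the parser's configured set.

```python
import json

log_actions = {'log', 'test_status', 'test_end'}  # assumed set for the demo

def parse_line(line):
    # Non-JSON lines are plain harness output, not structured messages.
    try:
        candidate_data = json.loads(line)
    except ValueError:
        return None
    if (isinstance(candidate_data, dict) and
            'action' in candidate_data and candidate_data['action'] in log_actions):
        return candidate_data
    return None

print(parse_line('{"action": "log", "level": "INFO", "message": "hi"}'))
print(parse_line('plain unstructured output'))  # -> None
```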
@@ -124,27 +124,37 @@ class StructuredOutputParser(OutputParser):
         1) Remove previous data from the new summary to only look at new data
         2) Build a joined summary to include the previous + new data
         """
+        RunSummary = namedtuple("RunSummary",
+                                ("unexpected_statuses",
+                                 "expected_statuses",
+                                 "log_level_counts",
+                                 "action_counts"))
+        if previous_summary == {}:
+            previous_summary = RunSummary(defaultdict(int),
+                                          defaultdict(int),
+                                          defaultdict(int),
+                                          defaultdict(int))
         if previous_summary:
-            RunSummary = namedtuple("RunSummary",
-                                    ("unexpected_statuses",
-                                     "expected_statuses",
-                                     "log_level_counts",
-                                     "action_counts"))
             self.tbpl_status = TBPL_SUCCESS
             joined_summary = summary

             # Remove previously known status messages
-            summary = RunSummary(self._subtract_tuples(previous_summary.unexpected_statuses, summary.unexpected_statuses),
-                                 self._subtract_tuples(previous_summary.expected_statuses, summary.expected_statuses),
-                                 summary.log_level_counts,
+            if 'ERROR' in summary.log_level_counts:
+                summary.log_level_counts['ERROR'] -= self.handler.no_tests_run_count
+            summary = RunSummary(self._subtract_tuples(previous_summary.unexpected_statuses,
+                                                       summary.unexpected_statuses),
+                                 self._subtract_tuples(previous_summary.expected_statuses,
+                                                       summary.expected_statuses),
+                                 self._subtract_tuples(previous_summary.log_level_counts,
+                                                       summary.log_level_counts),
                                  summary.action_counts)

-            # If we have previous data to ignore, cache it so we don't parse the log multiple times
+            # If we have previous data to ignore,
+            # cache it so we don't parse the log multiple times
             self.summary = summary
         else:
             joined_summary = summary

         fail_pair = TBPL_WARNING, WARNING
         error_pair = TBPL_FAILURE, ERROR
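
The effect of the new subtraction, in a runnable sketch: per-file verify and coverage runs invoke the harness once per test path, so a run that ends up matching no tests logs an expected "No tests ran" ERROR. Deducting no_tests_run_count before the log-level counts are compared keeps that message from failing the task. The subtract_tuples mirror below is an assumption about the helper's semantics (keep only counts that grew since the previous run).

```python
from collections import defaultdict

def subtract_tuples(old, new):
    # Assumed mirror of _subtract_tuples: keep only counts that increased.
    return defaultdict(int, {key: new[key] - old.get(key, 0)
                             for key in new if new[key] - old.get(key, 0) > 0})

no_tests_run_count = 1                    # one "No tests ran" message seen
previous = {'ERROR': 0}
current = defaultdict(int, {'ERROR': 1})  # the only ERROR was "No tests ran"

current['ERROR'] -= no_tests_run_count    # the correction added above
remaining = subtract_tuples(previous, current)
assert remaining['ERROR'] == 0            # no real errors left to fail on
```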

View file

@@ -774,7 +774,7 @@ class AndroidEmulatorTest(TestingMixin, BaseScript, MozbaseMixin, CodeCoverageMi
         env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
         env['RUST_BACKTRACE'] = 'full'

-        summary = None
+        summary = {}
         for per_test_args in self.query_args(per_test_suite):
             if (datetime.datetime.now() - self.start_time) > max_per_test_time:
                 # Running tests has run out of time. That is okay! Stop running
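
The sentinel change from None to {} here (and in the two scripts below) pairs with the structuredlog hunk above: evaluate_parser upgrades {} to an all-zero RunSummary, so even the first per-test iteration takes the subtraction branch, whereas None would bypass it. A minimal sketch of that handling:

```python
# Simplified sketch; RunSummary mirrors the namedtuple in structuredlog.py.
from collections import defaultdict, namedtuple

RunSummary = namedtuple("RunSummary", ("unexpected_statuses", "expected_statuses",
                                       "log_level_counts", "action_counts"))

def normalize(previous_summary):
    # {} is upgraded to an all-zero (but truthy) summary; None passes through.
    if previous_summary == {}:
        previous_summary = RunSummary(defaultdict(int), defaultdict(int),
                                      defaultdict(int), defaultdict(int))
    return previous_summary

assert normalize(None) is None  # old sentinel: subtraction path skipped
assert normalize({})            # new sentinel: always a truthy summary
```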

View file

@@ -848,7 +848,7 @@ class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin,
             env = self.query_env(partial_env=env, log_level=INFO)
             cmd_timeout = self.get_timeout_for_category(suite_category)

-            summary = None
+            summary = {}
             executed_too_many_tests = False
             for per_test_args in self.query_args(suite):
                 # Make sure baseline code coverage tests are never

View file

@@ -335,7 +335,7 @@ class WebPlatformTest(TestingMixin, MercurialScript, CodeCoverageMixin):
         if suite:
             test_types = [suite]

-        summary = None
+        summary = {}
         executed_too_many_tests = False
         for per_test_args in self.query_args(suite):
             # Make sure baseline code coverage tests are never