Bug 1431753 - Add a test-coverage test suite, similar to test-verify, that runs tests in isolation in coverage mode. r=gbrown,jmaher

--HG--
extra : rebase_source : 8e151ddc81d528f85e2a4acbd7623935d4246b4f
Marco Castelluccio 2018-04-19 19:19:31 +02:00
Parent d2eefb1040
Commit ac95649c98
14 changed files with 444 additions and 216 deletions

View file

@ -24,6 +24,8 @@ INCLUSIVE_COMPONENTS = [
'jittest',
'test-verify',
'test-verify-wpt',
'test-coverage',
'test-coverage-wpt',
'jsreftest',
]
INCLUSIVE_COMPONENTS = sorted(INCLUSIVE_COMPONENTS)

View file

@ -97,3 +97,42 @@ test-verify:
no-read-buildbot-config: true
extra-options:
- --verify
test-coverage:
description: "Per-test coverage"
suite: test-coverage
treeherder-symbol: TC
loopback-video: true
instance-size: default
max-run-time: 10800
allow-software-gl-layers: false
run-on-projects:
by-test-platform:
# do not run on beta or release: usually just confirms earlier results
linux64-ccov/.*: ['trunk', 'try']
windows10-64-ccov/debug: ['trunk', 'try']
default: []
tier:
by-test-platform:
windows10-64-asan.*: 3
default: 2
mozharness:
script:
by-test-platform:
android.*: android_emulator_unittest.py
default: desktop_unittest.py
config:
by-test-platform:
android.*:
- android/android_common.py
- android/androidarm_4_3.py
linux.*:
- unittests/linux_unittest.py
- remove_executables.py
macosx.*:
- unittests/mac_unittest.py
windows.*:
- unittests/win_taskcluster_unittest.py
no-read-buildbot-config: true
extra-options:
- --per-test-coverage

View file

@ -37,6 +37,8 @@ common-tests:
- reftest
- reftest-no-accel
- telemetry-tests-client
- test-coverage
- test-coverage-wpt
- test-verify
- test-verify-wpt
- xpcshell
@ -185,6 +187,8 @@ windows-tests:
- mochitest-webgl
- reftest
- reftest-no-accel
- test-coverage
- test-coverage-wpt
- test-verify
- test-verify-wpt
- web-platform-tests

View file

@ -163,3 +163,22 @@ test-verify-wpt:
mozharness:
extra-options:
- --verify
test-coverage-wpt:
description: "Per web-platform test coverage"
suite: test-coverage-wpt
treeherder-symbol: TCw
max-run-time: 10800
run-on-projects:
by-test-platform:
# do not run on beta or release: usually just confirms earlier results
linux64-ccov/.*: ['trunk', 'try']
windows10-64-ccov/debug: ['trunk', 'try']
default: []
tier:
by-test-platform:
windows10-64-asan.*: 3
default: 2
mozharness:
extra-options:
- --per-test-coverage

View file

@ -673,7 +673,7 @@ def handle_suite_category(config, tests):
script = test['mozharness']['script']
category_arg = None
if suite == 'test-verify':
if suite == 'test-verify' or suite == 'test-coverage':
pass
elif script == 'android_emulator_unittest.py':
category_arg = '--test-suite'

View file

@ -36,6 +36,6 @@ config = {
# this would normally be in "exes", but "exes" is clobbered by remove_executables
"geckodriver": "%(abs_test_bin_dir)s/geckodriver",
"verify_category": "web-platform",
"per_test_category": "web-platform",
}

View file

@ -42,5 +42,5 @@ config = {
"download_minidump_stackwalk": True,
"verify_category": "web-platform",
"per_test_category": "web-platform",
}

View file

@ -64,5 +64,5 @@ config = {
"download_minidump_stackwalk": True,
"verify_category": "web-platform",
"per_test_category": "web-platform",
}

View file

@ -14,6 +14,8 @@ from mozharness.base.script import (
PreScriptAction,
PostScriptAction,
)
from mozharness.mozilla.testing.per_test_base import SingleTestMixin
_here = os.path.abspath(os.path.dirname(__file__))
_tooltool_path = os.path.normpath(os.path.join(_here, '..', '..', '..',
@ -27,6 +29,12 @@ code_coverage_config_options = [
"default": False,
"help": "Whether gcov c++ code coverage should be run."
}],
[["--per-test-coverage"],
{"action": "store_true",
"dest": "per_test_coverage",
"default": False,
"help": "Whether per-test coverage should be collected."
}],
[["--disable-ccov-upload"],
{"action": "store_true",
"dest": "disable_ccov_upload",
@ -42,7 +50,7 @@ code_coverage_config_options = [
]
class CodeCoverageMixin(object):
class CodeCoverageMixin(SingleTestMixin):
"""
Mixin for setting GCOV_PREFIX during test execution, packaging up
the resulting .gcda files and uploading them to blobber.
@ -51,6 +59,9 @@ class CodeCoverageMixin(object):
jsvm_dir = None
prefix = None
def __init__(self):
super(CodeCoverageMixin, self).__init__()
@property
def code_coverage_enabled(self):
try:
@ -62,6 +73,13 @@ class CodeCoverageMixin(object):
except (AttributeError, KeyError, TypeError):
return False
@property
def per_test_coverage(self):
try:
return bool(self.config.get('per_test_coverage'))
except (AttributeError, KeyError, TypeError):
return False
@property
def ccov_upload_disabled(self):
try:
@ -82,8 +100,8 @@ class CodeCoverageMixin(object):
except (AttributeError, KeyError, TypeError):
return False
@PreScriptAction('run-tests')
def _set_gcov_prefix(self, action):
@PostScriptAction('download-and-extract')
def setup_coverage_tools(self, action, success=None):
if not self.code_coverage_enabled:
return
@ -98,14 +116,6 @@ class CodeCoverageMixin(object):
os.environ['GCOV_PREFIX_STRIP'] = str(strip_count)
# Set the GCOV directory.
self.gcov_dir = tempfile.mkdtemp()
os.environ['GCOV_PREFIX'] = self.gcov_dir
# Set JSVM directory.
self.jsvm_dir = tempfile.mkdtemp()
os.environ['JS_CODE_COVERAGE_OUTPUT_DIR'] = self.jsvm_dir
# Install grcov on the test machine
# Get the path to the build machines gcno files.
self.url_to_gcno = self.query_build_dir_url('target.code-coverage-gcno.zip')
@ -133,6 +143,97 @@ class CodeCoverageMixin(object):
with tarfile.open(os.path.join(self.grcov_dir, tar_file)) as tar:
tar.extractall(self.grcov_dir)
# Download the gcno archive from the build machine.
self.download_file(self.url_to_gcno, parent_dir=self.grcov_dir)
# Download the chrome-map.json file from the build machine.
self.download_file(self.url_to_chrome_map, parent_dir=self.grcov_dir)
@PostScriptAction('download-and-extract')
def find_tests_for_coverage(self, action, success=None):
"""
For each file modified on this push, determine if the modified file
is a test, by searching test manifests. Populate self.suites
with test files, organized by suite.
This depends on test manifests, so can only run after test zips have
been downloaded and extracted.
"""
if not self.per_test_coverage:
return
self.find_modified_tests()
# TODO: Add tests that haven't been run for a while (a week? N pushes?)
@property
def coverage_args(self):
return []
def set_coverage_env(self, env):
# Set the GCOV directory.
gcov_dir = tempfile.mkdtemp()
env['GCOV_PREFIX'] = gcov_dir
# Set JSVM directory.
jsvm_dir = tempfile.mkdtemp()
env['JS_CODE_COVERAGE_OUTPUT_DIR'] = jsvm_dir
return (gcov_dir, jsvm_dir)
@PreScriptAction('run-tests')
def _set_gcov_prefix(self, action):
if not self.code_coverage_enabled:
return
if self.per_test_coverage:
return
self.gcov_dir, self.jsvm_dir = self.set_coverage_env(os.environ)
def parse_coverage_artifacts(self, gcov_dir, jsvm_dir):
jsvm_output_file = 'jsvm_lcov_output.info'
grcov_output_file = 'grcov_lcov_output.info'
dirs = self.query_abs_dirs()
# Zip gcda files (will be given in input to grcov).
file_path_gcda = os.path.join(os.getcwd(), 'code-coverage-gcda.zip')
self.run_command(['zip', '-q', '-0', '-r', file_path_gcda, '.'], cwd=gcov_dir)
sys.path.append(dirs['abs_test_install_dir'])
sys.path.append(os.path.join(dirs['abs_test_install_dir'], 'mozbuild/codecoverage'))
from lcov_rewriter import LcovFileRewriter
jsvm_files = [os.path.join(jsvm_dir, e) for e in os.listdir(jsvm_dir)]
rewriter = LcovFileRewriter(os.path.join(self.grcov_dir, 'chrome-map.json'))
rewriter.rewrite_files(jsvm_files, jsvm_output_file, '')
# Run grcov on the zipped .gcno and .gcda files.
grcov_command = [
os.path.join(self.grcov_dir, 'grcov'),
'-t', 'lcov',
'-p', self.prefix,
'--ignore-dir', 'gcc*',
'--ignore-dir', 'vs2017_*',
os.path.join(self.grcov_dir, 'target.code-coverage-gcno.zip'), file_path_gcda
]
if mozinfo.os == 'win':
grcov_command += ['--llvm']
# 'grcov_output' will be a tuple, the first variable is the path to the lcov output,
# the other is the path to the standard error output.
tmp_output_file, _ = self.get_output_from_command(
grcov_command,
silent=True,
save_tmpfiles=True,
return_type='files',
throw_exception=True,
)
shutil.move(tmp_output_file, grcov_output_file)
return grcov_output_file, jsvm_output_file
@PostScriptAction('run-tests')
def _package_coverage_data(self, action, success=None):
if self.jsd_code_coverage_enabled:
@ -156,65 +257,26 @@ class CodeCoverageMixin(object):
if not self.code_coverage_enabled:
return
if self.per_test_coverage:
return
del os.environ['GCOV_PREFIX_STRIP']
del os.environ['GCOV_PREFIX']
del os.environ['JS_CODE_COVERAGE_OUTPUT_DIR']
if not self.ccov_upload_disabled:
grcov_output_file, jsvm_output_file = self.parse_coverage_artifacts(self.gcov_dir, self.jsvm_dir)
dirs = self.query_abs_dirs()
# Zip gcda files (will be given in input to grcov).
file_path_gcda = os.path.join(os.getcwd(), 'code-coverage-gcda.zip')
self.run_command(['zip', '-q', '-0', '-r', file_path_gcda, '.'], cwd=self.gcov_dir)
sys.path.append(dirs['abs_test_install_dir'])
sys.path.append(os.path.join(dirs['abs_test_install_dir'], 'mozbuild/codecoverage'))
# Download the chrome-map.json file from the build machine.
self.download_file(self.url_to_chrome_map)
from lcov_rewriter import LcovFileRewriter
jsvm_files = [os.path.join(self.jsvm_dir, e) for e in os.listdir(self.jsvm_dir)]
rewriter = LcovFileRewriter('chrome-map.json')
rewriter.rewrite_files(jsvm_files, 'jsvm_lcov_output.info', '')
# Package JSVM coverage data.
file_path_jsvm = os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-jsvm.zip')
self.run_command(['zip', '-q', file_path_jsvm, 'jsvm_lcov_output.info'])
# GRCOV post-processing
# Download the gcno from the build machine.
self.download_file(self.url_to_gcno, parent_dir=self.grcov_dir)
# Run grcov on the zipped .gcno and .gcda files.
grcov_command = [
os.path.join(self.grcov_dir, 'grcov'),
'-t', 'lcov',
'-p', self.prefix,
'--ignore-dir', 'gcc*',
'--ignore-dir', 'vs2017_*',
os.path.join(self.grcov_dir, 'target.code-coverage-gcno.zip'), file_path_gcda
]
if mozinfo.os == 'win':
grcov_command += ['--llvm']
# 'grcov_output' will be a tuple, the first variable is the path to the lcov output,
# the other is the path to the standard error output.
grcov_output, _ = self.get_output_from_command(
grcov_command,
silent=True,
save_tmpfiles=True,
return_type='files',
throw_exception=True,
)
output_file_name = 'grcov_lcov_output.info'
shutil.move(grcov_output, os.path.join(self.grcov_dir, output_file_name))
# Zip the grcov output and upload it.
self.run_command(
['zip', '-q', os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-grcov.zip'), output_file_name],
cwd=self.grcov_dir
['zip', '-q', os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-grcov.zip'), grcov_output_file]
)
# Zip the JSVM coverage data and upload it.
self.run_command(
['zip', '-q', os.path.join(dirs['abs_blob_upload_dir'], 'code-coverage-jsvm.zip'), jsvm_output_file]
)
shutil.rmtree(self.gcov_dir)
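
In per-test mode the helpers above are driven from the harness scripts (see the desktop_unittest.py and web_platform_tests.py hunks below) rather than from the run-tests pre/post actions. A minimal sketch of the intended flow; `run_one_test` and the `harness` object are hypothetical placeholders, not mozharness code:

import os
import shutil

def run_suite_with_per_test_coverage(harness, suite, run_one_test):
    # One harness invocation per modified test, mirroring the loops added
    # in desktop_unittest.py / web_platform_tests.py below.
    for per_test_args in harness.query_args(suite):
        env = os.environ.copy()
        if harness.per_test_coverage:
            # Fresh GCOV/JSVM output directories for this single test.
            gcov_dir, jsvm_dir = harness.set_coverage_env(env)
        run_one_test(harness, per_test_args, env)  # placeholder test run
        if harness.per_test_coverage:
            # Turn the raw .gcda/JSVM output into per-test lcov files.
            grcov_file, jsvm_file = harness.parse_coverage_artifacts(gcov_dir, jsvm_dir)
            shutil.rmtree(gcov_dir)
            shutil.rmtree(jsvm_dir)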

View file

@ -14,22 +14,13 @@ import mozinfo
from manifestparser import TestManifest
from mozharness.base.script import PostScriptAction
verify_config_options = [
[["--verify"],
{"action": "store_true",
"dest": "verify",
"default": "False",
"help": "Run additional verification on modified tests."
}],
]
class VerifyToolsMixin(object):
"""Utility functions for test verification."""
class SingleTestMixin(object):
"""Utility functions for per-test testing like test verification and per-test coverage."""
def __init__(self):
self.verify_suites = {}
self.verify_downloaded = False
self.suites = {}
self.tests_downloaded = False
self.reftest_test_dir = None
self.jsreftest_test_dir = None
@ -47,7 +38,7 @@ class VerifyToolsMixin(object):
man = TestManifest([path], strict=False)
active = man.active_tests(exists=False, disabled=True, filters=[], **mozinfo.info)
# Remove disabled tests. Also, remove tests with the same path as
# disabled tests, even if they are not disabled, since test-verify
# disabled tests, even if they are not disabled, since per-test mode
# specifies tests by path (it cannot distinguish between two or more
# tests with the same path specified in multiple manifests).
disabled = [t['relpath'] for t in active if 'disabled' in t]
@ -55,7 +46,7 @@ class VerifyToolsMixin(object):
for t in active if 'disabled' not in t and \
t['relpath'] not in disabled}
tests_by_path.update(new_by_path)
self.info("Verification updated with manifest %s" % path)
self.info("Per-test run updated with manifest %s" % path)
ref_manifests = [
(os.path.join(dirs['abs_reftest_dir'], 'tests', 'layout', 'reftests', 'reftest.list'), 'reftest'),
@ -69,7 +60,7 @@ class VerifyToolsMixin(object):
man = manifest.ReftestManifest()
man.load(path)
tests_by_path.update({os.path.relpath(t,self.reftest_test_dir):(suite,None) for t in man.files})
self.info("Verification updated with manifest %s" % path)
self.info("Per-test run updated with manifest %s" % path)
suite = 'jsreftest'
self.jsreftest_test_dir = os.path.join(dirs['abs_test_install_dir'], 'jsreftest', 'tests')
@ -89,7 +80,7 @@ class VerifyToolsMixin(object):
tests_by_path.update({relpath:(suite,None)})
else:
self.warning("unexpected jsreftest test format: %s" % str(t))
self.info("Verification updated with manifest %s" % path)
self.info("Per-test run updated with manifest %s" % path)
# for each changed file, determine if it is a test file, and what suite it is in
for file in changed_files:
@ -98,7 +89,7 @@ class VerifyToolsMixin(object):
file = file.replace(posixpath.sep, os.sep)
entry = tests_by_path.get(file)
if entry:
self.info("Verification found test %s" % file)
self.info("Per-test run found test %s" % file)
subsuite_mapping = {
('browser-chrome', 'clipboard') : 'browser-chrome-clipboard',
('chrome', 'clipboard') : 'chrome-clipboard',
@ -115,11 +106,11 @@ class VerifyToolsMixin(object):
suite = subsuite_mapping[entry]
else:
suite = entry[0]
suite_files = self.verify_suites.get(suite)
suite_files = self.suites.get(suite)
if not suite_files:
suite_files = []
suite_files.append(file)
self.verify_suites[suite] = suite_files
self.suites[suite] = suite_files
def _find_wpt_tests(self, dirs, changed_files):
# Setup sys.path to include all the dependencies required to import
@ -146,31 +137,26 @@ class VerifyToolsMixin(object):
repo_path = repo_path.replace(os.sep, posixpath.sep)
if repo_path in changed_files:
self.info("found web-platform test file '%s', type %s" % (path, type))
suite_files = self.verify_suites.get(type)
suite_files = self.suites.get(type)
if not suite_files:
suite_files = []
path = os.path.join(tests_path, path)
suite_files.append(path)
self.verify_suites[type] = suite_files
self.suites[type] = suite_files
@PostScriptAction('download-and-extract')
def find_tests_for_verification(self, action, success=None):
def find_modified_tests(self):
"""
For each file modified on this push, determine if the modified file
is a test, by searching test manifests. Populate self.verify_suites
is a test, by searching test manifests. Populate self.suites
with test files, organized by suite.
This depends on test manifests, so can only run after test zips have
been downloaded and extracted.
"""
if self.config.get('verify') != True:
return
repository = os.environ.get("GECKO_HEAD_REPOSITORY")
revision = os.environ.get("GECKO_HEAD_REV")
if not repository or not revision:
self.warning("unable to verify tests: no repo or revision!")
self.warning("unable to run tests in per-test mode: no repo or revision!")
return []
def get_automationrelevance():
@ -186,7 +172,7 @@ class VerifyToolsMixin(object):
# FIXME(emilio): Need to update test expectations.
mozinfo.update({'stylo': True})
mozinfo.update({'verify': True})
self.info("Verification using mozinfo: %s" % str(mozinfo.info))
self.info("Per-test run using mozinfo: %s" % str(mozinfo.info))
# determine which files were changed on this push
url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'), revision)
@ -198,91 +184,93 @@ class VerifyToolsMixin(object):
desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
changed_files |= set(c['files'])
if self.config.get('verify_category') == "web-platform":
if self.config.get('per_test_category') == "web-platform":
self._find_wpt_tests(dirs, changed_files)
else:
self._find_misc_tests(dirs, changed_files)
self.verify_downloaded = True
self.tests_downloaded = True
def query_verify_args(self, suite):
def query_args(self, suite):
"""
For the specified suite, return an array of command line arguments to
be passed to test harnesses when running in verify mode.
be passed to test harnesses when running in per-test mode.
Each array element is an array of command line arguments for a modified
test in the suite.
"""
# not in verify or per-test coverage mode: run once, with no additional args
if not self.per_test_coverage and not self.verify_enabled:
return [[]]
# Limit each test harness run to 15 minutes, to avoid task timeouts
# when verifying long-running tests.
MAX_TIME_PER_TEST = 900
references = re.compile(r"(-ref|-notref|-noref|-noref.)\.")
files = []
jsreftest_extra_dir = os.path.join('js', 'src', 'tests')
# For some suites, the test path needs to be updated before passing to
# the test harness.
for file in self.suites.get(suite):
if (self.config.get('per_test_category') != "web-platform" and
suite in ['reftest', 'crashtest']):
file = os.path.join(self.reftest_test_dir, file)
if suite == 'reftest':
# Special handling for modified reftest reference files:
# - if both test and reference modified, run the test file
# - if only reference modified, run the test file
nonref = references.sub('.', file)
if nonref != file:
file = None
if nonref not in files and os.path.exists(nonref):
file = nonref
elif (self.config.get('per_test_category') != "web-platform" and
suite == 'jsreftest'):
file = os.path.relpath(file, jsreftest_extra_dir)
file = os.path.join(self.jsreftest_test_dir, file)
if file is None:
continue
file = file.replace(os.sep, posixpath.sep)
files.append(file)
self.info("Per-test file(s) for '%s': %s" % (suite, files))
args = []
for file in files:
cur = []
cur.extend(self.coverage_args)
cur.extend(self.verify_args)
cur.append(file)
args.append(cur)
if self.config.get('verify') != True:
# not in verify mode: run once, with no additional args
args = [[]]
else:
# in verify mode, run nothing by default (unsupported suite or no files modified)
args = []
# otherwise, run once for each file in requested suite
references = re.compile(r"(-ref|-notref|-noref|-noref.)\.")
files = []
jsreftest_extra_dir = os.path.join('js', 'src', 'tests')
# For some suites, the test path needs to be updated before passing to
# the test harness.
for file in self.verify_suites.get(suite):
if (self.config.get('verify_category') != "web-platform" and
suite in ['reftest', 'crashtest']):
file = os.path.join(self.reftest_test_dir, file)
elif (self.config.get('verify_category') != "web-platform" and
suite == 'jsreftest'):
file = os.path.relpath(file, jsreftest_extra_dir)
file = os.path.join(self.jsreftest_test_dir, file)
file = file.replace(os.sep, posixpath.sep)
files.append(file)
for file in files:
if self.config.get('verify_category') == "web-platform":
args.append(['--verify-log-full', '--verify', file])
else:
if suite == 'reftest':
# Special handling for modified reftest reference files:
# - if both test and reference modified, verify the test file
# - if only reference modified, verify the test file
nonref = references.sub('.', file)
if nonref != file:
file = None
if nonref not in files and os.path.exists(nonref):
file = nonref
if file:
args.append(['--verify-max-time=%d' % MAX_TIME_PER_TEST, '--verify', file])
self.info("Verification file(s) for '%s': %s" % (suite, files))
return args
def query_verify_category_suites(self, category, all_suites):
def query_per_test_category_suites(self, category, all_suites):
"""
In verify mode, determine which suites are active, for the given
In per-test mode, determine which suites are active, for the given
suite category.
"""
suites = None
if self.config.get('verify') == True:
if self.config.get('verify_category') == "web-platform":
suites = self.verify_suites.keys()
elif all_suites and self.verify_downloaded:
if self.verify_enabled or self.per_test_coverage:
if self.config.get('per_test_category') == "web-platform":
suites = self.suites.keys()
elif all_suites and self.tests_downloaded:
suites = dict((key, all_suites.get(key)) for key in
self.verify_suites if key in all_suites.keys())
self.suites if key in all_suites.keys())
else:
# Until test zips are downloaded, manifests are not available,
# so it is not possible to determine which suites are active/
# required for verification; assume all suites from supported
# required for per-test mode; assume all suites from supported
# suite categories are required.
if category in ['mochitest', 'xpcshell', 'reftest']:
suites = all_suites
return suites
def log_verify_status(self, test_name, tbpl_status, log_level):
def log_per_test_status(self, test_name, tbpl_status, log_level):
"""
Log verification status of a single test. This will display in the
Job Details pane in treeherder - a convenient summary of verification.
Log status of a single test. This will display in the
Job Details pane in treeherder - a convenient summary of per-test mode.
Special test name formatting is needed because treeherder truncates
lines that are too long, and may remove duplicates after truncation.
"""
@ -298,6 +286,6 @@ class VerifyToolsMixin(object):
new = os.path.join(tail, new)
test_name = os.path.join('...', previous or new)
test_name = test_name.rstrip(os.path.sep)
self.log("TinderboxPrint: Verification of %s<br/>: %s" %
self.log("TinderboxPrint: Per-test run of %s<br/>: %s" %
(test_name, tbpl_status), level=log_level)
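
To make the new query_args() behaviour concrete, here is a small self-contained sketch of how it composes one argument list per modified test from coverage_args, verify_args and the test path. The test path is hypothetical, and the real method also remaps reftest/jsreftest paths as shown above:

import os
import posixpath

def compose_per_test_args(files, coverage_args, verify_args):
    # Mirrors the tail of SingleTestMixin.query_args(): one argv fragment per
    # modified test, coverage args first, then verify args, then the path.
    args = []
    for f in files:
        cur = []
        cur.extend(coverage_args)   # [] for test-coverage in this patch
        cur.extend(verify_args)     # e.g. ['--verify-max-time=900', '--verify']
        cur.append(f.replace(os.sep, posixpath.sep))
        args.append(cur)
    return args

# test-verify on desktop (hypothetical modified mochitest):
compose_per_test_args(['dom/tests/mochitest/general/test_example.html'],
                      coverage_args=[],
                      verify_args=['--verify-max-time=900', '--verify'])
# -> [['--verify-max-time=900', '--verify',
#      'dom/tests/mochitest/general/test_example.html']]
# test-coverage: coverage_args is empty, so each run gets just the test path.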


View file

@ -0,0 +1,67 @@
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
from mozharness.base.script import PostScriptAction
from mozharness.mozilla.testing.per_test_base import SingleTestMixin
verify_config_options = [
[["--verify"],
{"action": "store_true",
"dest": "verify",
"default": False,
"help": "Run additional verification on modified tests."
}],
]
class VerifyToolsMixin(SingleTestMixin):
"""Utility functions for test verification."""
def __init__(self):
super(VerifyToolsMixin, self).__init__()
@property
def verify_enabled(self):
try:
return bool(self.config.get('verify'))
except (AttributeError, KeyError, TypeError):
return False
@PostScriptAction('download-and-extract')
def find_tests_for_verification(self, action, success=None):
"""
For each file modified on this push, determine if the modified file
is a test, by searching test manifests. Populate self.verify_suites
with test files, organized by suite.
This depends on test manifests, so can only run after test zips have
been downloaded and extracted.
"""
if not self.verify_enabled:
return
self.find_modified_tests()
@property
def verify_args(self):
if not self.verify_enabled:
return []
# Limit each test harness run to 15 minutes, to avoid task timeouts
# when executing long-running tests.
MAX_TIME_PER_TEST = 900
if self.config.get('per_test_category') == "web-platform":
args = ['--verify-log-full']
else:
args = ['--verify-max-time=%d' % MAX_TIME_PER_TEST]
args.append('--verify')
return args

View file

@ -471,7 +471,7 @@ class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin)
try_options, try_tests = self.try_args(self.test_suite)
cmd.extend(try_options)
if self.config.get('verify') is not True:
if self.verify_enabled or self.per_test_coverage:
cmd.extend(self.query_tests_args(
self.config["suite_definitions"][self.test_suite].get("tests"),
None,
@ -740,7 +740,7 @@ class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin)
def _query_suites(self):
if self.test_suite:
return [(self.test_suite, self.test_suite)]
# test-verification: determine test suites to be verified
# per-test mode: determine test suites to run
all = [('mochitest', {'plain': 'mochitest',
'chrome': 'mochitest-chrome',
'plain-clipboard': 'mochitest-plain-clipboard',
@ -749,7 +749,7 @@ class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin)
('xpcshell', {'xpcshell': 'xpcshell'})]
suites = []
for (category, all_suites) in all:
cat_suites = self.query_verify_category_suites(category, all_suites)
cat_suites = self.query_per_test_category_suites(category, all_suites)
for k in cat_suites.keys():
suites.append((k, cat_suites[k]))
return suites
@ -758,7 +758,7 @@ class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin)
if self.test_suite:
categories = [self.test_suite]
else:
# test-verification
# per-test mode
categories = ['mochitest', 'reftest', 'xpcshell']
return categories
@ -767,12 +767,12 @@ class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin)
Run the tests
"""
self.start_time = datetime.datetime.now()
max_verify_time = datetime.timedelta(minutes=60)
max_per_test_time = datetime.timedelta(minutes=60)
verify_args = []
per_test_args = []
suites = self._query_suites()
minidump = self.query_minidump_stackwalk()
for (verify_suite, suite) in suites:
for (per_test_suite, suite) in suites:
self.test_suite = suite
cmd = self._build_command()
@ -788,24 +788,24 @@ class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin)
env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
env['RUST_BACKTRACE'] = 'full'
for verify_args in self.query_verify_args(verify_suite):
if (datetime.datetime.now() - self.start_time) > max_verify_time:
# Verification has run out of time. That is okay! Stop running
# tests so that a task timeout is not triggered, and so that
for per_test_args in self.query_args(per_test_suite):
if (datetime.datetime.now() - self.start_time) > max_per_test_time:
# Running tests has run out of time. That is okay! Stop running
# them so that a task timeout is not triggered, and so that
# (partial) results are made available in a timely manner.
self.info("TinderboxPrint: Verification too long: "
"Not all tests were verified.<br/>")
# Signal verify time exceeded, to break out of suites and
self.info("TinderboxPrint: Running tests took too long: "
"Not all tests were executed.<br/>")
# Signal per-test time exceeded, to break out of suites and
# suite categories loops also.
return False
final_cmd = copy.copy(cmd)
if len(verify_args) > 0:
# in verify mode, remove any chunk arguments from command
if len(per_test_args) > 0:
# in per-test mode, remove any chunk arguments from command
for arg in final_cmd:
if 'total-chunk' in arg or 'this-chunk' in arg:
final_cmd.remove(arg)
final_cmd.extend(verify_args)
final_cmd.extend(per_test_args)
self.info("Running on %s the command %s" % (self.emulator["name"],
subprocess.list2cmdline(final_cmd)))
@ -823,9 +823,9 @@ class AndroidEmulatorTest(TestingMixin, EmulatorMixin, BaseScript, MozbaseMixin)
self.info("##### %s log ends" % self.test_suite)
if len(verify_args) > 0:
if len(per_test_args) > 0:
self.buildbot_status(tbpl_status, level=log_level)
self.log_verify_status(verify_args[-1], tbpl_status, log_level)
self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
else:
self.buildbot_status(tbpl_status, level=log_level)
self.log("The %s suite: %s ran with return status: %s" %

View file

@ -479,7 +479,7 @@ class DesktopUnittest(TestingMixin, MercurialScript, BlobUploadMixin, MozbaseMix
if c.get('run_all_suites'): # needed if you dont specify any suites
suites = all_suites
else:
suites = self.query_verify_category_suites(category, all_suites)
suites = self.query_per_test_category_suites(category, all_suites)
return suites
@ -767,9 +767,9 @@ class DesktopUnittest(TestingMixin, MercurialScript, BlobUploadMixin, MozbaseMix
abs_app_dir = self.query_abs_app_dir()
abs_res_dir = self.query_abs_res_dir()
max_verify_time = timedelta(minutes=60)
max_verify_tests = 10
verified_tests = 0
max_per_test_time = timedelta(minutes=60)
max_per_test_tests = 10
executed_tests = 0
if suites:
self.info('#### Running %s suites' % suite_category)
@ -789,7 +789,7 @@ class DesktopUnittest(TestingMixin, MercurialScript, BlobUploadMixin, MozbaseMix
}
if isinstance(suites[suite], dict):
options_list = suites[suite].get('options', [])
if self.config.get('verify') is True:
if self.verify_enabled or self.per_test_coverage:
tests_list = []
else:
tests_list = suites[suite].get('tests', [])
@ -848,33 +848,56 @@ class DesktopUnittest(TestingMixin, MercurialScript, BlobUploadMixin, MozbaseMix
env = self.query_env(partial_env=env, log_level=INFO)
cmd_timeout = self.get_timeout_for_category(suite_category)
for verify_args in self.query_verify_args(suite):
if (datetime.now() - self.start_time) > max_verify_time:
# Verification has run out of time. That is okay! Stop running
# tests so that a task timeout is not triggered, and so that
# Run basic startup/shutdown test to collect baseline coverage.
# This way, after we run a test, we can generate a diff between the
# full coverage of the test and the baseline coverage and only get
# the coverage data specific to the test.
if self.per_test_coverage:
gcov_dir, jsvm_dir = self.set_coverage_env(env)
# TODO: Run basic startup/shutdown test to collect baseline coverage.
# grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
# shutil.rmtree(gcov_dir)
# shutil.rmtree(jsvm_dir)
# TODO: Parse coverage report
for per_test_args in self.query_args(suite):
if (datetime.now() - self.start_time) > max_per_test_time:
# Running tests has run out of time. That is okay! Stop running
# them so that a task timeout is not triggered, and so that
# (partial) results are made available in a timely manner.
self.info("TinderboxPrint: Verification too long: Not all tests "
"were verified.<br/>")
# Signal verify time exceeded, to break out of suites and
self.info("TinderboxPrint: Running tests took too long: Not all tests "
"were executed.<br/>")
# Signal per-test time exceeded, to break out of suites and
# suite categories loops also.
return False
if verified_tests >= max_verify_tests:
if executed_tests >= max_per_test_tests:
# When changesets are merged between trees or many tests are
# otherwise updated at once, there probably is not enough time
# to verify all tests, and attempting to do so may cause other
# to run all tests, and attempting to do so may cause other
# problems, such as generating too much log output.
self.info("TinderboxPrint: Too many modified tests: Not all tests "
"were verified.<br/>")
"were executed.<br/>")
return False
verified_tests = verified_tests + 1
executed_tests = executed_tests + 1
final_cmd = copy.copy(cmd)
final_cmd.extend(verify_args)
final_cmd.extend(per_test_args)
if self.per_test_coverage:
gcov_dir, jsvm_dir = self.set_coverage_env(env)
return_code = self.run_command(final_cmd, cwd=dirs['abs_work_dir'],
output_timeout=cmd_timeout,
output_parser=parser,
env=env)
if self.per_test_coverage:
grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
shutil.rmtree(gcov_dir)
shutil.rmtree(jsvm_dir)
# TODO: Parse coverage report
# TODO: Diff this coverage report with the baseline one
# mochitest, reftest, and xpcshell suites do not return
# appropriate return codes. Therefore, we must parse the output
# to determine what the tbpl_status and worst_log_level must
@ -895,8 +918,8 @@ class DesktopUnittest(TestingMixin, MercurialScript, BlobUploadMixin, MozbaseMix
parser.append_tinderboxprint_line(suite_name)
self.buildbot_status(tbpl_status, level=log_level)
if len(verify_args) > 0:
self.log_verify_status(verify_args[-1], tbpl_status, log_level)
if len(per_test_args) > 0:
self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
else:
self.log("The %s suite: %s ran with return status: %s" %
(suite_category, suite, tbpl_status), level=log_level)
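
The TODOs in the hunk above anticipate diffing each test's lcov output against a baseline (startup/shutdown) report so that only the lines exercised by the test itself remain. That step is not implemented in this patch; a rough sketch, assuming plain lcov DA: records, of what such a diff could look like:

from collections import defaultdict

def parse_lcov_counts(path):
    # Map (source_file, line_number) -> execution count from an lcov .info file.
    counts = defaultdict(int)
    current = None
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line.startswith('SF:'):
                current = line[3:]
            elif line.startswith('DA:') and current:
                lineno, count = line[3:].split(',')[:2]
                counts[(current, int(lineno))] += int(count)
    return counts

def coverage_specific_to_test(test_info, baseline_info):
    # Keep only lines whose hit count exceeds the baseline run's count.
    test = parse_lcov_counts(test_info)
    baseline = parse_lcov_counts(baseline_info)
    return {key: hits for key, hits in test.items()
            if hits > baseline.get(key, 0)}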

View file

@ -6,6 +6,7 @@
# ***** END LICENSE BLOCK *****
import copy
import os
import shutil
import sys
from datetime import datetime, timedelta
@ -318,44 +319,60 @@ class WebPlatformTest(TestingMixin, MercurialScript, BlobUploadMixin, CodeCovera
env = self.query_env(partial_env=env, log_level=INFO)
start_time = datetime.now()
max_verify_time = timedelta(minutes=60)
max_verify_tests = 10
verified_tests = 0
max_per_test_time = timedelta(minutes=60)
max_per_test_tests = 10
executed_tests = 0
if self.config.get("verify") is True:
verify_suites = self.query_verify_category_suites(None, None)
if "wdspec" in verify_suites:
if self.per_test_coverage or self.verify_enabled:
suites = self.query_per_test_category_suites(None, None)
if "wdspec" in suites:
# geckodriver is required for wdspec, but not always available
geckodriver_path = self._query_geckodriver()
if not geckodriver_path or not os.path.isfile(geckodriver_path):
verify_suites.remove("wdspec")
self.info("Test verification skipping 'wdspec' tests - no geckodriver")
suites.remove("wdspec")
self.info("Skipping 'wdspec' tests - no geckodriver")
else:
test_types = self.config.get("test_type", [])
verify_suites = [None]
for verify_suite in verify_suites:
if verify_suite:
test_types = [verify_suite]
for verify_args in self.query_verify_args(verify_suite):
if (datetime.now() - start_time) > max_verify_time:
# Verification has run out of time. That is okay! Stop running
# tests so that a task timeout is not triggered, and so that
suites = [None]
for suite in suites:
if suite:
test_types = [suite]
# Run basic startup/shutdown test to collect baseline coverage.
# This way, after we run a test, we can generate a diff between the
# full coverage of the test and the baseline coverage and only get
# the coverage data specific to the test.
if self.per_test_coverage:
gcov_dir, jsvm_dir = self.set_coverage_env(env)
# TODO: Run basic startup/shutdown test to collect baseline coverage.
# grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
# shutil.rmtree(gcov_dir)
# shutil.rmtree(jsvm_dir)
# TODO: Parse coverage report
for per_test_args in self.query_args(suite):
if (datetime.now() - start_time) > max_per_test_time:
# Running tests has run out of time. That is okay! Stop running
# them so that a task timeout is not triggered, and so that
# (partial) results are made available in a timely manner.
self.info("TinderboxPrint: Verification too long: Not all tests "
"were verified.<br/>")
self.info("TinderboxPrint: Running tests took too long: Not all tests "
"were executed.<br/>")
return
if verified_tests >= max_verify_tests:
if executed_tests >= max_per_test_tests:
# When changesets are merged between trees or many tests are
# otherwise updated at once, there probably is not enough time
# to verify all tests, and attempting to do so may cause other
# to run all tests, and attempting to do so may cause other
# problems, such as generating too much log output.
self.info("TinderboxPrint: Too many modified tests: Not all tests "
"were verified.<br/>")
"were executed.<br/>")
return
verified_tests = verified_tests + 1
executed_tests = executed_tests + 1
cmd = self._query_cmd(test_types)
cmd.extend(verify_args)
cmd.extend(per_test_args)
if self.per_test_coverage:
gcov_dir, jsvm_dir = self.set_coverage_env(env)
return_code = self.run_command(cmd,
cwd=dirs['abs_work_dir'],
@ -363,11 +380,18 @@ class WebPlatformTest(TestingMixin, MercurialScript, BlobUploadMixin, CodeCovera
output_parser=parser,
env=env)
if self.per_test_coverage:
grcov_file, jsvm_file = self.parse_coverage_artifacts(gcov_dir, jsvm_dir)
shutil.rmtree(gcov_dir)
shutil.rmtree(jsvm_dir)
# TODO: Parse coverage report
# TODO: Diff this coverage report with the baseline one
tbpl_status, log_level = parser.evaluate_parser(return_code)
self.buildbot_status(tbpl_status, level=log_level)
if len(verify_args) > 0:
self.log_verify_status(verify_args[-1], tbpl_status, log_level)
if len(per_test_args) > 0:
self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
# main {{{1