Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1724026 - Ensure that test files included in multiple manifests are executed once per manifest. r=gbrown
Differential Revision: https://phabricator.services.mozilla.com/D134293
Parent: b44bac9353
Commit: 804f0d3fde
@@ -1177,7 +1177,9 @@ class MochitestDesktop(object):
             testURL = "about:blank"
         return testURL

-    def getTestsByScheme(self, options, testsToFilter=None, disabled=True):
+    def getTestsByScheme(
+        self, options, testsToFilter=None, disabled=True, manifestToFilter=None
+    ):
         """Build the url path to the specific test harness and test file or directory
         Build a manifest of tests to run and write out a json file for the harness to read
         testsToFilter option is used to filter/keep the tests provided in the list
@@ -1191,6 +1193,18 @@ class MochitestDesktop(object):
         for test in tests:
             if testsToFilter and (test["path"] not in testsToFilter):
                 continue
+            # If we are running a specific manifest, the previously computed set of active
+            # tests should be filtered based on the manifest that contains each entry.
+            #
+            # This is especially important when a test file is listed in multiple
+            # manifests (e.g. because the same test runs under a different configuration,
+            # and so it is included in multiple manifests). Without filtering the
+            # active tests based on the current manifest (configuration) being run,
+            # each of the N manifests would execute the full set of active tests
+            # (and so NxN runs instead of the expected N runs, one for each
+            # manifest).
+            if manifestToFilter and (test["manifest"] not in manifestToFilter):
+                continue
             paths.append(test)

         # Generate test by schemes
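The comment block above describes the double-counting this hunk prevents. The following is a minimal, self-contained sketch of that arithmetic; the entries and variable names are illustrative only and are not taken from runtests.py:

# Toy model of the double-counting described above; names are illustrative.
active_tests = [
    {"path": "test_pass.html", "manifest": "mochitest-dupemanifest-1.ini"},
    {"path": "test_pass.html", "manifest": "mochitest-dupemanifest-2.ini"},
]
manifests = sorted({t["manifest"] for t in active_tests})

# Without filtering on the current manifest, every per-manifest pass picks up
# both entries: N manifests x N entries = NxN executions.
unfiltered = [t for m in manifests for t in active_tests]
assert len(unfiltered) == 4

# Filtering each pass down to the manifest being run restores N executions,
# one per manifest.
filtered = [t for m in manifests for t in active_tests if t["manifest"] == m]
assert len(filtered) == 2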
@@ -2647,7 +2661,7 @@ toolbar#nav-bar {
             norm_paths.append(p)
         return norm_paths

-    def runMochitests(self, options, testsToRun):
+    def runMochitests(self, options, testsToRun, manifestToFilter=None):
         "This is a base method for calling other methods in this class for --bisect-chunk."
         # Making an instance of bisect class for --bisect-chunk option.
         bisect = bisection.Bisect(self)
@@ -2667,7 +2681,7 @@ toolbar#nav-bar {
                 )
                 bisection_log = 1

-        result = self.doTests(options, testsToRun)
+        result = self.doTests(options, testsToRun, manifestToFilter)
         if options.bisectChunk:
             status = bisect.post_test(options, self.expectedError, self.result)
         else:
@@ -2954,7 +2968,7 @@ toolbar#nav-bar {
             # by the user, since we need to create a new directory for each run. We would face
             # problems if we use the directory provided by the user.
             tests_in_manifest = [t["path"] for t in tests if t["manifest"] == m]
-            res = self.runMochitests(options, tests_in_manifest)
+            res = self.runMochitests(options, tests_in_manifest, manifestToFilter=m)
             result = result or res

             # Dump the logging buffer
@@ -3033,7 +3047,7 @@ toolbar#nav-bar {
         if self.profiler_tempdir:
             shutil.rmtree(self.profiler_tempdir)

-    def doTests(self, options, testsToFilter=None):
+    def doTests(self, options, testsToFilter=None, manifestToFilter=None):
         # A call to initializeLooping method is required in case of --run-by-dir or --bisect-chunk
         # since we need to initialize variables for each loop.
         if options.bisectChunk or options.runByManifest:
@@ -3163,7 +3177,9 @@ toolbar#nav-bar {

         # testsToFilter parameter is used to filter out the test list that
         # is sent to getTestsByScheme
-        for (scheme, tests) in self.getTestsByScheme(options, testsToFilter):
+        for (scheme, tests) in self.getTestsByScheme(
+            options, testsToFilter, True, manifestToFilter
+        ):
             # read the number of tests here, if we are not going to run any,
             # terminate early
             if not tests:
@@ -102,11 +102,14 @@ def runtests(setup_test_harness, binary, parser, request):
     def inner(*tests, **opts):
         assert len(tests) > 0

-        manifest = TestManifest()
-        # pylint --py3k: W1636
-        manifest.tests.extend(list(map(normalize, tests)))
-        options["manifestFile"] = manifest
-        options.update(opts)
+        # Inject a TestManifest in the runtests option if one
+        # has not been already included by the caller.
+        if not isinstance(options["manifestFile"], TestManifest):
+            manifest = TestManifest()
+            options["manifestFile"] = manifest
+            # pylint --py3k: W1636
+            manifest.tests.extend(list(map(normalize, tests)))
+        options.update(opts)

         result = runtests.run_test_harness(parser, Namespace(**options))
         out = json.loads("[" + ",".join(buf.getvalue().splitlines()) + "]")
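With the guard above, a manifest is only synthesized when the caller has not already supplied one, and opts are applied last so a caller-provided TestManifest wins. A standalone sketch of that behaviour follows; build_options and its locals are stand-ins for the fixture's internals, not mochitest API:

from manifestparser import TestManifest

def build_options(tests, **opts):
    # Stand-in for the fixture's options dict; manifestFile defaults to None.
    options = {"manifestFile": None}
    if not isinstance(options["manifestFile"], TestManifest):
        manifest = TestManifest()
        manifest.tests.extend(list(tests))
        options["manifestFile"] = manifest
    # opts are applied last, so a caller-provided manifestFile wins.
    options.update(opts)
    return options

# Bare test entries: a manifest is synthesized from them.
default_opts = build_options([{"path": "test_pass.html"}])
assert isinstance(default_opts["manifestFile"], TestManifest)

# Caller-provided manifest: it is kept as-is.
custom = TestManifest()
custom_opts = build_options([{"path": "test_pass.html"}], manifestFile=custom)
assert custom_opts["manifestFile"] is custom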
@@ -0,0 +1 @@
+[test_pass.html]
@@ -0,0 +1 @@
+[test_pass.html]
@@ -7,9 +7,12 @@ from __future__ import absolute_import
 import os
 from functools import partial

+from manifestparser import TestManifest
+
 import mozunit
 import pytest
 from moztest.selftest.output import get_mozharness_status, filter_action
+from conftest import setup_args

 from mozharness.base.log import INFO, WARNING, ERROR
 from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WARNING, TBPL_FAILURE
@@ -32,6 +35,22 @@ def test_name(request):
     return inner


+@pytest.fixture
+def test_manifest(setup_test_harness, request):
+    flavor = request.getfixturevalue("flavor")
+    test_root = setup_test_harness(*setup_args, flavor=flavor)
+    assert test_root
+
+    def inner(manifestFileNames):
+        return TestManifest(
+            manifests=[os.path.join(test_root, name) for name in manifestFileNames],
+            strict=False,
+            rootdir=test_root,
+        )
+
+    return inner
+
+
 @pytest.mark.parametrize("runFailures", ["selftest", ""])
 @pytest.mark.parametrize("flavor", ["plain", "browser-chrome"])
 def test_output_pass(flavor, runFailures, runtests, test_name):
@@ -216,5 +235,43 @@ def test_output_leak(flavor, runFailures, runtests, test_name):
     assert found_leaks, "At least one process should have leaked"


+@pytest.mark.parametrize("flavor", ["plain"])
+def test_output_testfile_in_dupe_manifests(flavor, runtests, test_name, test_manifest):
+    results = {
+        "status": 0,
+        "tbpl_status": TBPL_SUCCESS,
+        "log_level": (INFO, WARNING),
+        "line_status": "PASS",
+        # We expect the test to be executed exactly 2 times,
+        # once for each manifest where the test file has been included.
+        "lines": 2,
+    }
+
+    # Explicitly provide a manifestFile property that includes the
+    # two manifest files that share the same test file.
+    extra_opts = {
+        "manifestFile": test_manifest(
+            [
+                "mochitest-dupemanifest-1.ini",
+                "mochitest-dupemanifest-2.ini",
+            ]
+        ),
+        "runByManifest": True,
+    }
+
+    # Execute mochitest by explicitly requesting the test file listed
+    # in two manifest files to be executed.
+    status, lines = runtests(test_name("pass"), **extra_opts)
+    assert status == results["status"]
+
+    tbpl_status, log_level, summary = get_mozharness_status(lines, status)
+    assert tbpl_status == results["tbpl_status"]
+    assert log_level in results["log_level"]
+
+    lines = filter_action("test_status", lines)
+    assert len(lines) == results["lines"]
+    assert lines[0]["status"] == results["line_status"]
+
+
 if __name__ == "__main__":
     mozunit.main()
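The new selftest exercises the scenario set up by the two one-line manifests added above: the same test file listed in two manifests yields one active-test entry per manifest. A minimal sketch of that, assuming manifestparser is installed and reusing the manifest names from the selftest:

import os
import tempfile

from manifestparser import TestManifest

tmpdir = tempfile.mkdtemp()
names = ["mochitest-dupemanifest-1.ini", "mochitest-dupemanifest-2.ini"]
for name in names:
    with open(os.path.join(tmpdir, name), "w") as f:
        f.write("[test_pass.html]\n")

manifest = TestManifest(
    manifests=[os.path.join(tmpdir, name) for name in names],
    strict=False,
    rootdir=tmpdir,
)
tests = manifest.active_tests(exists=False, disabled=False)

# One active entry per manifest, even though both entries point at the same file.
assert len(tests) == 2
assert len({t["manifest"] for t in tests}) == 2
assert len({t["path"] for t in tests}) == 1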
@@ -53,6 +53,7 @@ def setup_test_harness(request, flavor="plain"):

     def inner(files_dir, *args, **kwargs):
         harness_root = _get_test_harness(*args, **kwargs)
+        test_root = None
         if harness_root:
             sys.path.insert(0, harness_root)

@@ -74,7 +75,6 @@ def setup_test_harness(request, flavor="plain"):
                 os.symlink(files_dir, test_root)
             else:
                 shutil.copytree(files_dir, test_root)
-
         elif "TEST_HARNESS_ROOT" in os.environ:
             # The mochitest tests will run regardless of whether a build exists or not.
             # In a local environment, they should simply be skipped if setup fails. But
@@ -85,6 +85,7 @@ def setup_test_harness(request, flavor="plain"):
             # We are purposefully not failing here because running |mach python-test|
             # without a build is a perfectly valid use case.
             pass
+        return test_root

     return inner
