Bug 1832059 - Add a layer to run mochitest performance tests. r=perftest-reviewers,kshampur

This patch adds a new layer that lets developers run mochitest tests directly as performance tests. The tests can run in CI on Linux, and locally through `./mach test` using the FunctionalTestRunner. Some additional try-path mapping is also added for future tests. The remote (CI) run method uses mochitest's run_test_harness method directly, along with the mochitest argument parser.
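For context, the layer collects metrics by scanning test output for lines of the form `perfMetrics | { ... }` (matched by the new METRICS_MATCHER regex in utils.py below) and parsing the JSON payload after the last `|`. A minimal sketch of a line a test could emit, with a hypothetical metric name:

print('perfMetrics | {"fake": 0}')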

Differential Revision: https://phabricator.services.mozilla.com/D190994
This commit is contained in:
Greg Mierzwinski 2023-10-24 13:05:50 +00:00
Parent 528dbee04c
Commit 6ca2c41aee
11 changed files with 541 additions and 30 deletions

View file

@@ -22,7 +22,14 @@ from mozperftest.system import get_layers as system_layers # noqa
from mozperftest.test import get_layers as test_layers # noqa
from mozperftest.utils import convert_day # noqa
FLAVORS = "desktop-browser", "mobile-browser", "doc", "xpcshell", "webpagetest"
FLAVORS = (
"desktop-browser",
"mobile-browser",
"doc",
"xpcshell",
"webpagetest",
"mochitest",
)
class Options:
@@ -101,10 +108,10 @@ for layer in system_layers() + test_layers() + metrics_layers():
}
for option, value in layer.arguments.items():
option = "--%s-%s" % (layer.name, option.replace("_", "-"))
if option in Options.args:
raise KeyError("%s option already defined!" % option)
Options.args[option] = value
parsed_option = "--%s-%s" % (layer.name, option.replace("_", "-"))
if parsed_option in Options.args:
raise KeyError("%s option already defined!" % parsed_option)
Options.args[parsed_option] = value
class PerftestArgumentParser(ArgumentParser):
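As a small illustration of the option registration above, each layer argument becomes a CLI flag prefixed with the layer name; a sketch using names from the new Mochitest layer below:

layer_name = "mochitest"
option = "extra-args"
parsed_option = "--%s-%s" % (layer_name, option.replace("_", "-"))
# parsed_option == "--mochitest-extra-args"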

View file

@@ -41,7 +41,7 @@ if "SHELL" not in os.environ:
os.environ["SHELL"] = "/bin/bash"
def _activate_virtualenvs():
def _activate_virtualenvs(flavor):
"""Adds all available dependencies in the path.
This is done so the runner can be used with no prior
@@ -87,8 +87,16 @@ def _activate_virtualenvs():
if TASKCLUSTER:
# In CI, the directory structure is different: xpcshell code is in
# "$topsrcdir/xpcshell/" rather than "$topsrcdir/testing/xpcshell".
sys.path.append("xpcshell")
# "$topsrcdir/xpcshell/" rather than "$topsrcdir/testing/xpcshell". The
# same is true for mochitest. It also needs additional settings for some
# dependencies.
if flavor == "xpcshell":
print("Setting up xpcshell python paths...")
sys.path.append("xpcshell")
elif flavor == "mochitest":
print("Setting up mochitest python paths...")
sys.path.append("mochitest")
sys.path.append(str(Path("tools", "geckoprocesstypes_generator")))
def _create_artifacts_dir(kwargs, artifacts):
@@ -230,7 +238,10 @@ def run_tools(mach_cmd, kwargs):
def main(argv=sys.argv[1:]):
"""Used when the runner is directly called from the shell"""
_activate_virtualenvs()
flavor = "desktop-browser"
if "--flavor" in argv:
flavor = argv[argv.index("--flavor") + 1]
_activate_virtualenvs(flavor)
from mach.logging import LoggingManager
from mach.util import get_state_dir

View file

@@ -15,7 +15,7 @@ def get_layers():
def pick_system(env, flavor, mach_cmd):
if flavor in ("desktop-browser", "xpcshell"):
if flavor in ("desktop-browser", "xpcshell", "mochitest"):
return Layers(
env,
mach_cmd,

View file

@@ -4,12 +4,13 @@
from mozperftest.layers import Layers
from mozperftest.test.androidlog import AndroidLog
from mozperftest.test.browsertime import BrowsertimeRunner
from mozperftest.test.mochitest import Mochitest
from mozperftest.test.webpagetest import WebPageTest
from mozperftest.test.xpcshell import XPCShell
def get_layers():
return BrowsertimeRunner, AndroidLog, XPCShell, WebPageTest
return BrowsertimeRunner, AndroidLog, XPCShell, WebPageTest, Mochitest
def pick_test(env, flavor, mach_cmd):
@@ -21,5 +22,7 @@ def pick_test(env, flavor, mach_cmd):
return Layers(env, mach_cmd, (BrowsertimeRunner, AndroidLog))
if flavor == "webpagetest":
return Layers(env, mach_cmd, (WebPageTest,))
if flavor == "mochitest":
return Layers(env, mach_cmd, (Mochitest,))
raise NotImplementedError(flavor)

View file

@@ -0,0 +1,215 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
from collections import defaultdict
from contextlib import redirect_stdout
from pathlib import Path
from mozperftest.layers import Layer
from mozperftest.test.functionaltestrunner import FunctionalTestRunner
from mozperftest.utils import (
METRICS_MATCHER,
ON_TRY,
LogProcessor,
NoPerfMetricsError,
install_requirements_file,
)
class MissingMochitestInformation(Exception):
"""Raised when information needed to run a mochitest is missing."""
pass
class MochitestTestFailure(Exception):
"""Raised when a mochitest test returns a non-zero exit code."""
pass
class MochitestData:
def open_data(self, data):
return {
"name": "mochitest",
"subtest": data["name"],
"data": [
{"file": "mochitest", "value": value, "xaxis": xaxis}
for xaxis, value in enumerate(data["values"])
],
}
def transform(self, data):
return data
merge = transform
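For illustration, a result entry such as {"name": "fake", "values": [0, 1]} (metric name hypothetical, mirroring the unit tests below) is opened into:

{
    "name": "mochitest",
    "subtest": "fake",
    "data": [
        {"file": "mochitest", "value": 0, "xaxis": 0},
        {"file": "mochitest", "value": 1, "xaxis": 1},
    ],
}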
class Mochitest(Layer):
"""Runs a mochitest test through `mach test` locally, and directly with mochitest in CI."""
name = "mochitest"
activated = True
arguments = {
"binary": {
"type": str,
"default": None,
"help": ("Path to the browser."),
},
"cycles": {
"type": int,
"default": 1,
"help": ("Number of cycles/iterations to do for the test."),
},
"manifest": {
"type": str,
"default": None,
"help": (
"Path to the manifest that contains the test (only required in CI)."
),
},
"manifest-flavor": {
"type": str,
"default": None,
"help": "Mochitest flavor of the test to run (only required in CI).",
},
"extra-args": {
"nargs": "*",
"type": str,
"default": [],
"help": (
"Additional arguments to pass to mochitest. Expected in a format such as: "
"--mochitest-extra-args headless profile-path=/path/to/profile"
),
},
}
def __init__(self, env, mach_cmd):
super(Mochitest, self).__init__(env, mach_cmd)
self.topsrcdir = mach_cmd.topsrcdir
self._mach_context = mach_cmd._mach_context
self.python_path = mach_cmd.virtualenv_manager.python_path
self.topobjdir = mach_cmd.topobjdir
self.distdir = mach_cmd.distdir
self.bindir = mach_cmd.bindir
self.statedir = mach_cmd.statedir
self.metrics = []
def setup(self):
if ON_TRY:
# Install marionette requirements
install_requirements_file(
self.mach_cmd.virtualenv_manager,
str(
Path(
os.getenv("MOZ_FETCHES_DIR"),
"config",
"marionette_requirements.txt",
)
),
)
def _parse_extra_args(self, extra_args):
"""Sets up the extra-args for passing to mochitest."""
parsed_extra_args = []
for arg in extra_args:
parsed_extra_args.append(f"--{arg}")
return parsed_extra_args
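For example, the form shown in the --mochitest-extra-args help text above maps as:

_parse_extra_args(["headless", "profile-path=/path/to/profile"])
# -> ["--headless", "--profile-path=/path/to/profile"]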
def remote_run(self, test, metadata):
"""Run tests in CI."""
import runtests
from manifestparser import TestManifest
from mochitest_options import MochitestArgumentParser
manifest_flavor = self.get_arg("manifest-flavor")
manifest_name = self.get_arg("manifest")
if not manifest_name:
raise MissingMochitestInformation(
"Name of manifest that contains test needs to be"
"specified (e.g. mochitest-common.ini)"
)
if not manifest_flavor:
raise MissingMochitestInformation(
"Mochitest flavor needs to be provided"
"(e.g. plain, browser-chrome, ...)"
)
manifest_path = Path(test.parent, manifest_name)
manifest = TestManifest([str(manifest_path)], strict=False)
manifest.active_tests(paths=[str(test)])
# Use the mochitest argument parser to parse the extra argument
# options, and produce an `args` object that has all the defaults
parser = MochitestArgumentParser()
args = parser.parse_args(self._parse_extra_args(self.get_arg("extra-args")))
# Bug 1858155 - Attempting to only use one test_path triggers a failure
# during test execution
args.test_paths = [str(test.name), str(test.name)]
args.runByManifest = True
args.manifestFile = manifest
args.topobjdir = self.topobjdir
args.topsrcdir = self.topsrcdir
args.flavor = manifest_flavor
args.app = self.get_arg("binary")
fetch_dir = os.getenv("MOZ_FETCHES_DIR")
args.utilityPath = str(Path(fetch_dir, "bin"))
args.extraProfileFiles.append(str(Path(fetch_dir, "bin", "plugins")))
args.testingModulesDir = str(Path(fetch_dir, "modules"))
args.symbolsPath = str(Path(fetch_dir, "crashreporter-symbols"))
args.certPath = str(Path(fetch_dir, "certs"))
log_processor = LogProcessor(METRICS_MATCHER)
with redirect_stdout(log_processor):
result = runtests.run_test_harness(parser, args)
return result, log_processor
def run(self, metadata):
test = Path(metadata.script["filename"])
results = defaultdict(list)
cycles = self.get_arg("cycles", 1)
for cycle in range(1, cycles + 1):
if ON_TRY:
status, log_processor = self.remote_run(test, metadata)
else:
status, log_processor = FunctionalTestRunner.test(
self.mach_cmd,
[str(test)],
self._parse_extra_args(self.get_arg("extra-args")),
)
if status is not None and status != 0:
raise MochitestTestFailure("Test failed to run")
# Parse metrics found
for metrics_line in log_processor.match:
self.metrics.append(json.loads(metrics_line.split("|")[-1].strip()))
for m in self.metrics:
for key, val in m.items():
results[key].append(val)
if len(results.items()) == 0:
raise NoPerfMetricsError("mochitest")
metadata.add_result(
{
"name": test.name,
"framework": {"name": "mozperftest"},
"transformer": "mozperftest.test.mochitest:MochitestData",
"results": [
{"values": measures, "name": subtest}
for subtest, measures in results.items()
],
}
)
return metadata
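To make the aggregation above concrete: if two cycles emit perfMetrics | {"fake": 0} and perfMetrics | {"fake": 1}, `results` becomes {"fake": [0, 1]} and the entry passed to metadata.add_result looks like (metric name hypothetical):

{
    "name": "test_mochitest.html",
    "framework": {"name": "mozperftest"},
    "transformer": "mozperftest.test.mochitest:MochitestData",
    "results": [{"values": [0, 1], "name": "fake"}],
}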

View file

@@ -7,17 +7,13 @@ from distutils.dir_util import copy_tree
from pathlib import Path
from mozperftest.layers import Layer
from mozperftest.utils import temp_dir
from mozperftest.utils import NoPerfMetricsError, temp_dir
class XPCShellTestError(Exception):
pass
class NoPerfMetricsError(Exception):
pass
class XPCShellData:
def open_data(self, data):
return {
@@ -153,10 +149,7 @@ class XPCShell(Layer):
results[key].append(val)
if len(results.items()) == 0:
raise NoPerfMetricsError(
"No perftest results were found in the xpcshell test. Results must be "
'reported using:\n info("perfMetrics", { metricName: metricValue });'
)
raise NoPerfMetricsError("xpcshell")
metadata.add_result(
{

View file

@@ -0,0 +1,221 @@
import os
import shutil
from unittest import mock
import pytest
from mozperftest.environment import SYSTEM, TEST
from mozperftest.test.mochitest import MissingMochitestInformation
from mozperftest.tests.support import (
EXAMPLE_MOCHITEST_TEST,
get_running_env,
)
def running_env(**kw):
return get_running_env(flavor="mochitest", **kw)
@mock.patch("mozperftest.test.mochitest.ON_TRY", new=False)
@mock.patch("mozperftest.utils.ON_TRY", new=False)
def test_mochitest_metrics(*mocked):
mach_cmd, metadata, env = running_env(
tests=[str(EXAMPLE_MOCHITEST_TEST)],
mochitest_extra_args=[],
)
sys = env.layers[SYSTEM]
mochitest = env.layers[TEST]
with mock.patch("moztest.resolve.TestResolver") as test_resolver_mock, mock.patch(
"mozperftest.test.functionaltestrunner.load_class_from_path"
) as load_class_path_mock, mock.patch(
"mozperftest.test.functionaltestrunner.mozlog.formatters.MachFormatter.__new__"
) as formatter_mock, mock.patch(
"mozperftest.test.mochitest.install_requirements_file"
):
formatter_mock.return_value = lambda x: x
def test_print(*args, **kwargs):
log_processor = kwargs.get("custom_handler")
log_processor.__call__('perfMetrics | { "fake": 0 }')
return 0
test_mock = mock.MagicMock()
test_mock.test = test_print
load_class_path_mock.return_value = test_mock
test_resolver_mock.resolve_metadata.return_value = (1, 1)
mach_cmd._spawn.return_value = test_resolver_mock
try:
with sys as s, mochitest as m:
m(s(metadata))
finally:
shutil.rmtree(mach_cmd._mach_context.state_dir)
res = metadata.get_results()
assert len(res) == 1
assert res[0]["name"] == "test_mochitest.html"
results = res[0]["results"]
assert results[0]["name"] == "fake"
assert results[0]["values"] == [0]
@mock.patch(
# This mock.patch actually patches the mochitest run_test_harness function
"runtests.run_test_harness"
)
@mock.patch(
# This mock.patch causes mochitest's runtests to be imported instead of
# others in the remote_run
"mochitest.runtests.run_test_harness",
new=mock.MagicMock(),
)
@mock.patch(
"mozperftest.test.functionaltestrunner.mozlog.formatters.MachFormatter.__new__"
)
@mock.patch(
"mozperftest.test.mochitest.install_requirements_file", new=mock.MagicMock()
)
@mock.patch(
"mozperftest.test.functionaltestrunner.load_class_from_path", new=mock.MagicMock()
)
@mock.patch("moztest.resolve.TestResolver", new=mock.MagicMock())
@mock.patch("mozperftest.test.mochitest.ON_TRY", new=True)
@mock.patch("mozperftest.utils.ON_TRY", new=True)
@mock.patch("mochitest.mochitest_options.MochitestArgumentParser", new=mock.MagicMock())
@mock.patch("manifestparser.TestManifest", new=mock.MagicMock())
def test_mochitest_ci_metrics(formatter_mock, run_test_harness_mock):
if not os.getenv("MOZ_FETCHES_DIR"):
os.environ["MOZ_FETCHES_DIR"] = "fake-path"
mach_cmd, metadata, env = running_env(
tests=[str(EXAMPLE_MOCHITEST_TEST)],
mochitest_extra_args=[],
mochitest_manifest="fake.ini",
mochitest_manifest_flavor="mocha",
)
system = env.layers[SYSTEM]
mochitest = env.layers[TEST]
formatter_mock.return_value = lambda x: x
def test_print(*args, **kwargs):
print('perfMetrics | { "fake": 0 }')
return 0
run_test_harness_mock.side_effect = test_print
try:
with system as s, mochitest as m:
m(s(metadata))
finally:
shutil.rmtree(mach_cmd._mach_context.state_dir)
res = metadata.get_results()
assert len(res) == 1
assert res[0]["name"] == "test_mochitest.html"
results = res[0]["results"]
assert results[0]["name"] == "fake"
assert results[0]["values"] == [0]
@mock.patch(
# This mock.patch actually patches the mochitest run_test_harness function
"runtests.run_test_harness",
new=mock.MagicMock(),
)
@mock.patch(
# This mock.patch causes mochitest's runtests to be imported instead of
# others in the remote_run
"mochitest.runtests.run_test_harness",
new=mock.MagicMock(),
)
@mock.patch(
"mozperftest.test.functionaltestrunner.mozlog.formatters.MachFormatter.__new__",
new=mock.MagicMock(),
)
@mock.patch(
"mozperftest.test.mochitest.install_requirements_file", new=mock.MagicMock()
)
@mock.patch(
"mozperftest.test.functionaltestrunner.load_class_from_path", new=mock.MagicMock()
)
@mock.patch("moztest.resolve.TestResolver", new=mock.MagicMock())
@mock.patch("mozperftest.test.mochitest.ON_TRY", new=True)
@mock.patch("mozperftest.utils.ON_TRY", new=True)
@mock.patch("mochitest.mochitest_options.MochitestArgumentParser", new=mock.MagicMock())
@mock.patch("manifestparser.TestManifest", new=mock.MagicMock())
def test_mochitest_ci_metrics_missing_manifest():
if not os.getenv("MOZ_FETCHES_DIR"):
os.environ["MOZ_FETCHES_DIR"] = "fake-path"
mach_cmd, metadata, env = running_env(
tests=[str(EXAMPLE_MOCHITEST_TEST)],
mochitest_extra_args=[],
mochitest_manifest_flavor="mocha",
)
system = env.layers[SYSTEM]
mochitest = env.layers[TEST]
try:
with pytest.raises(MissingMochitestInformation) as exc:
with system as s, mochitest as m:
m(s(metadata))
assert "manifest" in exc.value.args[0]
finally:
shutil.rmtree(mach_cmd._mach_context.state_dir)
res = metadata.get_results()
assert len(res) == 0
@mock.patch(
# This mock.patch actually patches the mochitest run_test_harness function
"runtests.run_test_harness",
new=mock.MagicMock(),
)
@mock.patch(
# This mock.patch causes mochitest's runtests to be imported instead of
# others in the remote_run
"mochitest.runtests.run_test_harness",
new=mock.MagicMock(),
)
@mock.patch(
"mozperftest.test.functionaltestrunner.mozlog.formatters.MachFormatter.__new__",
new=mock.MagicMock(),
)
@mock.patch(
"mozperftest.test.mochitest.install_requirements_file", new=mock.MagicMock()
)
@mock.patch(
"mozperftest.test.functionaltestrunner.load_class_from_path", new=mock.MagicMock()
)
@mock.patch("moztest.resolve.TestResolver", new=mock.MagicMock())
@mock.patch("mozperftest.test.mochitest.ON_TRY", new=True)
@mock.patch("mozperftest.utils.ON_TRY", new=True)
@mock.patch("mochitest.mochitest_options.MochitestArgumentParser", new=mock.MagicMock())
@mock.patch("manifestparser.TestManifest", new=mock.MagicMock())
def test_mochitest_ci_metrics_missing_flavor():
if not os.getenv("MOZ_FETCHES_DIR"):
os.environ["MOZ_FETCHES_DIR"] = "fake-path"
mach_cmd, metadata, env = running_env(
tests=[str(EXAMPLE_MOCHITEST_TEST)],
mochitest_extra_args=[],
mochitest_manifest="fake.ini",
)
system = env.layers[SYSTEM]
mochitest = env.layers[TEST]
try:
with pytest.raises(MissingMochitestInformation) as exc:
with system as s, mochitest as m:
m(s(metadata))
assert "flavor" in exc.value.args[0]
finally:
shutil.rmtree(mach_cmd._mach_context.state_dir)
res = metadata.get_results()
assert len(res) == 0

View file

@@ -26,6 +26,7 @@ def setup_env(options):
@pytest.mark.parametrize("no_filter", [True, False])
@mock.patch("mozperftest.metrics.notebookupload.PerftestNotebook")
@mock.patch("mozperftest.test.BrowsertimeRunner", new=mock.MagicMock())
def test_notebookupload_with_filter(notebook, no_filter):
options = {
"notebook-metrics": [],
@@ -60,6 +61,7 @@ def test_notebookupload_with_filter(notebook, no_filter):
@pytest.mark.parametrize("stats", [False, True])
@mock.patch("mozperftest.metrics.notebookupload.PerftestNotebook")
@mock.patch("mozperftest.test.BrowsertimeRunner", new=mock.MagicMock())
def test_compare_to_success(notebook, stats):
options = {
"notebook-metrics": [metric_fields("firstPaint")],
@@ -93,6 +95,7 @@ def test_compare_to_success(notebook, stats):
@pytest.mark.parametrize("filepath", ["invalidPath", str(BT_DATA)])
@mock.patch("mozperftest.metrics.notebookupload.PerftestNotebook")
@mock.patch("mozperftest.test.BrowsertimeRunner", new=mock.MagicMock())
def test_compare_to_invalid_parameter(notebook, filepath):
options = {
"notebook-metrics": [metric_fields("firstPaint")],

View file

@@ -7,7 +7,7 @@ import pytest
from mozperftest import utils
from mozperftest.environment import METRICS, SYSTEM, TEST
from mozperftest.test import xpcshell
from mozperftest.test.xpcshell import NoPerfMetricsError, XPCShellTestError
from mozperftest.test.xpcshell import XPCShellTestError
from mozperftest.tests.support import (
EXAMPLE_XPCSHELL_TEST,
MOZINFO,
@@ -110,7 +110,7 @@ def test_xpcshell_metrics_fail(*mocked):
@mock.patch("runxpcshelltests.XPCShellTests", new=XPCShellTestsNoPerfMetrics)
def test_xpcshell_no_perfmetrics(*mocked):
return _test_xpcshell_fail(NoPerfMetricsError, *mocked)
return _test_xpcshell_fail(utils.NoPerfMetricsError, *mocked)
@mock.patch("runxpcshelltests.XPCShellTests", new=XPCShellTests)

View file

@@ -8,6 +8,7 @@ import inspect
import logging
import os
import pathlib
import re
import shlex
import shutil
import subprocess
@@ -28,6 +29,58 @@ MULTI_REVISION_ROOT = f"{API_ROOT}/namespaces"
MULTI_TASK_ROOT = f"{API_ROOT}/tasks"
ON_TRY = "MOZ_AUTOMATION" in os.environ
DOWNLOAD_TIMEOUT = 30
METRICS_MATCHER = re.compile(r"(perfMetrics\s.*)")
class NoPerfMetricsError(Exception):
"""Raised when perfMetrics were not found, or were not output
during a test run."""
def __init__(self, flavor):
super().__init__(
f"No perftest results were found in the {flavor} test. Results must be "
'reported using:\n info("perfMetrics", { metricName: metricValue });'
)
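For instance, raising the error for the new flavor produces a message like (sketch):

raise NoPerfMetricsError("mochitest")
# -> No perftest results were found in the mochitest test. Results must be
#    reported using:
#     info("perfMetrics", { metricName: metricValue });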
class LogProcessor:
def __init__(self, matcher):
self.buf = ""
self.stdout = sys.__stdout__
self.matcher = matcher
self._match = []
@property
def match(self):
return self._match
def write(self, buf):
while buf:
try:
newline_index = buf.index("\n")
except ValueError:
# No newline, wait for next call
self.buf += buf
break
# Get data up to next newline and combine with previously buffered data
data = self.buf + buf[: newline_index + 1]
buf = buf[newline_index + 1 :]
# Reset buffer then output line
self.buf = ""
if data.strip() == "":
continue
self.stdout.write(data.strip("\n") + "\n")
# Check whether this line contains a perfMetrics match
match = self.matcher.match(data)
if match:
# Record every matching metrics line
self._match.append(match.group(1))
def flush(self):
pass
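A short usage sketch of LogProcessor together with METRICS_MATCHER (metric value hypothetical):

import re
from contextlib import redirect_stdout

log_processor = LogProcessor(re.compile(r"(perfMetrics\s.*)"))
with redirect_stdout(log_processor):
    print('perfMetrics | {"fake": 0}')  # still echoed to the real stdout
# log_processor.match == ['perfMetrics | {"fake": 0}']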
@contextlib.contextmanager
@@ -232,7 +285,10 @@ def install_requirements_file(
# see - python/mozbuild/mozbuild/action/test_archive.py
# this mapping will map paths when running there.
# The key is the source path, and the value the ci path
_TRY_MAPPING = {Path("netwerk"): Path("xpcshell", "tests", "netwerk")}
_TRY_MAPPING = {
Path("netwerk"): Path("xpcshell", "tests", "netwerk"),
Path("dom"): Path("mochitest", "tests", "dom"),
}
def build_test_list(tests):
@@ -259,20 +315,20 @@ def build_test_list(tests):
if ON_TRY and not p_test.resolve().exists():
# until we have pathlib.Path.is_relative_to() (3.9)
for src_path, ci_path in _TRY_MAPPING.items():
src_path, ci_path = str(src_path), str(ci_path)
src_path, ci_path = str(src_path), str(ci_path) # noqa
if test.startswith(src_path):
p_test = Path(test.replace(src_path, ci_path))
break
test = p_test.resolve()
resolved_test = p_test.resolve()
if test.is_file():
res.append(str(test))
elif test.is_dir():
for file in test.rglob("perftest_*.js"):
if resolved_test.is_file():
res.append(str(resolved_test))
elif resolved_test.is_dir():
for file in resolved_test.rglob("perftest_*.js"):
res.append(str(file))
else:
raise FileNotFoundError(str(test))
raise FileNotFoundError(str(resolved_test))
res.sort()
return res, temp_dir
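To make the new mapping concrete: in CI, a test path that does not resolve locally is rewritten through _TRY_MAPPING before resolution; a sketch with a hypothetical path:

from pathlib import Path

test = "dom/foo/perftest_bar.html"  # hypothetical path under dom/
src_path, ci_path = "dom", str(Path("mochitest", "tests", "dom"))
if test.startswith(src_path):
    test = test.replace(src_path, ci_path)
# test == "mochitest/tests/dom/foo/perftest_bar.html"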

View file

@@ -11,8 +11,10 @@ job-defaults:
build:
- artifact: target.mozinfo.json
- artifact: target.common.tests.tar.gz
- artifact: target.condprof.tests.tar.gz
- artifact: target.perftests.tests.tar.gz
- artifact: target.xpcshell.tests.tar.gz
- artifact: target.mochitest.tests.tar.gz
- artifact: target.tar.bz2
platform: linux1804-64-shippable/opt
require-build: