Mirror of https://github.com/mozilla/gecko-dev.git
Backed out 3 changesets (bug 1661809, bug 1662706, bug 1663417) for SM bustages. CLOSED TREE
Backed out changeset 96e69574f7ee (bug 1662706)
Backed out changeset a584b11a503a (bug 1663417)
Backed out changeset 655ce94f20f3 (bug 1661809)
Parent: ece3375c66
Commit: f837964009
@@ -1 +0,0 @@
-[perftest_browser_xhtml_dom.js]
@@ -62,10 +62,6 @@ BROWSER_CHROME_MANIFESTS += [
     'content/test/zoom/browser.ini',
 ]
 
-PERFTESTS_MANIFESTS += [
-    'content/test/perftest.ini'
-]
-
 DEFINES['MOZ_APP_VERSION'] = CONFIG['MOZ_APP_VERSION']
 DEFINES['MOZ_APP_VERSION_DISPLAY'] = CONFIG['MOZ_APP_VERSION_DISPLAY']
 
@@ -149,7 +149,7 @@ DIRS += [
     'build',
     'config',
     'python',
-    'testing',
     'testing/mozbase',
     'third_party/python',
 ]
 
@@ -14,11 +14,6 @@ XPCSHELL_TESTS_MANIFESTS += [
     'unit_ipc/xpcshell.ini',
 ]
 
-PERFTESTS_MANIFESTS += [
-    'perf/perftest.ini',
-    'unit/perftest.ini'
-]
-
 if CONFIG['FUZZING_INTERFACES']:
     TEST_DIRS += [
         'fuzz'
@@ -1,7 +0,0 @@
-[perftest_http3_cloudflareblog.js]
-[perftest_http3_facebook_scroll.js]
-[perftest_http3_google_image.js]
-[perftest_http3_google_search.js]
-[perftest_http3_lucasquicfetch.js]
-[perftest_http3_youtube_watch.js]
-[perftest_http3_youtube_watch_scroll.js]
@@ -1 +0,0 @@
-[test_http3_perf.js]
@@ -1,12 +1,5 @@
 "use strict";
 
-var perfMetadata = {
-  owner: "Network Team",
-  name: "http3 raw",
-  description:
-    "XPCShell tests that verifies the lib integration against a local server",
-};
-
 var performance = performance || {};
 performance.now = (function() {
   return (
@@ -1998,10 +1998,6 @@ VARIABLES = {
         """List of manifest files defining python unit tests.
        """),
 
-    'PERFTESTS_MANIFESTS': (ManifestparserManifestList, list,
-        """List of manifest files defining MozPerftest performance tests.
-        """),
-
     'CRAMTEST_MANIFESTS': (ManifestparserManifestList, list,
         """List of manifest files defining cram unit tests.
         """),
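The `PERFTESTS_MANIFESTS` variable removed above is what let a moz.build file register MozPerftest manifests. For reference, the registration pattern the backed-out patches used, as seen in the browser/ and netwerk/ hunks earlier; the manifest path below is illustrative:

    PERFTESTS_MANIFESTS += [
        'perf/perftest.ini',
    ]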
@@ -58,7 +58,6 @@ TEST_MANIFESTS = dict(
     MOCHITEST_CHROME=('chrome', 'testing/mochitest', 'chrome', True),
     WEBRTC_SIGNALLING_TEST=('steeplechase', 'steeplechase', '.', True),
     XPCSHELL_TESTS=('xpcshell', 'xpcshell', '.', True),
-    PERFTESTS=('perftest', 'testing/perf', 'perf', True)
 )
 
 # reftests, wpt, and puppeteer all have their own manifest formats
@@ -1,3 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
@@ -1,108 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-import os
-import sys
-import subprocess
-from pathlib import Path
-import json
-
-from mozterm import Terminal
-from mozboot.util import get_state_dir
-from mozbuild.util import ensure_subprocess_env
-from distutils.spawn import find_executable
-
-
-HERE = Path(__file__).parent.resolve()
-SRC_ROOT = (HERE / ".." / ".." / ".." / "..").resolve()
-PREVIEW_SCRIPT = HERE / "preview.py"
-FZF_HEADER = """
-Please select a performance test to execute.
-{shortcuts}
-""".strip()
-
-fzf_shortcuts = {
-    "ctrl-t": "toggle-all",
-    "alt-bspace": "beginning-of-line+kill-line",
-    "?": "toggle-preview",
-}
-
-fzf_header_shortcuts = [
-    ("select", "tab"),
-    ("accept", "enter"),
-    ("cancel", "ctrl-c"),
-    ("cursor-up", "up"),
-    ("cursor-down", "down"),
-]
-
-
-def run_fzf(cmd, tasks):
-    env = dict(os.environ)
-    env.update(
-        {"PYTHONPATH": os.pathsep.join([p for p in sys.path if "requests" in p])}
-    )
-    proc = subprocess.Popen(
-        cmd,
-        stdout=subprocess.PIPE,
-        stdin=subprocess.PIPE,
-        env=ensure_subprocess_env(env),
-        universal_newlines=True,
-    )
-    out = proc.communicate("\n".join(tasks))[0].splitlines()
-    selected = []
-    query = None
-    if out:
-        query = out[0]
-        selected = out[1:]
-    return query, selected
-
-
-def format_header():
-    terminal = Terminal()
-    shortcuts = []
-    for action, key in fzf_header_shortcuts:
-        shortcuts.append(
-            "{t.white}{action}{t.normal}: {t.yellow}<{key}>{t.normal}".format(
-                t=terminal, action=action, key=key
-            )
-        )
-    return FZF_HEADER.format(shortcuts=", ".join(shortcuts), t=terminal)
-
-
-def autodetect(path):
-    from mozperftest.script import ScriptInfo
-
-    return ScriptInfo.detect_type(path).name
-
-
-def select(test_objects):
-    mozbuild_dir = Path(Path.home(), ".mozbuild")
-    os.makedirs(str(mozbuild_dir), exist_ok=True)
-    cache_file = Path(mozbuild_dir, ".perftestfuzzy")
-
-    with cache_file.open("w") as f:
-        f.write(json.dumps(test_objects))
-
-    def _display(task):
-        flavor = autodetect(task["path"])
-        path = task["path"].replace(str(SRC_ROOT), "")
-        return f"[{flavor}] {path}"
-
-    candidate_tasks = [_display(t) for t in test_objects]
-    fzf_bin = find_executable("fzf", str(Path(get_state_dir(), "fzf", "bin")))
-    key_shortcuts = [k + ":" + v for k, v in fzf_shortcuts.items()]
-
-    base_cmd = [
-        fzf_bin,
-        "-m",
-        "--bind",
-        ",".join(key_shortcuts),
-        "--header",
-        format_header(),
-        "--preview-window=right:50%",
-        "--print-query",
-        "--preview",
-        sys.executable + ' {} -t "{{+f}}"'.format(str(PREVIEW_SCRIPT)),
-    ]
-    query_str, tasks = run_fzf(base_cmd, sorted(candidate_tasks))
-    return tasks
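The deleted fzf.py drives an external fzf binary over pipes: run_fzf() writes one candidate per line to stdin, and with --print-query the first stdout line is the typed query followed by the selected entries. A minimal sketch of that protocol, assuming an fzf binary on PATH (the helper name is illustrative, not part of the deleted module):

    import subprocess

    def pick(candidates):
        # fzf reads candidates from stdin and prints the selection on stdout;
        # --print-query makes the first output line the query itself
        proc = subprocess.Popen(
            ["fzf", "-m", "--print-query"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True,
        )
        out = proc.communicate("\n".join(candidates))[0].splitlines()
        return (out[0], out[1:]) if out else (None, [])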
@@ -1,88 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-"""
-This file is executed by fzf through the command line and needs to
-work in a standalone way on any Python 3 environment.
-
-This is why it alters PATH, making the assumption it's executed
-from within a source tree. Do not add dependencies unless they
-are in the source tree and added in SEARCH_PATHS.
-"""
-import argparse
-import sys
-import json
-from pathlib import Path
-import importlib.util
-
-
-HERE = Path(__file__).parent.resolve()
-SRC_ROOT = (HERE / ".." / ".." / ".." / "..").resolve()
-# make sure esprima is in the path
-SEARCH_PATHS = [
-    ("third_party", "python", "esprima"),
-]
-
-for path in SEARCH_PATHS:
-    path = Path(SRC_ROOT, *path)
-    if path.exists():
-        sys.path.insert(0, str(path))
-
-
-def get_test_objects():
-    """Loads .perftestfuzzy and returns its content.
-
-    The cache file is produced by the main fzf script and is used
-    as a way to let the preview script grab test_objects from the
-    mach command.
-    """
-    cache_file = Path(Path.home(), ".mozbuild", ".perftestfuzzy")
-    with cache_file.open() as f:
-        return json.loads(f.read())
-
-
-def plain_display(taskfile):
-    """Preview window display.
-
-    Returns the reST summary for the perf test script.
-    """
-    # Lame way to catch the ScriptInfo class without loading mozperftest
-    script_info = HERE / ".." / "script.py"
-    spec = importlib.util.spec_from_file_location(
-        name="script.py", location=str(script_info)
-    )
-    module = importlib.util.module_from_spec(spec)
-    spec.loader.exec_module(module)
-    ScriptInfo = module.ScriptInfo
-
-    with open(taskfile) as f:
-        tasklist = [line.strip() for line in f]
-    script_name = tasklist[0].split(" ")[-1]
-    for ob in get_test_objects():
-        if ob["path"].endswith(script_name):
-            print(ScriptInfo(ob["path"]))
-            return
-
-
-def process_args(args):
-    """Process preview arguments."""
-    argparser = argparse.ArgumentParser()
-    argparser.add_argument(
-        "-t",
-        "--tasklist",
-        type=str,
-        default=None,
-        help="Path to temporary file containing the selected tasks",
-    )
-    return argparser.parse_args(args=args)
-
-
-def main(args=None):
-    if args is None:
-        args = sys.argv[1:]
-    args = process_args(args)
-    plain_display(args.tasklist)
-
-
-if __name__ == "__main__":
-    main()
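fzf invokes preview.py through the --preview command assembled in fzf.py: the highlighted candidate lines are written to a temporary file whose path is passed via -t, and the script prints the reST summary for the matching test. Driving it directly mirrors the test_fzf.py test later in this diff (the file path here is illustrative):

    from mozperftest.fzf.preview import main

    # the tasklist file holds the candidate line(s) fzf would pass in
    main(args=["-t", "/tmp/selected_tasks.txt"])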
@@ -5,7 +5,6 @@ import os
 import sys
 from functools import partial
 import subprocess
-import shlex
 
 from mach.decorators import CommandProvider, Command, CommandArgument
 from mozbuild.base import MachCommandBase, MachCommandConditions as conditions
@@ -31,10 +30,9 @@ class Perftest(MachCommandBase):
         parser=get_perftest_parser,
     )
     def run_perftest(self, **kwargs):
-        from pathlib import Path
-
         push_to_try = kwargs.pop("push_to_try", False)
         if push_to_try:
+            from pathlib import Path
 
             sys.path.append(str(Path(self.topsrcdir, "tools", "tryselect")))
 
@@ -67,44 +65,6 @@ class Perftest(MachCommandBase):
             push_to_try("perftest", "perftest", try_task_config=task_config)
             return
 
-        # user selection with fuzzy UI
-        from mozperftest.utils import ON_TRY
-
-        if not ON_TRY and kwargs.get("tests", []) == []:
-            from moztest.resolve import TestResolver
-            from mozperftest.fzf.fzf import select
-            from mozperftest.script import ScriptInfo, ScriptType
-
-            resolver = self._spawn(TestResolver)
-            test_objects = list(resolver.resolve_tests(paths=None, flavor="perftest"))
-
-            def full_path(s):
-                relative = s.split()[-1].lstrip(os.sep)
-                return str(Path(self.topsrcdir, relative))
-
-            kwargs["tests"] = [full_path(s) for s in select(test_objects)]
-
-            if kwargs["tests"] == []:
-                print("\nNo selection. Bye!")
-                return
-
-            if len(kwargs["tests"]) > 1:
-                print("\nSorry no support yet for multiple local perftest")
-                return
-
-            sel = "\n".join(kwargs["tests"])
-            print("\nGood job! Best selection.\n%s" % sel)
-
-            # if the script is xpcshell, we can force the flavor here
-            script_type = ScriptInfo.detect_type(kwargs["tests"][0])
-            if script_type == ScriptType.xpcshell:
-                kwargs["flavor"] = script_type.name
-            else:
-                # we set the value only if not provided (so "mobile-browser"
-                # can be picked)
-                if "flavor" not in kwargs:
-                    kwargs["flavor"] = "desktop-browser"
-
         # run locally
         MachCommandBase.activate_virtualenv(self)
@@ -112,25 +72,19 @@ class Perftest(MachCommandBase):
 
         run_tests(mach_cmd=self, **kwargs)
 
         print("\nFirefox. Fast For Good.\n")
 
 
 @CommandProvider
 class PerftestTests(MachCommandBase):
-    def _run_script(self, cmd, *args, **kw):
-        """Used to run a command in a subprocess."""
-        display = kw.pop("display", False)
+    def _run_python_script(self, module, *args, **kw):
+        """Used to run the scripts in isolation.
+
+        Coverage needs to run in isolation so it's not
+        reimporting modules and produce wrong coverage info.
+        """
         verbose = kw.pop("verbose", False)
-        if isinstance(cmd, str):
-            cmd = shlex.split(cmd)
-        try:
-            joiner = shlex.join
-        except AttributeError:
-            # Python < 3.8
-            joiner = subprocess.list2cmdline
-
-        sys.stdout.write("=> %s " % kw.pop("label", joiner(cmd)))
-        args = cmd + list(args)
+        args = [self.virtualenv_manager.python_path, "-m", module] + list(args)
+        sys.stdout.write("=> %s " % kw.pop("label", module))
         sys.stdout.flush()
         try:
             if verbose:
@@ -151,17 +105,6 @@ class PerftestTests(MachCommandBase):
             sys.stdout.flush()
             return False
 
-    def _run_python_script(self, module, *args, **kw):
-        """Used to run a Python script in isolation.
-
-        Coverage needs to run in isolation so it's not
-        reimporting modules and produce wrong coverage info.
-        """
-        cmd = [self.virtualenv_manager.python_path, "-m", module]
-        if "label" not in kw:
-            kw["label"] = module
-        return self._run_script(cmd, *args, **kw)
-
     @Command(
         "perftest-test",
         category="testing",
@@ -212,20 +155,30 @@ class PerftestTests(MachCommandBase):
         vendors = ["coverage"]
         if not ON_TRY:
             vendors.append("attrs")
+        if skip_linters:
+            pypis = []
+        else:
+            pypis = ["flake8"]
+
+        # if we're not on try we want to install black
+        if not ON_TRY and not skip_linters:
+            pypis.append("black==19.10b0")
+
+        # these are the deps we are getting from pypi
+        for dep in pypis:
+            install_package(self.virtualenv_manager, dep)
 
         # pip-installing dependencies that require compilation or special setup
         for dep in vendors:
             install_package(self.virtualenv_manager, str(Path(pydeps, dep)))
 
-        if not ON_TRY and not skip_linters:
-            cmd = "./mach lint "
-            if verbose:
-                cmd += " -v"
-            cmd += " " + str(HERE)
-            if not self._run_script(
-                cmd, label="linters", display=verbose, verbose=verbose
-            ):
-                raise AssertionError("Please fix your code.")
+        # formatting the code with black
+        assert self._run_python_script("black", str(HERE))
 
         # checking flake8 correctness
         if not (ON_TRY and sys.platform == "darwin") and not skip_linters:
             assert self._run_python_script("flake8", str(HERE))
 
         # running pytest with coverage
         # coverage is done in three steps:
@@ -195,6 +195,7 @@ class MetricsStorage(object):
         simplify_names=False,
         simplify_exclude=["statistics"],
     ):
+
         """Filters the metrics to only those that were requested by `metrics`.
 
         If metrics is Falsey (None, empty list, etc.) then no metrics
@@ -133,7 +133,7 @@ def run_tests(mach_cmd, **kwargs):
     # XXX this doc is specific to browsertime scripts
     # maybe we want to move it
     if flavor == "doc":
-        from mozperftest.script import ScriptInfo
+        from mozperftest.test.browsertime.script import ScriptInfo
 
         for test in kwargs["tests"]:
            print(ScriptInfo(test))
@@ -1,179 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-from collections import defaultdict
-import re
-import textwrap
-from pathlib import Path
-from enum import Enum
-
-import esprima
-
-
-# list of metadata, each item is the name and if the field is mandatory
-METADATA = [
-    ("setUp", False),
-    ("tearDown", False),
-    ("test", True),
-    ("owner", True),
-    ("author", False),
-    ("name", True),
-    ("description", True),
-    ("longDescription", False),
-    ("usage", False),
-    ("supportedBrowsers", False),
-    ("supportedPlatforms", False),
-    ("filename", True),
-]
-
-
-_INFO = """\
-%(filename)s
-%(filename_underline)s
-
-%(description)s
-
-Owner: %(owner)s
-Test Name: %(name)s
-Usage:
-%(usage)s
-
-Description:
-%(longDescription)s
-"""
-
-
-XPCSHELL_FUNCS = "add_task", "run_test", "run_next_test"
-
-
-class MissingFieldError(Exception):
-    def __init__(self, script, field):
-        super().__init__(f"Missing metadata {field}")
-        self.script = script
-        self.field = field
-
-
-class ParseError(Exception):
-    def __init__(self, script, exception):
-        super().__init__(f"Cannot parse {script}")
-        self.script = script
-        self.exception = exception
-
-
-class ScriptType(Enum):
-    xpcshell = 1
-    browsertime = 2
-
-
-class ScriptInfo(defaultdict):
-    """Loads and parses a Browsertime test script."""
-
-    def __init__(self, path):
-        super(ScriptInfo, self).__init__()
-        try:
-            self._parse_file(path)
-        except Exception as e:
-            raise ParseError(path, e)
-
-        # if the fields found don't match our known ones, an error is raised
-        for field, required in METADATA:
-            if not required:
-                continue
-            if field not in self:
-                raise MissingFieldError(path, field)
-
-    def _parse_file(self, path):
-        self.script = Path(path)
-        self["filename"] = str(self.script)
-        self.script_type = ScriptType.browsertime
-        with self.script.open() as f:
-            self.parsed = esprima.parseScript(f.read())
-
-        # looking for the exports statement
-        for stmt in self.parsed.body:
-            # detecting if the script has add_task()
-            if (
-                stmt.type == "ExpressionStatement"
-                and stmt.expression is not None
-                and stmt.expression.callee is not None
-                and stmt.expression.callee.type == "Identifier"
-                and stmt.expression.callee.name in XPCSHELL_FUNCS
-            ):
-                self["test"] = "xpcshell"
-                self.script_type = ScriptType.xpcshell
-                continue
-
-            # plain xpcshell test function markers
-            if stmt.type == "FunctionDeclaration" and stmt.id.name in XPCSHELL_FUNCS:
-                self["test"] = "xpcshell"
-                self.script_type = ScriptType.xpcshell
-                continue
-
-            # is this the perfMetadata plain var?
-            if stmt.type == "VariableDeclaration":
-                for decl in stmt.declarations:
-                    if (
-                        decl.type != "VariableDeclarator"
-                        or decl.id.type != "Identifier"
-                        or decl.id.name != "perfMetadata"
-                        or decl.init is None
-                    ):
-                        continue
-                    self.scan_properties(decl.init.properties)
-                continue
-
-            # or the module.exports map?
-            if (
-                stmt.type != "ExpressionStatement"
-                or stmt.expression.left is None
-                or stmt.expression.left.property is None
-                or stmt.expression.left.property.name != "exports"
-                or stmt.expression.right is None
-                or stmt.expression.right.properties is None
-            ):
-                continue
-
-            # now scanning the properties
-            self.scan_properties(stmt.expression.right.properties)
-
-    def scan_properties(self, properties):
-        for prop in properties:
-            if prop.value.type == "Identifier":
-                value = prop.value.name
-            elif prop.value.type == "Literal":
-                value = prop.value.value
-            elif prop.value.type == "TemplateLiteral":
-                # ugly
-                value = prop.value.quasis[0].value.cooked.replace("\n", " ")
-                value = re.sub(r"\s+", " ", value).strip()
-            elif prop.value.type == "ArrayExpression":
-                value = [e.value for e in prop.value.elements]
-            else:
-                raise ValueError(prop.value.type)
-
-            self[prop.key.name] = value
-
-    def __str__(self):
-        """Used to generate docs."""
-        d = defaultdict(lambda: "N/A")
-        for field, value in self.items():
-            if field == "filename":
-                d[field] = self.script.name
-                continue
-
-            # line wrapping
-            if isinstance(value, str):
-                value = "\n".join(textwrap.wrap(value, break_on_hyphens=False))
-            elif isinstance(value, list):
-                value = ", ".join(value)
-            d[field] = value
-
-        d["filename_underline"] = "=" * len(d["filename"])
-        return _INFO % d
-
-    def __missing__(self, key):
-        return "N/A"
-
-    @classmethod
-    def detect_type(cls, path):
-        return cls(path).script_type
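Both this deleted module and the restored browsertime copy further down expose the same entry point: ScriptInfo parses a test script with esprima, fills itself like a dict with the metadata it finds, and renders the reST summary via __str__. A usage sketch (the script paths are illustrative):

    from mozperftest.script import ScriptInfo

    info = ScriptInfo("perftest_example.js")  # parses the script with esprima
    owner = info["owner"]                     # metadata fields behave like dict keys
    print(info)                               # reST summary used by docs and the fzf preview
    kind = ScriptInfo.detect_type("test_xpcshell.js")  # ScriptType.xpcshell or .browsertime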
@@ -12,7 +12,7 @@ from pathlib import Path
 
 from mozperftest.utils import install_package
 from mozperftest.test.noderunner import NodeRunner
-from mozperftest.script import ScriptInfo
+from mozperftest.test.browsertime.script import ScriptInfo
 
 
 BROWSERTIME_SRC_ROOT = Path(__file__).parent
@@ -0,0 +1,115 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from collections import defaultdict
+import re
+import textwrap
+from pathlib import Path
+
+import esprima
+
+
+# list of metadata, each item is the name and if the field is mandatory
+METADATA = [
+    ("setUp", False),
+    ("tearDown", False),
+    ("test", True),
+    ("owner", True),
+    ("author", False),
+    ("name", True),
+    ("description", True),
+    ("longDescription", False),
+    ("usage", False),
+    ("supportedBrowsers", False),
+    ("supportedPlatforms", False),
+    ("filename", True),
+]
+
+
+_INFO = """\
+%(filename)s
+%(filename_underline)s
+
+%(description)s
+
+Owner: %(owner)s
+Test Name: %(name)s
+Usage:
+%(usage)s
+
+Description:
+%(longDescription)s
+"""
+
+
+class MissingFieldError(Exception):
+    pass
+
+
+class ScriptInfo(defaultdict):
+    """Loads and parses a Browsertime test script."""
+
+    def __init__(self, path):
+        super(ScriptInfo, self).__init__()
+        self.script = Path(path)
+        self["filename"] = str(self.script)
+
+        with self.script.open() as f:
+            self.parsed = esprima.parseScript(f.read())
+
+        # looking for the exports statement
+        for stmt in self.parsed.body:
+            if (
+                stmt.type != "ExpressionStatement"
+                or stmt.expression.left is None
+                or stmt.expression.left.property is None
+                or stmt.expression.left.property.name != "exports"
+                or stmt.expression.right is None
+                or stmt.expression.right.properties is None
+            ):
+                continue
+
+            # now scanning the properties
+            for prop in stmt.expression.right.properties:
+                if prop.value.type == "Identifier":
+                    value = prop.value.name
+                elif prop.value.type == "Literal":
+                    value = prop.value.value
+                elif prop.value.type == "TemplateLiteral":
+                    # ugly
+                    value = prop.value.quasis[0].value.cooked.replace("\n", " ")
+                    value = re.sub(r"\s+", " ", value).strip()
+                elif prop.value.type == "ArrayExpression":
+                    value = [e.value for e in prop.value.elements]
+                else:
+                    raise ValueError(prop.value.type)
+
+                self[prop.key.name] = value
+
+        # if the fields found don't match our known ones, an error is raised
+        for field, required in METADATA:
+            if not required:
+                continue
+            if field not in self:
+                raise MissingFieldError(field)
+
+    def __str__(self):
+        """Used to generate docs."""
+        d = defaultdict(lambda: "N/A")
+        for field, value in self.items():
+            if field == "filename":
+                d[field] = self.script.name
+                continue
+
+            # line wrapping
+            if isinstance(value, str):
+                value = "\n".join(textwrap.wrap(value, break_on_hyphens=False))
+            elif isinstance(value, list):
+                value = ", ".join(value)
+            d[field] = value
+
+        d["filename_underline"] = "=" * len(d["filename"])
+        return _INFO % d
+
+    def __missing__(self, key):
+        return "N/A"
@@ -14,18 +14,3 @@ add_task(async function dummy_test() {
   info("perfMetrics", {"metrics3": 3});
   await true;
 });
-
-
-var perfMetadata = {
-  owner: "Performance Testing Team",
-  name: "Example",
-  description: "The description of the example test.",
-  longDescription: `
-  This is a longer description of the test perhaps including information
-  about how it should be run locally or links to relevant information.
-  `,
-  usage: "explains how to use it",
-  supportedBrowsers: ["Firefox"],
-  supportedPlatforms: ["Desktop"],
-};
-
@@ -1,29 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-"use strict";
-
-var perfMetadata = {
-  owner: "Performance Testing Team",
-  name: "Example",
-  description: "The description of the example test.",
-  longDescription: `
-  This is a longer description of the test perhaps including information
-  about how it should be run locally or links to relevant information.
-  `,
-  usage: `
-  ./mach perftest
-  python/mozperftest/mozperftest/tests/data/samples/test_xpcshell_flavor2.js
-  `,
-  supportedBrowsers: ["Firefox"],
-  supportedPlatforms: ["Desktop"],
-};
-
-function run_next_test() {
-  // do something
-}
-
-function run_test() {
-  // do something
-}
@@ -11,11 +11,9 @@ from mozperftest.hooks import Hooks
 
 
 HERE = Path(__file__).parent
-ROOT = Path(HERE, "..", "..", "..", "..").resolve()
 EXAMPLE_TESTS_DIR = os.path.join(HERE, "data", "samples")
 EXAMPLE_TEST = os.path.join(EXAMPLE_TESTS_DIR, "perftest_example.js")
 EXAMPLE_XPCSHELL_TEST = Path(EXAMPLE_TESTS_DIR, "test_xpcshell.js")
-EXAMPLE_XPCSHELL_TEST2 = Path(EXAMPLE_TESTS_DIR, "test_xpcshell_flavor2.js")
 BT_DATA = Path(HERE, "data", "browsertime-results", "browsertime.json")
 BT_DATA_VIDEO = Path(HERE, "data", "browsertime-results-video", "browsertime.json")
 DMG = Path(HERE, "data", "firefox.dmg")
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-import mozunit
-from unittest import mock
-from pathlib import Path
-import json
-
-from mozperftest.tests.support import EXAMPLE_TEST, temp_file
-from mozperftest.fzf.fzf import select
-from mozperftest.fzf.preview import main
-from mozperftest.utils import silence
-
-
-class Fzf:
-    def __init__(self, cmd, *args, **kw):
-        self.cmd = cmd
-
-    def communicate(self, *args):
-        return "query\n" + args[0], "stderr"
-
-
-@mock.patch("subprocess.Popen", new=Fzf)
-def test_select(*mocked):
-    test_objects = [{"path": str(EXAMPLE_TEST)}]
-    selection = select(test_objects)
-    assert len(selection) == 1
-
-
-def test_preview():
-    content = str(EXAMPLE_TEST)
-    test_objects = [{"path": content}]
-    cache = Path(Path.home(), ".mozbuild", ".perftestfuzzy")
-    with cache.open("w") as f:
-        f.write(json.dumps(test_objects))
-
-    with temp_file(content=content) as tasklist, silence() as out:
-        main(args=["-t", tasklist])
-
-    out[0].seek(0)
-    assert "Owner: Performance Testing Team" in out[0].read()
-
-
-if __name__ == "__main__":
-    mozunit.main()
@ -19,7 +19,7 @@ Registrar.commands_by_category = {"testing": set()}
|
|||
from mozperftest.environment import MachEnvironment # noqa
|
||||
from mozperftest.mach_commands import Perftest, PerftestTests # noqa
|
||||
from mozperftest import utils # noqa
|
||||
from mozperftest.tests.support import EXAMPLE_TEST, EXAMPLE_TESTS_DIR, ROOT # noqa
|
||||
from mozperftest.tests.support import EXAMPLE_TESTS_DIR # noqa
|
||||
from mozperftest.utils import temporary_env, silence, ON_TRY # noqa
|
||||
|
||||
|
||||
|
@@ -143,31 +143,5 @@ def test_run_python_script_failed(*mocked):
     assert stdout.read().endswith("[FAILED]\n")
 
 
-def fzf_selection(*args):
-    full_path = args[-1][-1]["path"]
-    path = full_path.replace(str(ROOT), "")
-    return ["[browsertime] " + path]
-
-
-def resolve_tests(*args, **kw):
-    return [{"path": str(EXAMPLE_TEST)}]
-
-
-@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
-@mock.patch("mozperftest.mach_commands.MachCommandBase.activate_virtualenv")
-@mock.patch("mozperftest.fzf.fzf.select", new=fzf_selection)
-@mock.patch("moztest.resolve.TestResolver.resolve_tests", new=resolve_tests)
-def test_fzf_flavor(*mocked):
-    # forcing ON_TRY to false, so when the test runs in the CI,
-    # we test the desktop behavior (fzf is a UI that is deactivated in the CI)
-    old = utils.ON_TRY
-    utils.ON_TRY = False
-    try:
-        with _get_command() as test, silence():
-            test.run_perftest(flavor="desktop-browser")
-    finally:
-        utils.ON_TRY = old
-
-
 if __name__ == "__main__":
     mozunit.main()
@@ -5,32 +5,16 @@
 import mozunit
-import pytest
 
-from mozperftest.script import ScriptInfo, MissingFieldError, ScriptType
-from mozperftest.tests.support import (
-    EXAMPLE_TEST,
-    HERE,
-    EXAMPLE_XPCSHELL_TEST,
-    EXAMPLE_XPCSHELL_TEST2,
-)
+from mozperftest.test.browsertime.script import ScriptInfo, MissingFieldError
+from mozperftest.tests.support import EXAMPLE_TEST, HERE
 
 
-def test_scriptinfo_bt():
+def test_scriptinfo():
     info = ScriptInfo(EXAMPLE_TEST)
     assert info["author"] == "N/A"
 
     display = str(info)
     assert "The description of the example test." in display
-    assert info.script_type == ScriptType.browsertime
-
-
-@pytest.mark.parametrize("script", [EXAMPLE_XPCSHELL_TEST, EXAMPLE_XPCSHELL_TEST2])
-def test_scriptinfo_xpcshell(script):
-    info = ScriptInfo(script)
-    assert info["author"] == "N/A"
-
-    display = str(info)
-    assert "The description of the example test." in display
-    assert info.script_type == ScriptType.xpcshell
 
 
 def test_scriptinfo_failure():
@@ -9,15 +9,8 @@ from setuptools import setup
 PACKAGE_NAME = "mozperftest"
 PACKAGE_VERSION = "0.2"
 
-deps = [
-    "regex",
-    "jsonschema",
-    "mozlog >= 6.0",
-    "mozdevice >= 4.0.0",
-    "mozproxy",
-    "mozinfo",
-    "mozfile",
-]
+deps = ["regex", "jsonschema", "mozlog >= 6.0", "mozdevice >= 4.0.0", "mozproxy",
+        "mozinfo", "mozfile"]
 
 setup(
     name=PACKAGE_NAME,
@ -7,10 +7,3 @@ with Files("*cppunittest*"):
|
|||
|
||||
with Files("remote*"):
|
||||
BUG_COMPONENT = ("GeckoView", "General")
|
||||
|
||||
DIRS += ['mozbase']
|
||||
|
||||
PERFTESTS_MANIFESTS += [
|
||||
'performance/perftest.ini',
|
||||
]
|
||||
|
||||
|
|
|
@@ -1,9 +0,0 @@
-[perftest_android_main.js]
-[perftest_android_view.js]
-[perftest_bbc_link.js]
-[perftest_facebook.js]
-[perftest_jsconf_cold.js]
-[perftest_jsconf_warm.js]
-[perftest_pageload.js]
-[perftest_politico_link.js]
-[perftest_youtube_link.js]
@@ -10,7 +10,7 @@ import re
 
 from perfdocs.utils import read_yaml
 from manifestparser import TestManifest
-from mozperftest.script import ScriptInfo
+from mozperftest.test.browsertime.script import ScriptInfo
 
 """
 This file is for framework specific gatherers since manifests