Bug 1607474 - Add logging and expected results to puppeteer-test, r=remote-protocol-reviewers,maja_zf

Use mozlog to handle logging from the `mach puppeteer-test`
command. This makes the output compatible with treeherder and allows
generating error summaries that can be used by the push-health
feature.

Presently the integration is based on parsing the output from mocha
with a regex, since that's easy to implement without digging too
deeply into the many layers of js code. In time it might make sense to
replace this with a custom output formatter integrated into mocha
itself.
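
As a rough illustration of the regex-based approach, the sketch below
uses the same pattern and status mapping this patch adds; the sample
output line is made up, since the exact reporter format is not
reproduced here:

    import re

    # Pattern and status mapping as added in this patch; the sample line
    # below is an invented illustration of the mocha reporter output.
    test_name_re = re.compile(r"\d+\) \[\s*(\w+)\s*\] (.*) \(.*\)")
    status_map = {"OK": "PASS", "TIME": "TIMEOUT"}

    line = "1) [ OK ] Page.goto should navigate to an empty page (250ms)"
    m = test_name_re.match(line)
    if m:
        status, test_name = m.groups()
        status = status_map.get(status, status)
        # status == "PASS"
        # test_name == "Page.goto should navigate to an empty page"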

We also add the ability to store and use expected results, so that we
are able to detect regressions even when not all tests are passing. The
format of expected results is Dict[String, List[String]], where the
keys are test names and the values are lists of statuses. The first
status in a list is the expected result; if the list has more than one
value, the remaining values are treated as possible intermittent
statuses.
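
For example, a hypothetical puppeteer-expected.json could look like the
following (shown as the equivalent Python dict; the test names and
statuses are invented for illustration):

    # Hypothetical contents of puppeteer-expected.json.
    expected = {
        "Page.goto should work": ["PASS"],
        "Page.click should click the button": ["FAIL"],
        "Browser.close should terminate the browser": ["PASS", "TIMEOUT"],
    }

    # The first status is the expected result; any further statuses are
    # treated as known intermittents.
    statuses = expected.get("Browser.close should terminate the browser",
                            ["PASS"])
    expected_status, known_intermittent = statuses[0], statuses[1:]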

The updated expected statuses can be generated by passing the
--write-results flag to the mach command.
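
Roughly, the update rule keeps an existing expectation when the
observed status is already listed, and otherwise replaces it with the
observed status. A minimal sketch of that logic, mirroring
new_expected() in the patch below (simplified to plain dict iteration):

    def compute_new_expected(test_results, expected):
        # test_results maps test name -> observed status;
        # expected maps test name -> list of allowed statuses.
        new = {}
        for test_name, status in test_results.items():
            old = expected.get(test_name)
            if old is not None and status in old:
                # Observed status is already allowed; keep the old entry.
                new[test_name] = old
            else:
                # New test or unexpected status; record what we saw.
                new[test_name] = [status]
        return new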

Differential Revision: https://phabricator.services.mozilla.com/D65700

--HG--
extra : moz-landing-system : lando
James Graham 2020-03-10 15:19:16 +00:00
Parent a2c83198eb
Commit 609861f3ed
1 changed file with 195 additions and 52 deletions


@@ -8,13 +8,17 @@ from __future__ import (
unicode_literals,
)
import argparse
import json
import sys
import os
import re
import tempfile
import shutil
import subprocess
from six import iteritems
from mach.decorators import (
Command,
CommandArgument,
@@ -27,6 +31,7 @@ from mozbuild.base import (
MozbuildObject,
)
from mozbuild import nodeutil
import mozlog
import mozprofile
@@ -141,18 +146,100 @@ def git(*args, **kwargs):
def npm(*args, **kwargs):
from mozprocess import processhandler
env = None
if kwargs.get("env"):
env = os.environ.copy()
env.update(kwargs["env"])
p = subprocess.Popen(("npm",) + args,
cwd=kwargs.get("cwd"),
env=env)
proc_kwargs = {}
if "processOutputLine" in kwargs:
proc_kwargs["processOutputLine"] = kwargs["processOutputLine"]
p.wait()
if p.returncode > 0:
exit(p.returncode, "npm: exit code {}".format(p.returncode))
p = processhandler.ProcessHandler(cmd="npm",
args=list(args),
cwd=kwargs.get("cwd"),
env=env,
**proc_kwargs)
if not kwargs.get("wait", True):
return p
wait_proc(p, cmd="npm", exit_on_fail=kwargs.get("exit_on_fail", True))
return p.returncode
def wait_proc(p, cmd=None, exit_on_fail=True):
try:
p.run()
p.wait()
finally:
p.kill()
if exit_on_fail and p.returncode > 0:
msg = ("%s: exit code %s" % (cmd, p.returncode) if cmd
else "exit code %s" % p.returncode)
exit(p.returncode, msg)
class MochaOutputHandler(object):
def __init__(self, logger, expected):
self.logger = logger
self.test_name_re = re.compile(r"\d+\) \[\s*(\w+)\s*\] (.*) \(.*\)")
self.control_re = re.compile("\x1b\\[[0-9]*m")
self.proc = None
self.test_results = {}
self.expected = expected
self.has_unexpected = False
self.logger.suite_start([], name="puppeteer-tests")
self.status_map = {"OK": "PASS", "TIME": "TIMEOUT"}
@property
def pid(self):
return self.proc and self.proc.pid
def __call__(self, line):
line = line.decode("utf-8", "replace")
line_text = self.control_re.subn("", line)[0]
m = self.test_name_re.match(line_text)
if m:
status, test_name = m.groups()
status = self.status_map.get(status, status)
expected = self.expected.get(test_name, ["PASS"])
known_intermittent = expected[1:]
expected_status = expected[0]
self.test_results[test_name] = status
self.logger.test_start(test_name)
self.logger.test_end(test_name,
status=status,
expected=expected_status,
known_intermittent=known_intermittent)
if status not in expected:
self.has_unexpected = True
self.logger.process_output(self.pid, line, command="npm")
def new_expected(self):
new_expected = {}
for test_name, status in iteritems(self.test_results):
if test_name not in self.expected:
new_expected[test_name] = [status]
else:
if status in self.expected[test_name]:
new_expected[test_name] = self.expected[test_name]
else:
new_expected[test_name] = [status]
return new_expected
def after_end(self):
missing = set(self.expected) - set(self.test_results)
if missing:
self.has_unexpected = True
for test_name in missing:
self.logger.error("TEST-MISSING %s" % (test_name,))
self.logger.suite_end()
# tempfile.TemporaryDirectory missing from Python 2.7
@@ -186,7 +273,7 @@ class PuppeteerRunner(MozbuildObject):
self.remotedir = os.path.join(self.topsrcdir, "remote")
self.puppeteerdir = os.path.join(self.remotedir, "test", "puppeteer")
def run_test(self, *tests, **params):
def run_test(self, logger, *tests, **params):
"""
Runs Puppeteer unit tests with npm.
@@ -203,6 +290,8 @@ class PuppeteerRunner(MozbuildObject):
`extra_prefs`:
Dictionary of extra preferences to write to the profile,
before invoking npm. Overrides default preferences.
`write_results`:
Path to write the results json file
"""
setup()
@@ -234,58 +323,109 @@ class PuppeteerRunner(MozbuildObject):
if extra_options:
env["EXTRA_LAUNCH_OPTIONS"] = json.dumps(extra_options)
return npm(*command, cwd=self.puppeteerdir, env=env)
expected_path = os.path.join(os.path.dirname(__file__),
"puppeteer-expected.json")
if product == "firefox" and os.path.exists(expected_path):
with open(expected_path) as f:
expected_data = json.load(f)
else:
expected_data = {}
output_handler = MochaOutputHandler(logger, expected_data)
proc = npm(*command, cwd=self.puppeteerdir, env=env,
processOutputLine=output_handler, wait=False)
output_handler.proc = proc
wait_proc(proc, "npm", exit_on_fail=False)
output_handler.after_end()
# Non-zero return codes are non-fatal for now since we have some
# issues with unresolved promises that shouldn't otherwise block
# running the tests
if proc.returncode != 0:
logger.warning("npm exited with code %s" % proc.returncode)
if params["write_results"] and output_handler.has_unexpected:
with open(params["write_results"], "w") as f:
json.dump(output_handler.new_expected(), f, indent=2)
if output_handler.has_unexpected:
exit(1, "Got unexpected results")
def create_parser_puppeteer():
p = argparse.ArgumentParser()
p.add_argument("--product",
type=str,
default="firefox",
choices=["chrome", "firefox"])
p.add_argument("--binary",
type=str,
help="Path to browser binary. Defaults to local Firefox build.")
p.add_argument("--enable-fission",
action="store_true",
help="Enable Fission (site isolation) in Gecko.")
p.add_argument("-z", "--headless",
action="store_true",
help="Run browser in headless mode.")
p.add_argument("--setpref",
action="append",
dest="extra_prefs",
metavar="<pref>=<value>",
help="Defines additional user preferences.")
p.add_argument("--setopt",
action="append",
dest="extra_options",
metavar="<option>=<value>",
help="Defines additional options for `puppeteer.launch`.")
p.add_argument("-j",
dest="jobs",
type=int,
metavar="<N>",
help="Optionally run tests in parallel.")
p.add_argument("-v",
dest="verbosity",
action="count",
default=0,
help="Increase remote agent logging verbosity to include "
"debug level messages with -v, trace messages with -vv,"
"and to not truncate long trace messages with -vvv")
p.add_argument("--write-results",
action="store",
nargs="?",
default=None,
const=os.path.join(os.path.dirname(__file__),
"puppeteer-expected.json"),
help="Path to write updated results to (defaults to the "
"expectations file if the argument is provided but "
"no path is passed)")
p.add_argument("tests", nargs="*")
mozlog.commandline.add_logging_group(p)
return p
@CommandProvider
class PuppeteerTest(MachCommandBase):
@Command("puppeteer-test", category="testing",
description="Run Puppeteer unit tests.")
@CommandArgument("--product",
type=str,
default="firefox",
choices=["chrome", "firefox"])
@CommandArgument("--binary",
type=str,
help="Path to browser binary. Defaults to local Firefox build.")
@CommandArgument("--enable-fission",
action="store_true",
help="Enable Fission (site isolation) in Gecko.")
@CommandArgument("-z", "--headless",
action="store_true",
help="Run browser in headless mode.")
@CommandArgument("--setpref",
action="append",
dest="extra_prefs",
metavar="<pref>=<value>",
help="Defines additional user preferences.")
@CommandArgument("--setopt",
action="append",
dest="extra_options",
metavar="<option>=<value>",
help="Defines additional options for `puppeteer.launch`.")
@CommandArgument("-j",
dest="jobs",
type=int,
metavar="<N>",
help="Optionally run tests in parallel.")
@CommandArgument("-v",
dest="verbosity",
action="count",
default=0,
help="Increase remote agent logging verbosity to include "
"debug level messages with -v, trace messages with -vv,"
"and to not truncate long trace messages with -vvv")
@CommandArgument("tests", nargs="*")
description="Run Puppeteer unit tests.",
parser=create_parser_puppeteer)
def puppeteer_test(self, binary=None, enable_fission=False, headless=False,
extra_prefs=None, extra_options=None, jobs=1, verbosity=0,
tests=None, product="firefox", **kwargs):
tests=None, product="firefox", write_results=None, **kwargs):
logger = mozlog.commandline.setup_logging("puppeteer-test",
kwargs,
{"mach": sys.stdout})
# moztest calls this programmatically with test objects or manifests
if "test_objects" in kwargs and tests is not None:
raise ValueError("Expected either 'test_objects' or 'tests'")
logger.error("Expected either 'test_objects' or 'tests'")
exit(1)
if product != "firefox" and extra_prefs is not None:
raise ValueError("User preferences are not recognized by %s" % product)
logger.error("User preferences are not recognized by %s" % product)
exit(1)
if "test_objects" in kwargs:
tests = []
@@ -296,14 +436,16 @@ class PuppeteerTest(MachCommandBase):
for s in (extra_prefs or []):
kv = s.split("=")
if len(kv) != 2:
exit(EX_USAGE, "syntax error in --setpref={}".format(s))
logger.error("syntax error in --setpref={}".format(s))
exit(EX_USAGE)
prefs[kv[0]] = kv[1].strip()
options = {}
for s in (extra_options or []):
kv = s.split("=")
if len(kv) != 2:
exit(EX_USAGE, "syntax error in --setopt={}".format(s))
logger.error("syntax error in --setopt={}".format(s))
exit(EX_USAGE)
options[kv[0]] = kv[1].strip()
if enable_fission:
@@ -325,10 +467,11 @@ class PuppeteerTest(MachCommandBase):
"extra_prefs": prefs,
"product": product,
"jobs": jobs,
"extra_launcher_options": options}
"extra_launcher_options": options,
"write_results": write_results}
puppeteer = self._spawn(PuppeteerRunner)
try:
return puppeteer.run_test(*tests, **params)
return puppeteer.run_test(logger, *tests, **params)
except Exception as e:
exit(EX_SOFTWARE, e)