Bug 1894882 - Add support for displaying long running jit tests r=dminor

This adds the --show-slow and --slow-test-threshold options, already present
for the js tests, to the jit tests harness.
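
For example, the jit-test harness could then be invoked locally along these
lines (the shell path and the threshold value here are purely illustrative):

  $ js/src/jit-test/jit_test.py --show-slow --slow-test-threshold 10 path/to/dist/bin/js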

This extends jstests to output this information via structured logging so that
it is available in automation. It doesn't appear in the test log by default,
but it is present in main_raw.log.
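
For illustration, each of these messages is emitted as a mozlog-style "log"
record, so a line in main_raw.log would look roughly like this (any fields
beyond action, level and message depend on what TestLogger adds to the record):

  {"action": "log", "level": "INFO", "message": "Slowest tests that took longer than 5.0s:"}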

This also enables slow-test reporting by default in automation.
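
With --show-slow enabled, the end-of-run summary reports the fraction of tests
that finished under the threshold, followed by up to 20 of the slowest tests;
the output looks roughly like this (the test names and timings are made up):

  98.50% of tests ran in under 5.0s
  Slowest tests that took longer than 5.0s:
    12.34 --ion-eager basic/bug1234.js
     7.02 gc/bug5678.js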

Differential Revision: https://phabricator.services.mozilla.com/D209362
Jon Coppeard 2024-05-03 15:29:30 +00:00
Parent a9b35c6156
Commit 49c751f9ec
5 changed files with 78 additions and 22 deletions

View file

@@ -570,6 +570,10 @@ if use_minidump:
     for v in ("JSTESTS_EXTRA_ARGS", "JITTEST_EXTRA_ARGS"):
         env[v] = "--args='--dll %s' %s" % (injector_lib, env.get(v, ""))
+# Report longest running jit-tests in automation.
+env["JITTEST_EXTRA_ARGS"] = "--show-slow " + env.get("JITTEST_EXTRA_ARGS", "")
+env["JSTESTS_EXTRA_ARGS"] = "--show-slow " + env.get("JSTESTS_EXTRA_ARGS", "")
 # Always run all enabled tests, even if earlier ones failed. But return the
 # first failed status.
 results = [("(make-nonempty)", 0)]

View file

@@ -368,6 +368,18 @@ def main(argv):
     op.add_argument(
         "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
     )
+    op.add_argument(
+        "--show-slow",
+        action="store_true",
+        help="Show tests taking longer than a minimum time (in seconds).",
+    )
+    op.add_argument(
+        "--slow-test-threshold",
+        type=float,
+        default=5.0,
+        help="Time in seconds a test can take until it is considered slow "
+        "(default %(default)s).",
+    )
     options, test_args = op.parse_known_args(argv)
     js_shell = which(options.js_shell)

View file

@@ -605,7 +605,12 @@ def print_automation_format(ok, res, slog):
         print("INFO (warn-stderr) 2> " + line.strip())
-def print_test_summary(num_tests, failures, complete, doing, options):
+def print_test_summary(num_tests, failures, complete, slow_tests, doing, options):
+    def test_details(res):
+        if options.show_failed:
+            return escape_cmdline(res.cmd)
+        return " ".join(res.test.jitflags + [res.test.relpath_tests])
     if failures:
         if options.write_failures:
             try:
@@ -630,21 +635,15 @@ def print_test_summary(num_tests, failures, complete, doing, options):
                 traceback.print_exc()
                 sys.stderr.write("---\n")
-        def show_test(res):
-            if options.show_failed:
-                print(" " + escape_cmdline(res.cmd))
-            else:
-                print(" " + " ".join(res.test.jitflags + [res.test.relpath_tests]))
         print("FAILURES:")
         for res in failures:
             if not res.timed_out:
-                show_test(res)
+                print(" " + test_details(res))
         print("TIMEOUTS:")
         for res in failures:
             if res.timed_out:
-                show_test(res)
+                print(" " + test_details(res))
     else:
         print(
             "PASSED ALL"
@@ -661,6 +660,23 @@ def print_test_summary(num_tests, failures, complete, doing, options):
         print("Passed: {:d}".format(num_tests - num_failures))
         print("Failed: {:d}".format(num_failures))
+    if num_tests != 0 and options.show_slow:
+        threshold = options.slow_test_threshold
+        fraction_fast = 1 - len(slow_tests) / num_tests
+        print(
+            "{:5.2f}% of tests ran in under {}s".format(fraction_fast * 100, threshold)
+        )
+        print("Slowest tests that took longer than {}s:".format(threshold))
+        slow_tests.sort(key=lambda res: res.dt, reverse=True)
+        any = False
+        for i in range(min(len(slow_tests), 20)):
+            res = slow_tests[i]
+            print(" {:6.2f} {}".format(res.dt, test_details(res)))
+            any = True
+        if not any:
+            print("None")
     return not failures
@@ -686,11 +702,14 @@ def process_test_results(results, num_tests, pb, options, slog):
     complete = False
     output_dict = {}
     doing = "before starting"
+    slow_tests = []
     if num_tests == 0:
         pb.finish(True)
         complete = True
-        return print_test_summary(num_tests, failures, complete, doing, options)
+        return print_test_summary(
+            num_tests, failures, complete, slow_tests, doing, options
+        )
     try:
         for i, res in enumerate(results):
@@ -744,6 +763,9 @@ def process_test_results(results, num_tests, pb, options, slog):
                     "SKIP": 0,
                 },
             )
+            if res.dt > options.slow_test_threshold:
+                slow_tests.append(res)
         complete = True
     except KeyboardInterrupt:
         print(
@@ -752,7 +774,7 @@ def process_test_results(results, num_tests, pb, options, slog):
         )
     pb.finish(True)
-    return print_test_summary(num_tests, failures, complete, doing, options)
+    return print_test_summary(num_tests, failures, complete, slow_tests, doing, options)
 def run_tests(tests, num_tests, prefix, options, remote=False):

View file

@@ -372,6 +372,9 @@ class ResultsSink:
         else:
             self.list(completed)
+        if self.n != 0 and self.options.show_slow:
+            self.show_slow_tests()
         if self.wptreport is not None:
             self.wptreport.suite_end()
@@ -427,16 +430,28 @@ class ResultsSink:
         else:
             print("FAIL" + suffix)
-        if self.options.show_slow:
-            min_duration = self.options.slow_test_threshold
-            print("Slow tests (duration > {}s)".format(min_duration))
+    def show_slow_tests(self):
+        threshold = self.options.slow_test_threshold
+        fraction_fast = 1 - len(self.slow_tests) / self.n
+        self.log_info(
+            "{:5.2f}% of tests ran in under {}s".format(fraction_fast * 100, threshold)
+        )
+        self.log_info("Slowest tests that took longer than {}s:".format(threshold))
         slow_tests = sorted(self.slow_tests, key=lambda x: x.duration, reverse=True)
         any = False
-            for test in slow_tests:
-                print("{:>5} {}".format(round(test.duration, 2), test.test))
+        for i in range(min(len(slow_tests), 20)):
+            test = slow_tests[i]
+            self.log_info(" {:6.2f} {}".format(test.duration, test.test))
             any = True
         if not any:
-                print("None")
+            self.log_info("None")
+    def log_info(self, message):
+        if self.options.format == "automation":
+            self.slog.log_info(message)
+        else:
+            print(message)
     def all_passed(self):
         return "REGRESSIONS" not in self.groups and "TIMEOUTS" not in self.groups

View file

@@ -1,5 +1,5 @@
 # produce mozlog-compatible log messages, following the spec at
-# https://mozbase.readthedocs.io/en/latest/mozlog.html
+# https://firefox-source-docs.mozilla.org/mozbase/mozlog.html
 import json
 import os
@@ -54,3 +54,6 @@ class TestLogger(object):
         record["status"] = status
         record.update(**details)
         self._log_obj(record)
+    def log_info(self, message):
+        self._log(action="log", level="INFO", message=message)