Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1583364 - Update testing/runtimes/writeruntimes script to write info at the manifest level, r=gbrown
The new format will be: { <path/to/manifest.ini>: <average duration> } Differential Revision: https://phabricator.services.mozilla.com/D53701 --HG-- extra : moz-landing-system : lando
This commit is contained in:
Parent
ccc0b49e8b
Commit
927d595f99
|
@ -1,19 +1,22 @@
|
|||
Test Runtimes
|
||||
=============
|
||||
|
||||
These files contain test runtimes for various suites across different platforms. Each JSON file
|
||||
corresponds to a single test job in production and has the following format:
|
||||
These files contain runtimes for test manifests in the tree. They are of the form:
|
||||
|
||||
{ '<test id>': <average runtime> }
|
||||
{ '<path to manifest>': <average runtime in seconds> }
|
||||
|
||||
These files are being used to normalize chunk durations so all chunks take roughly the same length
|
||||
of time. They are still experimental and their format and/or file structure are subject to change
|
||||
without notice.
|
||||
They are being used to normalize chunk durations so all chunks take roughly
|
||||
the same length of time.
|
||||
|
||||
Generating a Test Runtime File
|
||||
------------------------------
|
||||
|
||||
The writeruntimes.py script can be used to generate a runtime file. You must
|
||||
specify the suite for which the runtimes are to be generated, e.g.
|
||||
The ``writeruntimes`` script can be used to generate this file:
|
||||
|
||||
writeruntimes.py -s mochitest-media
|
||||
$ ./writeruntimes
|
||||
|
||||
It will take a while. You can optionally specify platforms or suites on the
|
||||
command line, but these should only be used for debugging purposes (not for
|
||||
committing an update to the data). For more info, see:
|
||||
|
||||
$ ./writeruntimes -- --help
|
||||
|
|
|
@ -15,25 +15,40 @@ echo "mach not found, either add it to your \$PATH or run this script via ./mach
|
|||
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
from argparse import ArgumentParser
|
||||
from collections import defaultdict
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from argparse import ArgumentParser
|
||||
from collections import defaultdict
|
||||
|
||||
import requests
|
||||
|
||||
from moztest.resolve import (
|
||||
TestManifestLoader,
|
||||
TestResolver,
|
||||
TEST_SUITES,
|
||||
)
|
||||
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
ACTIVE_DATA_URL = "https://activedata.allizom.org/query"
|
||||
PERCENTILE = 0.5 # ignore the bottom PERCENTILE*100% of numbers
|
||||
|
||||
|
||||
def query_activedata(suite):
|
||||
last_week = datetime.datetime.now() - datetime.timedelta(days=7)
|
||||
last_week_timestamp = time.mktime(last_week.timetuple())
|
||||
def query_activedata(suite, platform):
|
||||
if platform in ('windows', 'android'):
|
||||
platform_clause = '{"find":{"run.machine.platform": "%s"}}' % platform
|
||||
else:
|
||||
platform_clause = '''
|
||||
{
|
||||
"not": {
|
||||
"or": [
|
||||
{"find":{"run.machine.platform": "windows"}},
|
||||
{"find":{"run.machine.platform": "android"}}
|
||||
]
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
query = """
|
||||
{
|
||||
|
@ -42,104 +57,105 @@ def query_activedata(suite):
|
|||
"groupby":["result.test"],
|
||||
"select":{"value":"result.duration","aggregate":"average"},
|
||||
"where":{"and":[
|
||||
{"eq":{"run.suite.flavor":"%s"}},
|
||||
{"gt":{"run.timestamp":%s}}
|
||||
{"eq":{"repo.branch.name": "mozilla-central"}},
|
||||
{"in":{"result.status": ["OK", "PASS", "FAIL"]}},
|
||||
{"gt":{"run.timestamp": {"date": "today-week"}}},
|
||||
{"not": {"find": {"run.name": "-ccov"}}},
|
||||
{"not": {"find": {"run.name": "-shippable"}}},
|
||||
{"eq":{"run.suite.fullname":"%s"}},
|
||||
%s
|
||||
]}
|
||||
}
|
||||
""" % (suite, last_week_timestamp)
|
||||
""" % (suite, platform_clause)
|
||||
|
||||
print("Running ActiveData query:")
|
||||
print(query)
|
||||
print("Querying ActiveData for '{}' tests on '{}' platforms.. "
|
||||
.format(suite, platform), end='')
|
||||
sys.stdout.flush()
|
||||
response = requests.post(ACTIVE_DATA_URL,
|
||||
data=query,
|
||||
stream=True)
|
||||
response.raise_for_status()
|
||||
data = response.json()["data"]
|
||||
data = dict(response.json()["data"])
|
||||
print("{} found".format(len(data)))
|
||||
return data
|
||||
|
||||
|
||||
def write_runtimes(data, suite, indir=here, outdir=here):
|
||||
data = dict(data)
|
||||
|
||||
outfilename = os.path.join(outdir, "%s.runtimes.json" % suite)
|
||||
infilename = os.path.join(indir, "%s.runtimes.json" % suite)
|
||||
def write_runtimes(manifest_runtimes, platform, outdir=here):
|
||||
if not os.path.exists(outdir):
|
||||
os.makedirs(outdir)
|
||||
|
||||
# read in existing data, if any
|
||||
indata = None
|
||||
if os.path.exists(infilename):
|
||||
with open(infilename, 'r') as f:
|
||||
indata = json.loads(f.read()).get('runtimes')
|
||||
|
||||
# identify a threshold of durations, below which we ignore
|
||||
runtimes = []
|
||||
for result in data.itervalues():
|
||||
duration = int(result * 1000) if result else 0
|
||||
if duration:
|
||||
runtimes.append(duration)
|
||||
runtimes.sort()
|
||||
threshold = runtimes[int(len(runtimes) * PERCENTILE)]
|
||||
|
||||
# split the durations into two groups; omitted and specified
|
||||
omitted = []
|
||||
specified = indata if indata else {}
|
||||
current_tests = []
|
||||
for test, duration in data.iteritems():
|
||||
current_tests.append(test)
|
||||
duration = int(duration * 1000) if duration else 0
|
||||
if duration > 0 and duration < threshold:
|
||||
omitted.append(duration)
|
||||
if test in specified:
|
||||
del specified[test]
|
||||
elif duration >= threshold and test != "automation.py":
|
||||
original = specified.get(test, 0)
|
||||
if not original or abs(original - duration) > (original/20):
|
||||
# only write new data if it's > 20% different than original
|
||||
specified[test] = duration
|
||||
|
||||
# delete any test references no longer needed
|
||||
to_delete = []
|
||||
for test in specified:
|
||||
if test not in current_tests:
|
||||
to_delete.append(test)
|
||||
for test in to_delete:
|
||||
del specified[test]
|
||||
|
||||
avg = int(sum(omitted)/len(omitted))
|
||||
|
||||
results = {'excluded_test_average': avg,
|
||||
'runtimes': specified}
|
||||
|
||||
outfilename = os.path.join(outdir, "manifest-runtimes-{}.json".format(platform))
|
||||
with open(outfilename, 'w') as f:
|
||||
f.write(json.dumps(results, indent=2, sort_keys=True))
|
||||
f.write(json.dumps(manifest_runtimes, indent=2, sort_keys=True))
|
||||
|
||||
|
||||
def compute_manifest_runtimes(suites, platform):
|
||||
resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
|
||||
|
||||
crashtest_prefixes = {
|
||||
'http': '/tests/',
|
||||
'chrome': '/reftest/content/',
|
||||
'file': '/reftest/tests/',
|
||||
}
|
||||
manifest_runtimes = defaultdict(float)
|
||||
for suite in suites:
|
||||
data = query_activedata(suite, platform)
|
||||
|
||||
for path, duration in data.items():
|
||||
if not path:
|
||||
continue
|
||||
|
||||
if suite in ('reftest', 'crashtest') and ' ' in path:
|
||||
path = path.split()[0]
|
||||
|
||||
if suite == 'crashtest' and '://' in path:
|
||||
# Crashtest paths are URLs with various schemes and prefixes.
|
||||
# Normalize it to become relative to mozilla-central.
|
||||
scheme = path[:path.index('://')]
|
||||
if ':' in scheme:
|
||||
scheme = scheme.split(':')[-1]
|
||||
|
||||
prefix = crashtest_prefixes[scheme]
|
||||
path = path.split(prefix, 1)[-1]
|
||||
elif suite == 'xpcshell' and ':' in path:
|
||||
path = path.split(':', 1)[-1]
|
||||
|
||||
if path not in resolver.tests_by_path:
|
||||
continue
|
||||
|
||||
for test in resolver.tests_by_path[path]:
|
||||
manifest = test['manifest_relpath']
|
||||
manifest_runtimes[manifest] += duration
|
||||
|
||||
manifest_runtimes = {k: round(v, 2) for k, v in manifest_runtimes.items()}
|
||||
return manifest_runtimes
|
||||
|
||||
|
||||
def cli(args=sys.argv[1:]):
|
||||
default_suites = [suite for suite, obj in TEST_SUITES.items() if 'build_flavor' in obj]
|
||||
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('-o', '--output-directory', dest='outdir',
|
||||
default=here, help="Directory to save runtime data.")
|
||||
|
||||
parser.add_argument('-i', '--input-directory', dest='indir',
|
||||
default=here, help="Directory from which to read current runtime data.")
|
||||
|
||||
parser.add_argument('-s', '--suite', dest='suite', default=None,
|
||||
help="Suite for which to generate data.")
|
||||
|
||||
parser.add_argument('-o', '--output-directory', dest='outdir', default=here,
|
||||
help="Directory to save runtime data.")
|
||||
parser.add_argument('-s', '--suite', dest='suites', action='append',
|
||||
default=None, choices=default_suites,
|
||||
help="Suite(s) to include in the data set (default: all)")
|
||||
parser.add_argument('-p', '--platform', dest='platforms', action='append',
|
||||
default=None, choices=['android', 'unix', 'windows'],
|
||||
help="Platform(s) to gather runtime information on "
|
||||
"(default: all).")
|
||||
args = parser.parse_args(args)
|
||||
|
||||
if not args.suite:
|
||||
raise ValueError("Must specify suite with the -s argument")
|
||||
if ',' in args.suite:
|
||||
raise ValueError("Passing multiple suites is not supported")
|
||||
suites = args.suites or default_suites
|
||||
platforms = args.platforms or ['android', 'windows', 'unix']
|
||||
for platform in platforms:
|
||||
runtimes = compute_manifest_runtimes(suites, platform)
|
||||
if not runtimes:
|
||||
print("Not creating runtimes file for '{}' as no data was found".format(platform))
|
||||
continue
|
||||
|
||||
suite = args.suite
|
||||
data = query_activedata(suite)
|
||||
write_runtimes(runtimes, platform, outdir=args.outdir)
|
||||
|
||||
if not data:
|
||||
print("Not creating runtimes file as no data was found")
|
||||
else:
|
||||
write_runtimes(data, suite, indir=args.indir, outdir=args.outdir)
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(cli())
|
||||
|
|
Loading…
Open link in new issue