servo: Merge #11107 - Bug 10452 - Page Load Time Test Runner (from shinglyu:servo-perf)

Source-Repo: https://github.com/servo/servo
Source-Revision: 7ed9134e5a8401380253f75d4a7ce43ab5027241
This commit is contained in:
Shing Lyu 2016-08-08 16:22:50 +08:00
Parent 7756e28712
Commit fb11a939f3
15 changed files: 1485 additions and 0 deletions

servo/etc/ci/performance/.gitignore (vendored) Normal file
View file

@@ -0,0 +1,5 @@
servo/*
output.png
output/*
__pycache__/

View file

@@ -0,0 +1,68 @@
Servo Page Load Time Test
==============
# Prerequisites
* Python3
# Basic Usage
## Prepare the test runner
* Clone this repo
* Download [tp5n.zip](http://people.mozilla.org/~jmaher/taloszips/zips/tp5n.zip), extract it to `page_load_test/tp5n`
* Run `prepare_manifest.sh` to transform the tp5n manifest to our format
* Install the Python3 `treeherder-client` package. For example, to install it in a virtualenv: `python3 -m virtualenv venv; source venv/bin/activate; pip install treeherder-client`
* Set up your Treeherder client ID and secret as the environment variables `TREEHERDER_CLIENT_ID` and `TREEHERDER_CLIENT_SECRET`
## Build Servo
* Clone the servo repo
* Compile release build
* Run `git_log_to_json.sh` in the servo repo and save the output as `revision.json` (expected shape sketched below)
* Put your `servo` binary, `revision.json` and `resources` folder in `etc/ci/performance/servo/`
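For reference, here is a sketch of the `revision.json` shape implied by the `git_log_to_json.sh` format string and by the fields `submit_to_perfherder.py` reads; all values are placeholders:

```python
# Expected shape of revision.json (placeholder values, matching the
# %H, %s, %aN, %aE and %at fields emitted by git_log_to_json.sh):
revision = {
    "commit": "<full commit hash>",
    "subject": "<commit subject line>",
    "author": {
        "name": "<author name>",
        "email": "<author email>",
        "timestamp": "<unix timestamp, as a string>"
    }
}
```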
## Run
* Activate the virtualenv: `source venv/bin/activate`
* Sync your system clock before running; the Perfherder API SSL check will fail if your system clock is inaccurate (e.g. `sudo ntpdate tw.pool.ntp.org`)
* Run `test_all.sh`
* Test results are submitted to https://treeherder.mozilla.org/#/jobs?repo=servo
# How it works
* The test cases are from tp5. Each test case runs 20 times, and we take the median (see the sketch after this list).
* Some tests make Servo run forever; they are disabled for now. See https://github.com/servo/servo/issues/11087
* Each test case is a subtest on Perfherder, and the suite summary time is the geometric mean of all the subtests.
* Note that this test is different from the Talos TP5 test we run for Gecko, so you can NOT conclude that Servo is "faster" or "slower" than Gecko from it.
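A minimal sketch of that aggregation, using the same `statistics.median` and geometric-mean logic as `runner.py` and `submit_to_perfherder.py` (the timing numbers are made up):

```python
from functools import reduce
from statistics import median
import operator

# domComplete-minus-navigationStart times (ms) per test case; made-up values.
runs_per_testcase = {
    "a.com": [1200, 980, 1100],
    "b.com": [450, 430, 470],
}

# Per-subtest value: the median over all runs of that test case.
subtests = {name: median(runs) for name, runs in runs_per_testcase.items()}

# Suite summary: the geometric mean of the subtest values.
values = list(subtests.values())
summary = reduce(operator.mul, values) ** (1.0 / len(values))
print(subtests, summary)
```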
# Unit tests
You can run all unit tests (including 3rd-party libraries) with `python -m pytest`.
Individual tests can be run with `python -m pytest <filename>`:
* `test_runner.py`
* `test_submit_to_perfherder.py`
# Advanced Usage
## Test Perfherder Locally
If you want to test the data submission code in `submit_to_perfherder.py` without obtaining credentials for the production server, you can set up a local Treeherder VM. If you don't need to test `submit_to_perfherder.py`, you can skip this step.
* Add `192.168.33.10 local.treeherder.mozilla.org` to `/etc/hosts`
* `git clone https://github.com/mozilla/treeherder; cd treeherder`
* `vagrant up`
* `vagrant ssh`
* `./bin/run_gunicorn`
* Outside of the VM, open `http://local.treeherder.mozilla.org` and log in to create an account
* `vagrant ssh`
* `./manage.py create_credentials <username> <email> "description"`; the email has to match your logged-in user. Remember to log in through the Web UI once before you run this.
* Set up your Treeherder client ID and secret as the environment variables `TREEHERDER_CLIENT_ID` and `TREEHERDER_CLIENT_SECRET` (see the sketch below)
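With the credentials in place, the submission code can be pointed at the VM by swapping the host passed to `TreeherderClient` (a sketch based on the client construction in `submit_to_perfherder.py`; whether the local instance accepts plain `http` is an assumption):

```python
import os
from thclient import TreeherderClient

# Same construction as in submit_to_perfherder.py, but aimed at the local VM.
# Assumption: the Vagrant instance is reachable over plain http.
client = TreeherderClient(protocol='http',
                          host='local.treeherder.mozilla.org',
                          client_id=os.environ['TREEHERDER_CLIENT_ID'],
                          secret=os.environ['TREEHERDER_CLIENT_SECRET'])
```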
## For Gecko
* Install Firefox Nightly and make sure it is in your PATH
* Install [jpm](https://developer.mozilla.org/en-US/Add-ons/SDK/Tools/jpm#Installation)
* Run `jpm xpi` in the `firefox/addon` folder
* Install the generated `xpi` file to your Firefox Nightly

View file

@@ -0,0 +1,2 @@
# Servo Performance Comparison
Monitor website rendering performance

View file

@@ -0,0 +1 @@
../../../user-agent-js/01.perf-timing.js

View file

@@ -0,0 +1,8 @@
var self = require("sdk/self");
var pageMod = require("sdk/page-mod");
pageMod.PageMod({
include: "*",
contentScriptFile: self.data.url('perf.js'),
attachTo: ["top", "existing"]
});

View file

@@ -0,0 +1,16 @@
{
"title": "Servo Performance Comparison",
"name": "addon",
"version": "0.0.1",
"description": "Monitor website rendering performance",
"main": "index.js",
"author": "The Servo team",
"engines": {
"firefox": ">=38.0a1",
"fennec": ">=38.0a1"
},
"license": "MPL",
"keywords": [
"jetpack"
]
}

View file

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
set -o errexit
set -o nounset
set -o pipefail
# Don't include the commit body, to avoid multiline and unescaped strings
git log -n 1 --pretty=format:'{%n "commit": "%H",%n "subject": "%s",%n "author": {%n "name": "%aN",%n "email": "%aE",%n "timestamp": "%at"%n }%n %n}'

View file

@@ -0,0 +1,52 @@
http://localhost:8000/tp6/news.ycombinator.com/index.html
# http://localhost:8000/page_load_test/163.com/www.163.com/index.html
http://localhost:8000/page_load_test/56.com/www.56.com/index.html
# http://localhost:8000/page_load_test/aljazeera.net/aljazeera.net/portal.html
http://localhost:8000/page_load_test/amazon.com/www.amazon.com/Kindle-Wireless-Reader-Wifi-Graphite/dp/B002Y27P3M/507846.html
http://localhost:8000/page_load_test/bbc.co.uk/www.bbc.co.uk/news/index.html
http://localhost:8000/page_load_test/beatonna.livejournal.com/beatonna.livejournal.com/index.html
# http://localhost:8000/page_load_test/bild.de/www.bild.de/index.html
http://localhost:8000/page_load_test/cgi.ebay.com/cgi.ebay.com/ALL-NEW-KINDLE-3-eBOOK-WIRELESS-READING-DEVICE-W-WIFI-/130496077314@pt=LH_DefaultDomain_0&hash=item1e622c1e02.html
http://localhost:8000/page_load_test/chemistry.about.com/chemistry.about.com/index.html
# http://localhost:8000/page_load_test/chinaz.com/chinaz.com/index.html
http://localhost:8000/page_load_test/cnn.com/www.cnn.com/index.html
http://localhost:8000/page_load_test/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html
http://localhost:8000/page_load_test/dailymotion.com/www.dailymotion.com/us.html
# http://localhost:8000/page_load_test/digg.com/digg.com/news/story/New_logo_for_Mozilla_Firefox_browser.html
http://localhost:8000/page_load_test/ezinearticles.com/ezinearticles.com/index.html@Migraine-Ocular---The-Eye-Migraines&id=4684133.html
http://localhost:8000/page_load_test/globo.com/www.globo.com/index.html
http://localhost:8000/page_load_test/google.com/www.google.com/search@q=mozilla.html
http://localhost:8000/page_load_test/goo.ne.jp/goo.ne.jp/index.html
# http://localhost:8000/page_load_test/guardian.co.uk/www.guardian.co.uk/index.html
# http://localhost:8000/page_load_test/homeway.com.cn/www.hexun.com/index.html
http://localhost:8000/page_load_test/huffingtonpost.com/www.huffingtonpost.com/index.html
# http://localhost:8000/page_load_test/ifeng.com/ifeng.com/index.html
# http://localhost:8000/page_load_test/imdb.com/www.imdb.com/title/tt1099212/index.html
http://localhost:8000/page_load_test/imgur.com/imgur.com/gallery/index.html
# http://localhost:8000/page_load_test/indiatimes.com/www.indiatimes.com/index.html
http://localhost:8000/page_load_test/mail.ru/mail.ru/index.html
# http://localhost:8000/page_load_test/mashable.com/mashable.com/index.html
http://localhost:8000/page_load_test/media.photobucket.com/media.photobucket.com/image/funny%20gif/findstuff22/Best%20Images/Funny/funny-gif1.jpg@o=1.html
http://localhost:8000/page_load_test/myspace.com/www.myspace.com/albumart.html
# http://localhost:8000/page_load_test/naver.com/www.naver.com/index.html
# http://localhost:8000/page_load_test/noimpactman.typepad.com/noimpactman.typepad.com/index.html
http://localhost:8000/page_load_test/page.renren.com/page.renren.com/index.html
# http://localhost:8000/page_load_test/people.com.cn/people.com.cn/index.html
http://localhost:8000/page_load_test/rakuten.co.jp/www.rakuten.co.jp/index.html
http://localhost:8000/page_load_test/reddit.com/www.reddit.com/index.html
# http://localhost:8000/page_load_test/reuters.com/www.reuters.com/index.html
# http://localhost:8000/page_load_test/slideshare.net/www.slideshare.net/jameswillamor/lolcats-in-popular-culture-a-historical-perspective.html
# http://localhost:8000/page_load_test/sohu.com/www.sohu.com/index.html
http://localhost:8000/page_load_test/spiegel.de/www.spiegel.de/index.html
http://localhost:8000/page_load_test/stackoverflow.com/stackoverflow.com/questions/184618/what-is-the-best-comment-in-source-code-you-have-ever-encountered.html
# http://localhost:8000/page_load_test/store.apple.com/store.apple.com/us@mco=Nzc1MjMwNA.html
# http://localhost:8000/page_load_test/thepiratebay.org/thepiratebay.org/top/201.html
http://localhost:8000/page_load_test/tudou.com/www.tudou.com/index.html
# http://localhost:8000/page_load_test/uol.com.br/www.uol.com.br/index.html
http://localhost:8000/page_load_test/w3.org/www.w3.org/standards/webdesign/htmlcss.html
# http://localhost:8000/page_load_test/wsj.com/online.wsj.com/home-page.html
# http://localhost:8000/page_load_test/xinhuanet.com/xinhuanet.com/index.html
http://localhost:8000/page_load_test/xunlei.com/xunlei.com/index.html
http://localhost:8000/page_load_test/yelp.com/www.yelp.com/biz/alexanders-steakhouse-cupertino.html
http://localhost:8000/page_load_test/youku.com/www.youku.com/index.html
http://localhost:8000/page_load_test/youtube.com/www.youtube.com/music.html

View file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
set -o errexit
set -o nounset
set -o pipefail
# The TP5 manifest uses `localhost`, but our local server doesn't run on port 80
sed 's/localhost\/page_load_test\/tp5n/localhost:8000\/page_load_test\/tp5n/g' \
./page_load_test/tp5n/tp5o.manifest > ./page_load_test/tp5n/tp5o_8000.manifest

View file

@@ -0,0 +1,265 @@
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import itertools
import json
import os
import subprocess
from statistics import median, StatisticsError
def load_manifest(filename):
with open(filename, 'r') as f:
text = f.read()
return list(parse_manifest(text))
def parse_manifest(text):
return filter(lambda x: x != "" and not x.startswith("#"),
map(lambda x: x.strip(), text.splitlines()))
def execute_test(url, command, timeout):
print("Running test:")
print(' '.join(command))
print("Timeout:{}".format(timeout))
try:
return subprocess.check_output(command, stderr=subprocess.STDOUT,
shell=True, timeout=timeout)
except subprocess.CalledProcessError as e:
print("Unexpected Fail:")
print(e)
print("You may want to re-run the test manually:\n{}"
.format(' '.join(command)))
except subprocess.TimeoutExpired:
print("Test timeout: {}".format(url))
return ""
def get_servo_command(url):
ua_script_path = "{}/user-agent-js".format(os.getcwd())
return ["./servo/servo", url,
" --userscripts", ua_script_path,
"-x", "-o", "output.png"]
def get_gecko_command(url):
return ["./firefox/firefox/firefox",
" --display=:0", "--no-remote"
" -profile", "./firefox/servo",
url]
def parse_log(log, testcase=None):
blocks = []
block = []
copy = False
for line_bytes in log.splitlines():
line = line_bytes.decode()
if line.strip() == ("[PERF] perf block start"):
copy = True
elif line.strip() == ("[PERF] perf block end"):
copy = False
blocks.append(block)
block = []
elif copy:
block.append(line)
# We need to still include the failed tests, otherwise Treeherder will
# consider the result to be a new test series, and thus a new graph. So we
# use a placeholder with values = -1 to make Treeherder happy, and still be
# able to identify failed tests (successful tests have time >=0).
placeholder = {
"navigationStart": 0,
"unloadEventStart": -1,
"domLoading": -1,
"fetchStart": -1,
"responseStart": -1,
"loadEventEnd": -1,
"connectStart": -1,
"domainLookupStart": -1,
"redirectStart": -1,
"domContentLoadedEventEnd": -1,
"requestStart": -1,
"secureConnectionStart": -1,
"connectEnd": -1,
"loadEventStart": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"redirectEnd": -1,
"domainLookupEnd": -1,
"unloadEventEnd": -1,
"responseEnd": -1,
"testcase": testcase,
"domComplete": -1,
}
def parse_block(block):
timing = {}
for line in block:
try:
(_, key, value) = line.split(",")
            except ValueError:
print("[DEBUG] failed to parse the following block:")
print(block)
print('[DEBUG] log:')
print('-----')
print(log)
print('-----')
return placeholder
if key == "testcase":
timing[key] = value
else:
timing[key] = None if (value == "undefined") else int(value)
if testcase is not None and timing['testcase'] != testcase:
print('[DEBUG] log:')
print('-----')
print(log)
print('-----')
return placeholder
return timing
if len(blocks) == 0:
print("Didn't find any perf data in the log, test timeout?")
print("Fillng in a dummy perf data")
print('[DEBUG] log:')
print('-----')
print(log)
print('-----')
return [placeholder]
else:
return map(parse_block, blocks)
def filter_result_by_manifest(result_json, manifest):
filtered = []
for name in manifest:
match = [tc for tc in result_json if tc['testcase'] == name]
if len(match) == 0:
raise Exception(("Missing test result: {}. This will cause a "
"discontinuity in the treeherder graph, "
"so we won't submit this data.").format(name))
filtered += match
return filtered
def take_result_median(result_json, expected_runs):
median_results = []
for k, g in itertools.groupby(result_json, lambda x: x['testcase']):
group = list(g)
if len(group) != expected_runs:
print(("Warning: Not enough test data for {},"
" maybe some runs failed?").format(k))
median_result = {}
for k, _ in group[0].items():
if k == "testcase":
median_result[k] = group[0][k]
else:
try:
median_result[k] = median([x[k] for x in group
if x[k] is not None])
except StatisticsError:
median_result[k] = -1
median_results.append(median_result)
return median_results
def save_result_json(results, filename, manifest, expected_runs):
results = filter_result_by_manifest(results, manifest)
results = take_result_median(results, expected_runs)
if len(results) == 0:
with open(filename, 'w') as f:
json.dump("No test result found in the log. All tests timeout?",
f, indent=2)
else:
with open(filename, 'w') as f:
json.dump(results, f, indent=2)
print("Result saved to {}".format(filename))
def format_result_summary(results):
failures = list(filter(lambda x: x['domComplete'] == -1, results))
result_log = """
========================================
Total {total} tests; {suc} succeeded, {fail} failed.
Failure summary:
""".format(
total=len(results),
suc=len(list(filter(lambda x: x['domComplete'] != -1, results))),
fail=len(failures)
)
uniq_failures = list(set(map(lambda x: x['testcase'], failures)))
for failure in uniq_failures:
result_log += " - {}\n".format(failure)
result_log += "========================================\n"
return result_log
def main():
parser = argparse.ArgumentParser(
description="Run page load test on servo"
)
parser.add_argument("tp5_manifest",
help="the test manifest in tp5 format")
parser.add_argument("output_file",
help="filename for the output json")
parser.add_argument("--runs",
type=int,
default=20,
help="number of runs for each test case. Defult: 20")
parser.add_argument("--timeout",
type=int,
default=300, # 5 min
help=("kill the test if not finished in time (sec)."
" Default: 5 min"))
parser.add_argument("--engine",
type=str,
default='servo',
help=("The engine to run the tests on. Currently only"
" servo and gecko are supported."))
args = parser.parse_args()
if args.engine == 'servo':
command_factory = get_servo_command
    elif args.engine == 'gecko':
        command_factory = get_gecko_command
    else:
        parser.error("Unsupported engine: {}".format(args.engine))
try:
# Assume the server is up and running
testcases = load_manifest(args.tp5_manifest)
results = []
for testcase in testcases:
command = (["timeout", "{timeout}s".format(args.timeout)] +
command_factory(testcase))
for run in range(args.runs):
print("Running test {}/{} on {}".format(run + 1,
args.runs,
testcase))
log = execute_test(testcase, command, args.timeout)
result = parse_log(log, testcase)
# TODO: Record and analyze other performance.timing properties
results += result
print(format_result_summary(results))
save_result_json(results, args.output_file, testcases, args.runs)
except KeyboardInterrupt:
print("Test stopped by user, saving partial result")
save_result_json(results, args.output_file, testcases, args.runs)
if __name__ == "__main__":
main()

View file

@@ -0,0 +1,363 @@
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
from functools import partial, reduce
import json
import operator
import os
import random
import string
from thclient import (TreeherderClient, TreeherderResultSetCollection,
TreeherderJobCollection)
import time
from runner import format_result_summary
def geometric_mean(iterable):
filtered = list(filter(lambda x: x > 0, iterable))
return (reduce(operator.mul, filtered)) ** (1.0 / len(filtered))
def format_testcase_name(name):
temp = name.replace('http://localhost:8000/page_load_test/', '')
temp = temp.replace('http://localhost:8000/tp6/', '')
temp = temp.split('/')[0]
temp = temp[0:80]
return temp
def format_perf_data(perf_json, engine='servo'):
suites = []
measurement = "domComplete" # Change this to an array when we have more
def get_time_from_nav_start(timings, measurement):
return timings[measurement] - timings['navigationStart']
measurementFromNavStart = partial(get_time_from_nav_start,
measurement=measurement)
if (engine == 'gecko'):
name = 'gecko.{}'.format(measurement)
else:
name = measurement
suite = {
"name": name,
"value": geometric_mean(map(measurementFromNavStart, perf_json)),
"subtests": []
}
for testcase in perf_json:
if measurementFromNavStart(testcase) < 0:
value = -1
# print('Error: test case has negative timing. Test timeout?')
else:
value = measurementFromNavStart(testcase)
suite["subtests"].append({
"name": format_testcase_name(testcase["testcase"]),
"value": value
})
suites.append(suite)
return {
"performance_data": {
# https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
"framework": {"name": "servo-perf"},
"suites": suites
}
}
def create_resultset_collection(dataset):
print("[DEBUG] ResultSet Collection:")
print(dataset)
trsc = TreeherderResultSetCollection()
for data in dataset:
trs = trsc.get_resultset()
trs.add_push_timestamp(data['push_timestamp'])
trs.add_revision(data['revision'])
trs.add_author(data['author'])
# TODO: figure out where type is used
# trs.add_type(data['type'])
revisions = []
for rev in data['revisions']:
tr = trs.get_revision()
tr.add_revision(rev['revision'])
tr.add_author(rev['author'])
tr.add_comment(rev['comment'])
tr.add_repository(rev['repository'])
revisions.append(tr)
trs.add_revisions(revisions)
trsc.add(trs)
return trsc
def create_job_collection(dataset):
print("[DEBUG] Job Collection:")
print(dataset)
tjc = TreeherderJobCollection()
for data in dataset:
tj = tjc.get_job()
tj.add_revision(data['revision'])
tj.add_project(data['project'])
tj.add_coalesced_guid(data['job']['coalesced'])
tj.add_job_guid(data['job']['job_guid'])
tj.add_job_name(data['job']['name'])
tj.add_job_symbol(data['job']['job_symbol'])
tj.add_group_name(data['job']['group_name'])
tj.add_group_symbol(data['job']['group_symbol'])
tj.add_description(data['job']['desc'])
tj.add_product_name(data['job']['product_name'])
tj.add_state(data['job']['state'])
tj.add_result(data['job']['result'])
tj.add_reason(data['job']['reason'])
tj.add_who(data['job']['who'])
tj.add_tier(data['job']['tier'])
tj.add_submit_timestamp(data['job']['submit_timestamp'])
tj.add_start_timestamp(data['job']['start_timestamp'])
tj.add_end_timestamp(data['job']['end_timestamp'])
tj.add_machine(data['job']['machine'])
tj.add_build_info(
data['job']['build_platform']['os_name'],
data['job']['build_platform']['platform'],
data['job']['build_platform']['architecture']
)
tj.add_machine_info(
data['job']['machine_platform']['os_name'],
data['job']['machine_platform']['platform'],
data['job']['machine_platform']['architecture']
)
tj.add_option_collection(data['job']['option_collection'])
for artifact_data in data['job']['artifacts']:
tj.add_artifact(
artifact_data['name'],
artifact_data['type'],
artifact_data['blob']
)
tjc.add(tj)
return tjc
# TODO: refactor this big function to smaller chunks
def submit(perf_data, failures, revision, summary, engine):
print("[DEBUG] failures:")
print(list(map(lambda x: x['testcase'], failures)))
author = "{} <{}>".format(revision['author']['name'],
revision['author']['email'])
dataset = [
{
# The top-most revision in the list of commits for a push.
'revision': revision['commit'],
'author': author,
'push_timestamp': int(revision['author']['timestamp']),
'type': 'push',
# a list of revisions associated with the resultset. There should
# be at least one.
'revisions': [
{
'comment': revision['subject'],
'revision': revision['commit'],
'repository': 'servo',
'author': author
}
]
}
]
trsc = create_resultset_collection(dataset)
result = "success"
# TODO: verify a failed test won't affect Perfherder visualization
# if len(failures) > 0:
# result = "testfailed"
hashlen = len(revision['commit'])
job_guid = ''.join(
random.choice(string.ascii_letters + string.digits) for i in range(hashlen)
)
if (engine == "gecko"):
project = "servo"
job_symbol = 'PLG'
group_symbol = 'SPG'
group_name = 'Servo Perf on Gecko'
else:
project = "servo"
job_symbol = 'PL'
group_symbol = 'SP'
group_name = 'Servo Perf'
dataset = [
{
'project': project,
'revision': revision['commit'],
'job': {
'job_guid': job_guid,
'product_name': project,
'reason': 'scheduler',
# TODO: What is `who` for?
'who': 'Servo',
'desc': 'Servo Page Load Time Tests',
'name': 'Servo Page Load Time',
# The symbol representing the job displayed in
# treeherder.allizom.org
'job_symbol': job_symbol,
# The symbol representing the job group in
# treeherder.allizom.org
'group_symbol': group_symbol,
'group_name': group_name,
# TODO: get the real timing from the test runner
'submit_timestamp': str(int(time.time())),
'start_timestamp': str(int(time.time())),
'end_timestamp': str(int(time.time())),
'state': 'completed',
'result': result, # "success" or "testfailed"
'machine': 'local-machine',
# TODO: read platform from test result
'build_platform': {
'platform': 'linux64',
'os_name': 'linux',
'architecture': 'x86_64'
},
'machine_platform': {
'platform': 'linux64',
'os_name': 'linux',
'architecture': 'x86_64'
},
'option_collection': {'opt': True},
# jobs can belong to different tiers
# setting the tier here will determine which tier the job
# belongs to. However, if a job is set as Tier of 1, but
# belongs to the Tier 2 profile on the server, it will still
# be saved as Tier 2.
'tier': 1,
# the ``name`` of the log can be the default of "buildbot_text"
# however, you can use a custom name. See below.
# TODO: point this to the log when we have them uploaded to S3
'log_references': [
{
'url': 'TBD',
'name': 'test log'
}
],
# The artifact can contain any kind of structured data
# associated with a test.
'artifacts': [
{
'type': 'json',
'name': 'performance_data',
# TODO: include the job_guid when the runner actually
# generates one
# 'job_guid': job_guid,
'blob': perf_data
},
{
'type': 'json',
'name': 'Job Info',
# 'job_guid': job_guid,
"blob": {
"job_details": [
{
"content_type": "link",
"url": "https://www.github.com/servo/servo",
"value": "GitHub",
"title": "Source code"
},
{
"content_type": "raw_html",
"title": "Result Summary",
"value": summary
}
]
}
}
],
# List of job guids that were coalesced to this job
'coalesced': []
}
}
]
tjc = create_job_collection(dataset)
# TODO: extract this read credential code out of this function.
cred = {
'client_id': os.environ['TREEHERDER_CLIENT_ID'],
'secret': os.environ['TREEHERDER_CLIENT_SECRET']
}
client = TreeherderClient(protocol='https',
host='treeherder.allizom.org',
client_id=cred['client_id'],
secret=cred['secret'])
# data structure validation is automatically performed here, if validation
# fails a TreeherderClientError is raised
client.post_collection('servo', trsc)
client.post_collection('servo', tjc)
def main():
parser = argparse.ArgumentParser(
description=("Submit Servo performance data to Perfherder. "
"Remember to set your Treeherder credentail as environment"
" variable \'TREEHERDER_CLIENT_ID\' and "
"\'TREEHERDER_CLIENT_SECRET\'"))
parser.add_argument("perf_json",
help="the output json from runner")
parser.add_argument("revision_json",
help="the json containing the servo revision data")
parser.add_argument("--engine",
type=str,
default='servo',
help=("The engine to run the tests on. Currently only"
" servo and gecko are supported."))
args = parser.parse_args()
with open(args.perf_json, 'r') as f:
result_json = json.load(f)
with open(args.revision_json, 'r') as f:
revision = json.load(f)
perf_data = format_perf_data(result_json, args.engine)
failures = list(filter(lambda x: x['domComplete'] == -1, result_json))
summary = format_result_summary(result_json).replace('\n', '<br/>')
submit(perf_data, failures, revision, summary, args.engine)
print("Done!")
if __name__ == "__main__":
main()

View file

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
set -o errexit
set -o nounset
set -o pipefail
case "${1}" in
--servo)
engine="--engine servo"
;;
--gecko)
engine="--engine gecko"
;;
    *)
        # With `set -o nounset`, a missing ${1} aborts before this branch,
        # so we only get here on an unrecognized argument.
        echo "Unknown engine: ${1}. Use --servo or --gecko."
        exit 1
        ;;
esac
echo "Starting the local server"
python3 -m http.server > /dev/null 2>&1 &
# TODO: enable the full manifest when #11087 is fixed
# https://github.com/servo/servo/issues/11087
# MANIFEST="page_load_test/test.manifest"
MANIFEST="page_load_test/tp5n/20160509.manifest" # A manifest that excludes
# timeout test cases
PERF_FILE="output/perf-$(date --iso-8601=seconds).json"
echo "Running tests"
# ${engine} is intentionally unquoted so it splits into two arguments
python3 runner.py ${engine} --runs 3 "${MANIFEST}" "${PERF_FILE}"
echo "Submitting to Perfherder"
# Perfherder SSL check will fail if time is not accurate,
# sync time before you submit
# TODO: we are using Servo's revision hash for Gecko's result to make both
# results appear on the same date. Use the correct result when Perfherder
# allows us to change the date.
python3 submit_to_perfherder.py ${engine} "${PERF_FILE}" servo/revision.json
# The http server is killed by the EXIT trap set above.

View file

@@ -0,0 +1,445 @@
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import runner
import pytest
def test_log_parser():
mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
result = runner.parse_log(mock_log)
assert(expected == list(result))
def test_log_parser_complex():
mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/content.html
[PERF],navigationStart,1460358300
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Some other js error logs here
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
"navigationStart": 1460358300,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
result = runner.parse_log(mock_log)
assert(expected == list(result))
def test_log_parser_empty():
mock_log = b'''
[PERF] perf block start
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF] perf block end
'''
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
result = runner.parse_log(mock_log, mock_testcase)
assert(expected == list(result))
def test_log_parser_error():
mock_log = b'Nothing here! Test failed!'
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
result = runner.parse_log(mock_log, mock_testcase)
assert(expected == list(result))
def test_log_parser_bad_testcase_name():
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
# Notice the testcase is about:blank, servo crashed
mock_log = b'''
[PERF] perf block start
[PERF],testcase,about:blank
[PERF],navigationStart,1460358376
[PERF],unloadEventStart,undefined
[PERF],unloadEventEnd,undefined
[PERF],redirectStart,undefined
[PERF],redirectEnd,undefined
[PERF],fetchStart,undefined
[PERF],domainLookupStart,undefined
[PERF],domainLookupEnd,undefined
[PERF],connectStart,undefined
[PERF],connectEnd,undefined
[PERF],secureConnectionStart,undefined
[PERF],requestStart,undefined
[PERF],responseStart,undefined
[PERF],responseEnd,undefined
[PERF],domLoading,1460358376000
[PERF],domInteractive,1460358388000
[PERF],domContentLoadedEventStart,1460358388000
[PERF],domContentLoadedEventEnd,1460358388000
[PERF],domComplete,1460358389000
[PERF],loadEventStart,undefined
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
result = runner.parse_log(mock_log, mock_testcase)
assert(expected == list(result))
def test_manifest_loader():
text = '''
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
# Disabled! http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
'''
expected = [
"http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html",
"http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html",
"http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html"
]
assert(expected == list(runner.parse_manifest(text)))
def test_filter_result_by_manifest():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
"domComplete": 1460358389000,
}, {
"testcase": "non-existing-html",
"domComplete": 1460358389000,
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}]
manifest = [
"http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
]
assert(expected == runner.filter_result_by_manifest(input_json, manifest))
def test_filter_result_by_manifest_error():
input_json = [{
"testcase": "1.html",
"domComplete": 1460358389000,
}]
manifest = [
"1.html",
"2.html"
]
with pytest.raises(Exception) as execinfo:
runner.filter_result_by_manifest(input_json, manifest)
assert "Missing test result" in str(execinfo.value)
def test_take_result_median_odd():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389003,
"domLoading": 1460358380003
}]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380002
}]
assert(expected == runner.take_result_median(input_json, len(input_json)))
def test_take_result_median_even():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001.5,
"domLoading": 1460358380001.5
}]
assert(expected == runner.take_result_median(input_json, len(input_json)))
def test_take_result_median_error():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": None,
"domLoading": 1460358380002
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001.5
}]
assert(expected == runner.take_result_median(input_json, len(input_json)))
def test_log_result():
results = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": -1
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": -1
}, {
"testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html",
"domComplete": 123456789
}]
expected = """
========================================
Total 3 tests; 1 succeeded, 2 failed.
Failure summary:
- http://localhost:8000/page_load_test/56.com/www.56.com/index.html
========================================
"""
assert(expected == runner.format_result_summary(results))

View file

@@ -0,0 +1,134 @@
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import submit_to_perfherder
def test_format_testcase_name():
assert('about:blank' == submit_to_perfherder.format_testcase_name(
'about:blank'))
assert('163.com' == submit_to_perfherder.format_testcase_name((
'http://localhost:8000/page_load_test/163.com/p.mail.163.com/'
'mailinfo/shownewmsg_www_1222.htm.html')))
assert(('1234567890223456789032345678904234567890'
'5234567890623456789072345678908234567890') ==
submit_to_perfherder.format_testcase_name((
'1234567890223456789032345678904234567890'
'52345678906234567890723456789082345678909234567890')))
assert('news.ycombinator.com' == submit_to_perfherder.format_testcase_name(
'http://localhost:8000/tp6/news.ycombinator.com/index.html'))
def test_format_perf_data():
mock_result = [
{
"unloadEventStart": None,
"domLoading": 1460444930000,
"fetchStart": None,
"responseStart": None,
"loadEventEnd": None,
"connectStart": None,
"domainLookupStart": None,
"redirectStart": None,
"domContentLoadedEventEnd": 1460444930000,
"requestStart": None,
"secureConnectionStart": None,
"connectEnd": None,
"navigationStart": 1460444930000,
"loadEventStart": None,
"domInteractive": 1460444930000,
"domContentLoadedEventStart": 1460444930000,
"redirectEnd": None,
"domainLookupEnd": None,
"unloadEventEnd": None,
"responseEnd": None,
"testcase": "about:blank",
"domComplete": 1460444931000
},
{
"unloadEventStart": None,
"domLoading": 1460444934000,
"fetchStart": None,
"responseStart": None,
"loadEventEnd": None,
"connectStart": None,
"domainLookupStart": None,
"redirectStart": None,
"domContentLoadedEventEnd": 1460444946000,
"requestStart": None,
"secureConnectionStart": None,
"connectEnd": None,
"navigationStart": 1460444934000,
"loadEventStart": None,
"domInteractive": 1460444946000,
"domContentLoadedEventStart": 1460444946000,
"redirectEnd": None,
"domainLookupEnd": None,
"unloadEventEnd": None,
"responseEnd": None,
"testcase": ("http://localhost:8000/page_load_test/163.com/"
"p.mail.163.com/mailinfo/"
"shownewmsg_www_1222.htm.html"),
"domComplete": 1460444948000
}
]
expected = {
"performance_data": {
"framework": {"name": "servo-perf"},
"suites": [
{
"name": "domComplete",
"value": 3741.657386773941,
"subtests": [
{"name": "about:blank",
"value": 1000},
{"name": "163.com",
"value": 14000},
]
}
]
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
assert(expected == result)
def test_format_bad_perf_data():
mock_result = [
{
"navigationStart": 1460444930000,
"testcase": "about:blank",
"domComplete": 0
},
{
"navigationStart": 1460444934000,
"testcase": ("http://localhost:8000/page_load_test/163.com/"
"p.mail.163.com/mailinfo/"
"shownewmsg_www_1222.htm.html"),
"domComplete": 1460444948000
}
]
expected = {
"performance_data": {
"framework": {"name": "servo-perf"},
"suites": [
{
"name": "domComplete",
"value": 14000.0,
"subtests": [
{"name": "about:blank",
"value": -1}, # Timeout
{"name": "163.com",
"value": 14000},
]
}
]
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
assert(expected == result)

View file

@@ -0,0 +1,50 @@
print = function(o) {
console.log(o);
if (window.dump) {
window.dump(o + '\n');
}
}
function formatLine(name, t) {
print("[PERF]," + name + "," + t);
}
function printPerfTiming() {
print("[PERF] perf block start")
formatLine("testcase", window.location);
formatLine("navigationStart", performance.timing.navigationStart);
formatLine("unloadEventStart", performance.timing.unloadEventStart);
formatLine("unloadEventEnd", performance.timing.unloadEventEnd);
formatLine("redirectStart", performance.timing.redirectStart);
formatLine("redirectEnd", performance.timing.redirectEnd);
formatLine("fetchStart", performance.timing.fetchStart);
formatLine("domainLookupStart", performance.timing.domainLookupStart);
formatLine("domainLookupEnd", performance.timing.domainLookupEnd);
formatLine("connectStart", performance.timing.connectStart);
formatLine("connectEnd", performance.timing.connectEnd);
formatLine("secureConnectionStart", performance.timing.secureConnectionStart);
formatLine("requestStart", performance.timing.requestStart);
formatLine("responseStart", performance.timing.responseStart);
formatLine("responseEnd", performance.timing.responseEnd);
formatLine("domLoading", performance.timing.domLoading);
formatLine("domInteractive", performance.timing.domInteractive);
formatLine("domContentLoadedEventStart", performance.timing.domContentLoadedEventStart);
formatLine("domContentLoadedEventEnd", performance.timing.domContentLoadedEventEnd);
formatLine("domComplete", performance.timing.domComplete);
formatLine("loadEventStart", performance.timing.loadEventStart);
formatLine("loadEventEnd", performance.timing.loadEventEnd);
print("[PERF] perf block end")
}
if (document.readyState === "complete") {
printPerfTiming()
window.close();
} else {
window.addEventListener('load', printPerfTiming);
var timeout = 5;
window.setTimeout(function() {
print("[PERF] Timeout after " + timeout + " min. Force stop");
printPerfTiming();
window.close();
}, timeout * 60 * 1000)
}