Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1713815 - Add support for interactive browsertime tests. r=jmaher,perftest-reviewers,AlexandruIonescu
This patch adds support for interactive browsertime tests (initially used for responsiveness tests).

Differential Revision: https://phabricator.services.mozilla.com/D126695
This commit is contained in:
Parent: b1d459ea5b
Commit: d11015fe69
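How the pieces fit together: a test manifest's test_cmds entry holds a list of [command, argument, ...] items; the Python side packs it into a single string (arguments joined with ":::", commands joined with ";;;") and ships it over --browsertime.commands; the JavaScript side splits it back apart and dispatches each command. A minimal sketch of that round trip in plain Python (the command names and URL are illustrative, and ast.literal_eval stands in for manifestparser's evaluate_list_from_string used in-tree):

    import ast

    # A manifest's test_cmds value: a string holding a list of commands
    # (illustrative commands; real names come from browsertime's scripting API).
    test_cmds = """
    [
        ["measure.start", "welcome"],
        ["navigate", "https://www.example.com"],
        ["wait.byTime", 4000],
        ["measure.stop", ""],
    ]
    """

    # Pack (Python side): "cmd:::arg" per command, joined with ";;;"
    cmds = ast.literal_eval(test_cmds)
    parsed_cmds = [":::".join([str(i) for i in item]) for item in cmds if item]
    packed = ";;;".join(parsed_cmds)

    # Unpack (mirrors the JavaScript side's split(";;;") / split(":::"))
    for cmdstr in packed.split(";;;"):
        cmd, *args = cmdstr.split(":::")
        print(cmd, args)  # e.g. measure.start ['welcome']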
@@ -320,9 +320,12 @@ def main(log, args):
     for site in browsertime_json:
         for video in site["files"]["video"]:
             count += 1
+            name = job["test_name"]
+            if "alias" in site["info"] and site["info"]["alias"].strip() != "":
+                name = "%s.%s" % (name, site["info"]["alias"])
             jobs.append(
                 Job(
-                    test_name=job["test_name"],
+                    test_name=name,
                     extra_options=len(job["extra_options"]) > 0
                     and job["extra_options"]
                     or jobs_json["extra_options"],
@@ -7,6 +7,8 @@ import json
 import platform
 from pathlib import Path

+from manifestparser.util import evaluate_list_from_string
+
 from mozperftest.test.browsertime import add_options, add_option

 options = [
@@ -80,4 +82,9 @@ def before_runs(env):
     add_option(env, "browsertime.screenshot", "true")
     add_option(env, "browsertime.testName", test_site.get("name"))

+    # pack array into string for transport to javascript, is there a better way?
+    cmds = evaluate_list_from_string(test_site.get("test_cmds", "[]"))
+    parsed_cmds = [":::".join([str(i) for i in item]) for item in cmds if item]
+    add_option(env, "browsertime.commands", ";;;".join(parsed_cmds))
+
     print("Recording %s to file: %s" % (test_site.get("url"), recording_file))
@@ -4,7 +4,7 @@
 /* eslint-env node */
 "use strict";

-async function test(context, commands) {
+async function pageload_test(context, commands) {
   let testUrl = context.options.browsertime.url;
   let secondaryUrl = context.options.browsertime.secondary_url;
   let testName = context.options.browsertime.testName;
@@ -25,6 +25,83 @@ async function test(context, commands) {

   // Wait for browser to settle
   await commands.wait.byTime(1000);
 }

+async function get_command_function(cmd, commands) {
+  /*
+  Converts a string such as `measure.start` into the actual
+  function that is found in the `commands` module.
+
+  XXX: Find a way to share this function between
+  perftest_record.js and browsertime_interactive.js
+  */
+  if (cmd == "") {
+    throw new Error("A blank command was given.");
+  } else if (cmd.endsWith(".")) {
+    throw new Error(
+      "An extra `.` was found at the end of this command: " + cmd
+    );
+  }
+
+  // `func` will hold the actual method that needs to be called,
+  // and the `parent_mod` is the context required to run the `func`
+  // method. Without that context, `this` becomes undefined in the browsertime
+  // classes.
+  let func = null;
+  let parent_mod = null;
+  for (let func_part of cmd.split(".")) {
+    if (func_part == "") {
+      throw new Error(
+        "An empty function part was found in the command: " + cmd
+      );
+    }
+
+    if (func === null) {
+      parent_mod = commands;
+      func = commands[func_part];
+    } else if (func !== undefined) {
+      parent_mod = func;
+      func = func[func_part];
+    } else {
+      break;
+    }
+  }
+
+  if (func == undefined) {
+    throw new Error(
+      "The given command could not be found as a function: " + cmd
+    );
+  }
+
+  return [func, parent_mod];
+}
+
+async function interactive_test(input_cmds, context, commands) {
+  let cmds = input_cmds.split(";;;");
+
+  await commands.navigate("about:blank");
+
+  for (let cmdstr of cmds) {
+    let [cmd, ...args] = cmdstr.split(":::");
+    let [func, parent_mod] = await get_command_function(cmd, commands);
+
+    try {
+      await func.call(parent_mod, ...args);
+    } catch (e) {
+      context.log.info(
+        `Exception found while running \`commands.${cmd}(${args})\`: ` + e
+      );
+    }
+  }
+}
+
+async function test(context, commands) {
+  let input_cmds = context.options.browsertime.commands;
+  if (input_cmds !== undefined) {
+    await interactive_test(input_cmds, context, commands);
+  } else {
+    await pageload_test(context, commands);
+  }
+  return true;
+}

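The lookup above walks the dotted command name one segment at a time, keeping a reference to the parent object so the resolved method can be invoked with the correct receiver (otherwise `this` is undefined inside browsertime's classes). For illustration only, the same resolution logic in Python, against hypothetical stand-in classes:

    class Measure:
        def start(self, name):
            print("measure started:", name)

    class Commands:
        def __init__(self):
            self.measure = Measure()

    def get_command_function(cmd, commands):
        if cmd == "" or cmd.endswith("."):
            raise ValueError("Malformed command: %r" % cmd)
        func, parent_mod = None, None
        for func_part in cmd.split("."):
            if func_part == "":
                raise ValueError("Empty function part in command: %r" % cmd)
            # Remember the parent so the callee keeps its receiver
            parent_mod = commands if func is None else func
            func = getattr(parent_mod, func_part, None)
            if func is None:
                break
        if func is None:
            raise ValueError("Command not found as a function: %r" % cmd)
        return func, parent_mod

    func, parent_mod = get_command_function("measure.start", Commands())
    func("welcome")  # Python bound methods already carry their receiver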
@@ -0,0 +1,104 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* eslint-env node */
+
+async function get_command_function(cmd, commands) {
+  /*
+  Converts a string such as `measure.start` into the actual
+  function that is found in the `commands` module.
+
+  XXX: Find a way to share this function between
+  perftest_record.js and browsertime_interactive.js
+  */
+  if (cmd == "") {
+    throw new Error("A blank command was given.");
+  } else if (cmd.endsWith(".")) {
+    throw new Error(
+      "An extra `.` was found at the end of this command: " + cmd
+    );
+  }
+
+  // `func` will hold the actual method that needs to be called,
+  // and the `parent_mod` is the context required to run the `func`
+  // method. Without that context, `this` becomes undefined in the browsertime
+  // classes.
+  let func = null;
+  let parent_mod = null;
+  for (let func_part of cmd.split(".")) {
+    if (func_part == "") {
+      throw new Error(
+        "An empty function part was found in the command: " + cmd
+      );
+    }
+
+    if (func === null) {
+      parent_mod = commands;
+      func = commands[func_part];
+    } else if (func !== undefined) {
+      parent_mod = func;
+      func = func[func_part];
+    } else {
+      break;
+    }
+  }
+
+  if (func == undefined) {
+    throw new Error(
+      "The given command could not be found as a function: " + cmd
+    );
+  }
+
+  return [func, parent_mod];
+}
+
+module.exports = async function(context, commands) {
+  context.log.info("Starting an interactive browsertime test");
+  let page_cycles = context.options.browsertime.page_cycles;
+  let post_startup_delay = context.options.browsertime.post_startup_delay;
+  let input_cmds = context.options.browsertime.commands;
+
+  context.log.info(
+    "Waiting for %d ms (post_startup_delay)",
+    post_startup_delay
+  );
+  await commands.wait.byTime(post_startup_delay);
+
+  // unpack commands from python
+  let cmds = input_cmds.split(";;;");
+
+  // let pages_visited = 0;
+  for (let count = 0; count < page_cycles; count++) {
+    context.log.info("Navigating to about:blank w/nav, count: " + count);
+    await commands.navigate("about:blank");
+
+    let pages_visited = [];
+    for (let cmdstr of cmds) {
+      let [cmd, ...args] = cmdstr.split(":::");
+
+      if (cmd == "measure.start") {
+        if (args[0] != "") {
+          pages_visited.push(args[0]);
+        }
+      }
+
+      let [func, parent_mod] = await get_command_function(cmd, commands);
+
+      try {
+        await func.call(parent_mod, ...args);
+      } catch (e) {
+        context.log.info(
+          `Exception found while running \`commands.${cmd}(${args})\`: `
+        );
+        context.log.info(e.stack);
+      }
+    }
+
+    // Log the number of pages visited for results parsing
+    context.log.info("[] metrics: pages_visited: " + pages_visited);
+  }
+
+  context.log.info("Browsertime pageload ended.");
+  return true;
+};
@@ -23,7 +23,7 @@ module.exports = async function(context, commands) {
     context.log.info("Navigating to secondary url:" + secondary_url);
     await commands.navigate(secondary_url);
   } else {
-    context.log.info("Navigating to about:blank");
+    context.log.info("Navigating to about:blank, count: " + count);
     await commands.navigate("about:blank");
   }

@@ -34,6 +34,7 @@ module.exports = async function(context, commands) {
     context.log.info("Cycle %d, starting the measure", count);
     await commands.measure.start(test_url);
   }

+  context.log.info("Browsertime pageload ended.");
   return true;
 };
@@ -15,6 +15,7 @@ import six
 import sys

 import mozprocess
+from manifestparser.util import evaluate_list_from_string
 from benchmark import Benchmark
 from logger.logger import RaptorLogger
 from perftest import Perftest
@@ -41,6 +42,8 @@ class Browsertime(Perftest):
     def __init__(self, app, binary, process_handler=None, **kwargs):
         self.browsertime = True
         self.browsertime_failure = ""
+        self.page_count = []
+
         self.process_handler = process_handler or mozprocess.ProcessHandler
         for key in list(kwargs):
             if key.startswith("browsertime_"):
@@ -200,15 +203,26 @@ class Browsertime(Perftest):
             ]
         else:
             # Custom scripts are treated as pageload tests for now
-            browsertime_script = [
-                os.path.join(
-                    os.path.dirname(__file__),
-                    "..",
-                    "..",
-                    "browsertime",
-                    test.get("test_script", "browsertime_pageload.js"),
-                )
-            ]
+            if test.get("interactive", False):
+                browsertime_script = [
+                    os.path.join(
+                        os.path.dirname(__file__),
+                        "..",
+                        "..",
+                        "browsertime",
+                        "browsertime_interactive.js",
+                    )
+                ]
+            else:
+                browsertime_script = [
+                    os.path.join(
+                        os.path.dirname(__file__),
+                        "..",
+                        "..",
+                        "browsertime",
+                        test.get("test_script", "browsertime_pageload.js"),
+                    )
+                ]

         btime_args = self.browsertime_args
         if self.config["app"] in ("chrome", "chromium", "chrome-m"):
@@ -269,6 +283,11 @@ class Browsertime(Perftest):
         for var, val in self.config.get("environment", {}).items():
             browsertime_options.extend(["--firefox.env", "{}={}".format(var, val)])

+        # Parse the test commands (if any) from the test manifest
+        cmds = evaluate_list_from_string(test.get("test_cmds", "[]"))
+        parsed_cmds = [":::".join([str(i) for i in item]) for item in cmds if item]
+        browsertime_options.extend(["--browsertime.commands", ";;;".join(parsed_cmds)])
+
         if self.verbose:
             browsertime_options.append("-vvv")

@@ -474,6 +493,9 @@ class Browsertime(Perftest):
                 proc.kill()
             elif "warning" in level:
                 LOG.warning(msg)
+            elif "metrics" in level:
+                vals = msg.split(":")[-1].strip()
+                self.page_count = vals.split(",")
             else:
                 LOG.info(msg)

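The `[] metrics:` line that browsertime_interactive.js logs is the transport for the visited-page list: a JavaScript array stringifies to comma-separated values, and the parsing above keeps everything after the last colon. A small sketch with an illustrative message:

    # What the JS side logs (a JS array joins with commas when stringified)
    msg = "[] metrics: pages_visited: welcome,search,checkout"

    vals = msg.split(":")[-1].strip()
    page_count = vals.split(",")
    assert page_count == ["welcome", "search", "checkout"]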
@@ -563,18 +563,15 @@ def get_raptor_test_list(args, oskey):
                 # remove the 'hero =' line since no longer measuring hero
                 del next_test["hero"]

-            if next_test.get("lower_is_better") is not None:
-                next_test["lower_is_better"] = bool_from_str(
-                    next_test.get("lower_is_better")
-                )
-            if next_test.get("subtest_lower_is_better") is not None:
-                next_test["subtest_lower_is_better"] = bool_from_str(
-                    next_test.get("subtest_lower_is_better")
-                )
-            if next_test.get("accept_zero_vismet", None) is not None:
-                next_test["accept_zero_vismet"] = bool_from_str(
-                    next_test.get("accept_zero_vismet")
-                )
+            bool_settings = [
+                "lower_is_better",
+                "subtest_lower_is_better",
+                "accept_zero_vismet",
+                "interactive",
+            ]
+            for setting in bool_settings:
+                if next_test.get(setting, None) is not None:
+                    next_test[setting] = bool_from_str(next_test.get(setting))

     # write out .json test setting files for the control server to read and send to web ext
     if len(tests_to_run) != 0:
@@ -191,11 +191,23 @@ class PerftestOutput(object):
         if self.summarized_results == {}:
             success = False
             LOG.error(
-                "no summarized raptor results found for %s" % ", ".join(test_names)
+                "no summarized raptor results found for any of %s"
+                % ", ".join(test_names)
             )
         else:
             for suite in self.summarized_results["suites"]:
                 tname = suite["name"]
+
+                # as we do navigation, tname could end in .<alias>
+                # test_names doesn't have tname, so either add it to test_names,
+                # or strip it
+                parts = tname.split(".")
+                try:
+                    tname = ".".join(parts[:-1])
+                except Exception as e:
+                    LOG.info("no alias found on test, ignoring: %s" % e)
+                    pass
+
                 # Since test names might have been modified, check if
                 # part of the test name exists in the test_names list entries
                 found = False
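For example, a suite name carrying an alias such as "browsertime-responsiveness.example-alias" (the alias here is illustrative) is reduced before the membership check that follows:

    tname = "browsertime-responsiveness.example-alias"
    parts = tname.split(".")
    tname = ".".join(parts[:-1])  # -> "browsertime-responsiveness"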
@@ -205,7 +217,7 @@ class PerftestOutput(object):
                     break
             if not found:
                 success = False
-                LOG.error("no summarized raptor results found for %s" % tname)
+                LOG.error("no summarized raptor results found for %s" % (tname))

         with open(results_path, "w") as f:
             for result in self.summarized_results:
@@ -483,6 +483,7 @@ class Perftest(object):

         self.config["raptor_json_path"] = raptor_json_path
         self.config["artifact_dir"] = self.artifact_dir
+        self.config["page_count"] = self.page_count
         res = self.results_handler.summarize_and_output(self.config, tests, test_names)

         # gecko profiling symbolication
@@ -338,7 +338,14 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
         pass

     def parse_browsertime_json(
-        self, raw_btresults, page_cycles, cold, browser_cycles, measure
+        self,
+        raw_btresults,
+        page_cycles,
+        cold,
+        browser_cycles,
+        measure,
+        page_count,
+        test_name,
     ):
         """
         Receive a json blob that contains the results direct from the browsertime tool. Parse
@@ -503,6 +510,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
         )

         # now parse out the values
+        page_counter = 0
         for raw_result in raw_btresults:
             if not raw_result["browserScripts"]:
                 raise MissingResultsError("Browsertime cycle produced no measurements.")
@@ -513,11 +521,20 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
             # Desktop chrome doesn't have `browser` scripts data available for now
             bt_browser = raw_result["browserScripts"][0].get("browser", None)
             bt_ver = raw_result["info"]["browsertime"]["version"]
-            bt_url = (raw_result["info"]["url"],)
+
+            # when doing actions, we append a .X for each additional pageload in a scenario
+            extra = ""
+            if len(page_count) > 0:
+                extra = ".%s" % page_count[page_counter % len(page_count)]
+            url_parts = raw_result["info"]["url"].split("/")
+            page_counter += 1
+
+            bt_url = "%s%s/%s," % ("/".join(url_parts[:-1]), extra, url_parts[-1])
             bt_result = {
                 "bt_ver": bt_ver,
                 "browser": bt_browser,
                 "url": bt_url,
+                "name": "%s%s" % (test_name, extra),
                 "measurements": {},
                 "statistics": {},
             }
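A worked example of the `.X` suffixing above, with illustrative values: for a browsertime result whose URL is https://www.example.com/index.html, a page_count of ["welcome", "search"], and the first iteration (page_counter == 0):

    page_count = ["welcome", "search"]
    page_counter = 0

    extra = ".%s" % page_count[page_counter % len(page_count)]  # ".welcome"
    url_parts = "https://www.example.com/index.html".split("/")
    bt_url = "%s%s/%s," % ("/".join(url_parts[:-1]), extra, url_parts[-1])
    # -> "https://www.example.com.welcome/index.html,"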
@@ -548,6 +565,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
                 "bt_ver": bt_ver,
                 "browser": bt_browser,
                 "url": bt_url,
+                "name": "%s%s" % (test_name, extra),
                 "measurements": {},
                 "statistics": {},
             }
@@ -751,6 +769,8 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
             test["cold"],
             test["browser_cycles"],
             test.get("measure"),
+            test_config.get("page_count", []),
+            test["name"],
         ):

             def _new_standard_result(new_result, subtest_unit="ms"):