Bug 1690746 - Browsertime benchmark measurements are generated only after the page load event r=perftest-reviewers,alexandru.irimovici,sparky

Please leave the landing to the author of the patch!
Build with all benchmarks running: https://treeherder.mozilla.org/#/jobs?repo=try&revision=7c672d70f770f207c746ca39b00abf7f29013a48

Differential Revision: https://phabricator.services.mozilla.com/D106923
This commit is contained in:
Alex Ionescu 2021-03-25 12:29:05 +00:00
Parent ec157c9846
Commit 38e2e15223
3 changed files with 79 additions and 30 deletions

View file

@@ -1,15 +1,68 @@
(function() {
  return new Promise(function(resolve) {
    window.addEventListener("message", function(event) {
      if (event.data[0] == "raptor-benchmark") {
        console.log("Benchmark data received for ", event.data[0]);
        let data = {
          [event.data[1]]: event.data.slice(2),
        };
        resolve(data);
      }
    });
  }).catch(function() {
    console.log("Benchmark Promise Rejected");
  });
})();
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* eslint-env node */
module.exports = async function(context, commands) {
  context.log.info("Starting a browsertime benchmark");
  let url = context.options.browsertime.url;
  let page_cycles = context.options.browsertime.page_cycles;
  let page_cycle_delay = context.options.browsertime.page_cycle_delay;
  let post_startup_delay = context.options.browsertime.post_startup_delay;
  let page_timeout = context.options.timeouts.pageLoad;
  let ret = false;
  context.log.info(
    "Waiting for %d ms (post_startup_delay)",
    post_startup_delay
  );
  await commands.wait.byTime(post_startup_delay);
  for (let count = 0; count < page_cycles; count++) {
    context.log.info("Navigating to about:blank");
    await commands.navigate("about:blank");
    context.log.info("Cycle %d, waiting for %d ms", count, page_cycle_delay);
    await commands.wait.byTime(page_cycle_delay);
    context.log.info("Cycle %d, starting the measure", count);
    await commands.measure.start(url);
    context.log.info("Benchmark custom metric collection");
    let data = null;
    let starttime = await commands.js.run(`return performance.now();`);
    while (
      data == null &&
      (await commands.js.run(`return performance.now();`)) - starttime <
        page_timeout
    ) {
      let wait_time = 3000;
      context.log.info("Waiting %d ms for data from benchmark...", wait_time);
      await commands.wait.byTime(wait_time);
      data = await commands.js.run(
        "return window.sessionStorage.getItem('benchmark_results');"
      );
    }
    if (
      data == null &&
      (await commands.js.run(`return performance.now();`)) - starttime >=
        page_timeout
    ) {
      context.log.error("Benchmark timed out. Aborting...");
    } else if (data) {
      context.log.info("Value of benchmark data: ", data);
      data = JSON.parse(data);
      commands.measure.addObject({
        browsertime_benchmark: {
          [data[1]]: data.slice(2),
        },
      });
      ret = true;
    }
  }
  context.log.info("Browsertime benchmark ended.");
  return ret;
};
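Note that the script above only polls window.sessionStorage for a "benchmark_results" entry; how the page under test produces that entry is left to the benchmark itself. Below is a minimal page-side sketch of the assumed contract, inferred from the data[1]/data.slice(2) handling above and the "raptor-benchmark" postMessage layout used by the removed injected script. The listener helper, the "speedometer" metric name, and the scores are illustrative and not part of this patch.

// Hypothetical in-page glue: mirror the "raptor-benchmark" message that the
// removed injected script listened for into sessionStorage, where the new
// browsertime_benchmark.js script polls for it.
window.addEventListener("message", function(event) {
  if (event.data[0] === "raptor-benchmark") {
    // Assumed layout: [tag, metric name, value, value, ...]
    window.sessionStorage.setItem(
      "benchmark_results",
      JSON.stringify(event.data)
    );
  }
});

// A benchmark page would then report its scores like this (values made up):
window.postMessage(["raptor-benchmark", "speedometer", 95.1, 96.3, 94.8], "*");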

View file

@@ -187,6 +187,16 @@ class Browsertime(Perftest):
"--browsertime.background_app",
test.get("background_app", "false"),
]
elif test.get("type", "") == "benchmark":
browsertime_script = [
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"browsertime",
"browsertime_benchmark.js",
)
]
else:
browsertime_script = [
os.path.join(
@@ -359,20 +369,6 @@ class Browsertime(Perftest):
        # this will be used for btime --timeouts.pageLoad
        cmd = self._compose_cmd(test, timeout)
        if test.get("type", "") == "benchmark":
            cmd.extend(
                [
                    "--script",
                    os.path.join(
                        os.path.dirname(__file__),
                        "..",
                        "..",
                        "browsertime",
                        "browsertime_benchmark.js",
                    ),
                ]
            )
        if test.get("type", "") == "scenario":
            # Change the timeout for scenarios since they
            # don't output much for a long period of time

View file

@@ -554,7 +554,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
vismet_result["statistics"] = raw_result["statistics"]["visualMetrics"]
results.append(vismet_result)
custom_types = raw_result["browserScripts"][0].get("custom")
custom_types = raw_result["extras"][0]
if custom_types:
for custom_type in custom_types:
bt_result["measurements"].update(
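For reference, commands.measure.addObject() in browsertime_benchmark.js is what populates the "extras" field of the browsertime result JSON, which this handler now reads instead of the injected-script "custom" data. A rough sketch of the shape being iterated, assuming a single page cycle and the same illustrative metric as above (names and numbers are not from this patch):

// Hypothetical shape of one raw_result entry consumed by the loop above.
const rawResult = {
  extras: [
    {
      browsertime_benchmark: {
        // key = data[1], values = data.slice(2) from browsertime_benchmark.js
        speedometer: [95.1, 96.3, 94.8],
      },
    },
  ],
};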