Backed out changeset 65acd64b9e0e (bug 1640875) for mpu failures

Coroiu Cristina 2020-07-03 15:59:28 +03:00
Parent 107ae2d1d9
Commit 50d2261646
3 changed files with 7 additions and 196 deletions

View File

@@ -1,85 +0,0 @@
%% md
<div id="table-wrapper">
<table id="compareTable" border="1"></table>
</div>
%% py
from js import document, data_object
import json
import numpy as np

# Group raw values and summary statistics by data point name and subtest.
split_data = {}
dir_names = set()
subtests = set()
newest_run_name = ""
for element in data_object:
    name = element["name"]
    if "- newest run" in name:
        newest_run_name = name
    subtest = element["subtest"]
    dir_names.add(name)
    subtests.add(subtest)
    data = [p["value"] for p in element["data"]]
    split_data.setdefault(name, {}).update({
        subtest: {
            "data": data,
            "stats": {
                "Mean": np.round(np.mean(data), 2),
                "Median": np.median(data),
                "Std. Dev.": np.round(np.std(data), 2),
            },
        }
    })

table = document.getElementById("compareTable")
table.innerHTML = ''

# build table head
thead = table.createTHead()
throw = thead.insertRow()
for name in ["Metrics", "Statistics"] + list(dir_names):
    th = document.createElement("th")
    th.appendChild(document.createTextNode(name))
    throw.appendChild(th)

def fillRow(row, subtest, stat):
    # One row per statistic: value plus percent difference from the newest run.
    row.insertCell().appendChild(document.createTextNode(stat))
    newest_run_val = split_data[newest_run_name][subtest]["stats"][stat]
    for name in dir_names:
        cell_val = split_data[name][subtest]["stats"][stat]
        diff = np.round((cell_val - newest_run_val * 1.0) / newest_run_val * 100, 2)
        color = "red" if diff > 0 else "green"
        row.insertCell().innerHTML = f"{cell_val}\n(<span style=\"color:{color}\">{diff}</span>%)"

# build table body
tbody = document.createElement("tbody")
for subtest in subtests:
    row1 = tbody.insertRow()
    cell0 = row1.insertCell()
    cell0.appendChild(document.createTextNode(subtest))
    cell0.rowSpan = 3
    fillRow(row1, subtest, "Mean")
    row2 = tbody.insertRow()
    fillRow(row2, subtest, "Median")
    row3 = tbody.insertRow()
    fillRow(row3, subtest, "Std. Dev.")
table.appendChild(tbody)
%% css
#table-wrapper {
  height: 600px;
  overflow: auto;
}
#table {
  display: table;
}
td {
  white-space: pre-line;
}
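For context, the Pyodide cell above consumes an injected data_object: a list of entries, each carrying a "name", a "subtest", and a list of "data" points with "value" fields, where the baseline entry's name ends in "- newest run". The sketch below only illustrates that expected shape and the same summary statistics; the entry names and values are made up, and the real payload is supplied by the PerftestNotebook upload shown in the next file.

import numpy as np

# Hypothetical stand-in for the injected data_object (names and values are made up).
data_object = [
    {"name": "local- newest run", "subtest": "firstPaint",
     "data": [{"value": 110}, {"value": 102}, {"value": 98}]},
    {"name": "historical-run", "subtest": "firstPaint",
     "data": [{"value": 120}, {"value": 104}, {"value": 99}]},
]

for element in data_object:
    values = [p["value"] for p in element["data"]]
    # Same statistics the template tabulates per (name, subtest) pair.
    print(element["name"], element["subtest"],
          np.round(np.mean(values), 2), np.median(values), np.round(np.std(values), 2))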

View File

@@ -1,7 +1,7 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pathlib
from mozperftest.layers import Layer
from mozperftest.metrics.common import filtered_metrics
@@ -34,69 +34,27 @@ class Notebook(Layer):
"analyze-strings": {
"action": "store_true",
"default": False,
"help": "If set, strings won't be filtered out of the results to analyze in Iodide.",
},
"no-server": {
"action": "store_true",
"default": False,
"help": "f set, the data won't be opened in Iodide.",
},
"compare-to": {
"nargs": "*",
"default": [],
"help": (
"Compare the results from this test to the historical data in the folder(s) "
"specified through this option. Only JSON data can be processed for the moment."
"Each folder containing those JSONs is considered as a distinct data point "
"to compare with the newest run."
"If set, strings won't be filtered out of the results to"
" analyze in Iodide."
),
},
"stats": {
"action": "store_true",
"default": False,
"help": "If set, browsertime statistics will be reported.",
},
}

    def run(self, metadata):
        exclusions = None
        if not self.get_arg("stats"):
            exclusions = ["statistics."]

        for result in metadata.get_results():
            result["name"] += "- newest run"

        analysis = self.get_arg("analysis")
        dir_list = self.get_arg("compare-to")
        if dir_list:
            analysis.append("compare")
            for directory in dir_list:
                dirpath = pathlib.Path(directory)
                if not dirpath.exists():
                    raise Exception(f"{dirpath} does not exist.")
                if not dirpath.is_dir():
                    raise Exception(f"{dirpath} is not a directory")
                # TODO: Handle more than just JSON data.
                for jsonfile in dirpath.rglob("*.json"):
                    metadata.add_result(
                        {
                            "results": str(jsonfile.resolve()),
                            "name": jsonfile.parent.name,
                        }
                    )

        # Get filtered metrics
        results = filtered_metrics(
            metadata,
            self.get_arg("output"),
            self.get_arg("prefix"),
            metrics=self.get_arg("metrics"),
            exclude=exclusions,
        )

        if not results:
            self.warning("No results left after filtering")
            return metadata

        analysis = self.get_arg("analysis")
        data_to_post = []
        for name, res in results.items():
            for r in res:
@@ -105,12 +63,9 @@ class Notebook(Layer):
                    data_to_post.append(r)
                elif self.get_arg("analyze-strings"):
                    data_to_post.append(r)

        self.ptnb = PerftestNotebook(
            data=data_to_post, logger=metadata, prefix=self.get_arg("prefix")
        )
        self.ptnb.post_to_iodide(
            analysis, start_local_server=not self.get_arg("no-server")
        )
        self.ptnb.post_to_iodide(analysis)
        return metadata
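To make the backed-out flow easier to follow: each folder passed through compare-to is treated as one historical data point, every *.json found under it is registered as a result named after its parent directory, and only then are the metrics filtered and posted to Iodide. The standalone sketch below mirrors that collection step outside the Layer machinery; the helper name and the example folder are hypothetical, and the real code hands each entry to metadata.add_result() as above.

import pathlib

def collect_comparison_results(dir_list):
    # Hypothetical helper mirroring the backed-out compare-to handling:
    # each folder is one data point, each JSON named after its parent directory.
    entries = []
    for directory in dir_list:
        dirpath = pathlib.Path(directory)
        if not dirpath.exists():
            raise Exception(f"{dirpath} does not exist.")
        if not dirpath.is_dir():
            raise Exception(f"{dirpath} is not a directory")
        # Only JSON data is handled for the moment.
        for jsonfile in dirpath.rglob("*.json"):
            entries.append(
                {"results": str(jsonfile.resolve()), "name": jsonfile.parent.name}
            )
    return entries

# Example with a hypothetical folder of earlier runs:
# collect_comparison_results(["artifacts/previous-browsertime-run"])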

View File

@@ -52,66 +52,7 @@ def test_notebookupload_with_filter(notebook, no_filter):
    for data in data_dict["data"]:
        assert type(data["value"]) in (int, float)

    notebook.assert_has_calls(
        mock.call().post_to_iodide(["scatterplot"], start_local_server=True)
    )


@pytest.mark.parametrize("stats", [False, True])
@mock.patch("mozperftest.metrics.notebookupload.PerftestNotebook")
def test_compare_to_success(notebook, stats):
    options = {
        "notebook-metrics": ["firstPaint"],
        "notebook-prefix": "",
        "notebook-analysis": [],
        "notebook": True,
        "notebook-compare-to": [str(BT_DATA.parent)],
        "notebook-stats": stats,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)

    args, kwargs = notebook.call_args_list[0]

    if not stats:
        assert len(args[0]) == 2
        assert args[0][0]["name"] == "browsertime-newestRun"
        assert args[0][1]["name"] == "browsertime-results"
    else:
        assert any(["statistics" in element["subtest"] for element in args[0]])

    notebook.assert_has_calls(
        mock.call().post_to_iodide(["compare"], start_local_server=True)
    )


@pytest.mark.parametrize("filepath", ["invalidPath", str(BT_DATA)])
@mock.patch("mozperftest.metrics.notebookupload.PerftestNotebook")
def test_compare_to_invalid_parameter(notebook, filepath):
    options = {
        "notebook-metrics": ["firstPaint"],
        "notebook-prefix": "",
        "notebook-analysis": [],
        "notebook": True,
        "notebook-compare-to": [filepath],
    }

    metrics, metadata, env = setup_env(options)

    with pytest.raises(Exception) as einfo:
        with temp_file() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)

    if filepath == "invalidPath":
        assert "does not exist" in str(einfo.value)
    else:
        assert "not a directory" in str(einfo.value)

    notebook.assert_has_calls(mock.call().post_to_iodide(["scatterplot"]))


if __name__ == "__main__":