Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1635389 - clean the code r=sparky
We missed a pass on black, and python.ini was incomplete.

Differential Revision: https://phabricator.services.mozilla.com/D73822
This commit is contained in:
Parent: 7517317c35
Commit: a44d97ff35
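Most of the Python churn below comes from re-running black, per the commit message. As a rough illustration only (not part of the commit; it assumes a black release that exposes the format_str / FileMode API and the default 88-column limit), the over-long raise touched in the MetricsStorage hunk can be rewrapped the same way programmatically:

# Illustration only: reproduce black's rewrapping of one over-long line from
# the diff below.  The enclosing function here is just a stand-in for the real
# nesting; assumes the `black` package is installed.
import black

SRC = '''\
def set_results(results):
    for res in results:
        name = res["name"]
        if isinstance(res["results"], dict):
            raise NotImplementedError("Subtest-based processing is not implemented yet")
'''

# Once indented, the raise exceeds the default 88-column limit, so black
# splits the call across three lines, much like the hunk below.
print(black.format_str(SRC, mode=black.FileMode()))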
@@ -6,7 +6,7 @@ from pathlib import Path
 
 from mozperftest.metrics.exceptions import (
     MetricsMultipleTransformsError,
-    MetricsMissingResultsError
+    MetricsMissingResultsError,
 )
 from mozperftest.metrics.utils import validate_intermediate_results
 from mozperftest.metrics.notebook import PerftestNotebook
@@ -70,7 +70,9 @@ class MetricsStorage(object):
             name = res["name"]
             if isinstance(res["results"], dict):
                 # XXX Implement subtest based parsing
-                raise NotImplementedError("Subtest-based processing is not implemented yet")
+                raise NotImplementedError(
+                    "Subtest-based processing is not implemented yet"
+                )
 
             # Merge all entries with the same name into one
             # result, if separation is needed use unique names
@@ -85,7 +87,9 @@ class MetricsStorage(object):
             # Check the transform definitions
             currtrfm = self.results[name]["transformer"]
             if not currtrfm:
-                self.results[name]["transformer"] = res.get("transformer", "SingleJsonRetriever")
+                self.results[name]["transformer"] = res.get(
+                    "transformer", "SingleJsonRetriever"
+                )
             elif currtrfm != res.get("transformer", "SingleJsonRetriever"):
                 raise MetricsMultipleTransformsError(
                     f"Only one transformer allowed per data name! Found multiple for {name}: "
@@ -136,10 +140,7 @@ class MetricsStorage(object):
         return self.stddata
 
     def filtered_metrics(
-        self,
-        group_name="firefox",
-        transformer="SingleJsonRetriever",
-        metrics=None,
+        self, group_name="firefox", transformer="SingleJsonRetriever", metrics=None
     ):
         """Filters the metrics to only those that were requested by `metrics`.
 
@@ -9,14 +9,17 @@ class MetricsMultipleTransformsError(Exception):
     This is because intermediate results with the same data
     name are merged when being processed.
     """
+
     pass
 
 
 class MetricsMissingResultsError(Exception):
     """Raised when no results could be found after parsing the intermediate results."""
+
     pass
 
 
 class PerfherderValidDataError(Exception):
     """Raised when no valid data (int/float) can be found to build perfherder blob."""
+
     pass
@@ -51,11 +51,7 @@ class Perfherder(Layer):
 
         # Get filtered metrics
         results, fullsettings = filtered_metrics(
-            metadata,
-            output,
-            prefix,
-            metrics=self.get_arg("metrics"),
-            settings=True
+            metadata, output, prefix, metrics=self.get_arg("metrics"), settings=True
         )
 
         if not results:
@@ -71,7 +67,11 @@ class Perfherder(Layer):
         # overall values.
         subtests = {}
         for r in res:
-            vals = [v["value"] for v in r["data"] if isinstance(v["value"], (int, float))]
+            vals = [
+                v["value"]
+                for v in r["data"]
+                if isinstance(v["value"], (int, float))
+            ]
             if vals:
                 subtests[r["subtest"]] = vals
 
@@ -84,7 +84,7 @@ class Perfherder(Layer):
                 alert_threshold=settings.get("alertThreshold", 2.0),
                 lower_is_better=settings.get("lowerIsBetter", True),
                 unit=settings.get("unit", "ms"),
-                summary=settings.get("value")
+                summary=settings.get("value"),
             )
 
             # XXX Validate perfherder data
@@ -220,8 +220,8 @@ class Perfherder(Layer):
 
         if len(allvals) == 0:
             raise PerfherderValidDataError(
-                "Could not build perfherder data blob because no valid data was provided, " +
-                "only int/float data is accepted."
+                "Could not build perfherder data blob because no valid data was provided, "
+                + "only int/float data is accepted."
             )
 
         suite["value"] = statistics.mean(allvals)
@@ -18,15 +18,12 @@ with pathlib.Path(PARENT, "schemas", "intermediate-results-schema.json").open()
 # These are the properties we know about in the schema.
 # If anything other than these is present, then we will
 # fail validation.
-KNOWN_PERFHERDER_PROPS = set([
-    "name",
-    "value",
-    "unit",
-    "lowerIsBetter",
-    "shouldAlert",
-    "alertThreshold"
-])
-KNOWN_SUITE_PROPS = set(set(["results", "transformer", "extraOptions"]) | KNOWN_PERFHERDER_PROPS)
+KNOWN_PERFHERDER_PROPS = set(
+    ["name", "value", "unit", "lowerIsBetter", "shouldAlert", "alertThreshold"]
+)
+KNOWN_SUITE_PROPS = set(
+    set(["results", "transformer", "extraOptions"]) | KNOWN_PERFHERDER_PROPS
+)
 KNOWN_SINGLE_MEASURE_PROPS = set(set(["values"]) | KNOWN_PERFHERDER_PROPS)
 
 
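For context on these property sets: an intermediate-results entry that sticks to the known properties validates cleanly, while an unknown key fails, as the tests further down exercise. A minimal sketch, assuming mozperftest is importable:

# Minimal sketch (assumes mozperftest is on the Python path).  Only keys from
# KNOWN_SUITE_PROPS / KNOWN_SINGLE_MEASURE_PROPS are used; an unknown key such
# as "shouldalert" would raise ValidationError (see the tests below).
from mozperftest.metrics.utils import validate_intermediate_results

suite = {
    "name": "the-name",
    "extraOptions": ["an-extra-option"],
    "value": 9000,
    "results": [
        {"name": "metric-1", "shouldAlert": True, "values": [0, 1, 1, 0]},
    ],
}

validate_intermediate_results(suite)  # passes silently when the entry is valid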
@@ -74,7 +71,6 @@ def validate_intermediate_results(results):
     :param results dict: The intermediate results to validate.
     :raises ValidationError: Raised when validation fails.
     """
-
     # Start with the standard validation
     validate(results, IR_SCHEMA)
 
@@ -5,10 +5,13 @@ skip-if = python == 2
 [test_android.py]
 [test_argparser.py]
 [test_browsertime.py]
+[test_consoleoutput.py]
+[test_ir_schema.py]
+[test_layers.py]
 [test_mach_commands.py]
+[test_metrics_utils.py]
 [test_perfherder.py]
 [test_profile.py]
 [test_proxy.py]
-[test_ir_schema.py]
 [test_scriptinfo.py]
 [test_utils.py]
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 import os
 import mozunit
+import mock
 
 from mozperftest.tests.support import EXAMPLE_TEST, get_running_env, temp_dir
 from mozperftest.environment import METRICS
@@ -10,7 +11,8 @@ from mozperftest.utils import silence
 HERE = os.path.dirname(__file__)
 
 
-def test_console_output():
+@mock.patch("mozperftest.metrics.common.validate_intermediate_results")
+def test_console_output(*mocked):
     with temp_dir() as tempdir:
         options = {
             "perfherder": True,
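The decorator added above relies on standard mock.patch semantics: used as a decorator, it hands the created MagicMock to the wrapped test as an extra positional argument, which is why the signature becomes *mocked. A tiny standalone sketch, with os.getcwd as a stand-in patch target:

# Standalone sketch of the mock.patch decorator behavior used above; the
# patched target (os.getcwd) is only a stand-in, not related to the commit.
import os
import mock


@mock.patch("os.getcwd")
def demo(*mocked):
    # mocked is a one-element tuple holding the MagicMock that replaces
    # os.getcwd for the duration of the call.
    print(os.getcwd())  # prints the MagicMock's return-value repr
    print(len(mocked))  # 1


demo()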
@@ -27,7 +29,10 @@ def test_console_output():
         mach_cmd.run_process = _run_process
         metrics = env.layers[METRICS]
         env.set_arg("tests", [EXAMPLE_TEST])
-        metadata.set_result(os.path.join(HERE, "browsertime-results"))
+        bt_res = os.path.join(HERE, "browsertime-results", "browsertime.json")
+
+        res = {"name": "name", "results": [bt_res]}
+        metadata.add_result(res)
 
         with metrics as console, silence():
             console(metadata)
@@ -10,10 +10,7 @@ from mozperftest.metrics.utils import validate_intermediate_results
 
 
 def test_results_with_directory():
-    test_result = {
-        "results": "path-to-results",
-        "name": "the-name"
-    }
+    test_result = {"results": "path-to-results", "name": "the-name"}
     validate_intermediate_results(test_result)
 
 
@@ -21,9 +18,9 @@ def test_results_with_measurements():
     test_result = {
         "results": [
             {"name": "metric-1", "values": [0, 1, 1, 0]},
-            {"name": "metric-2", "values": [0, 1, 1, 0]}
+            {"name": "metric-2", "values": [0, 1, 1, 0]},
         ],
-        "name": "the-name"
+        "name": "the-name",
     }
     validate_intermediate_results(test_result)
 
@@ -32,11 +29,11 @@ def test_results_with_suite_perfherder_options():
     test_result = {
         "results": [
             {"name": "metric-1", "values": [0, 1, 1, 0]},
-            {"name": "metric-2", "values": [0, 1, 1, 0]}
+            {"name": "metric-2", "values": [0, 1, 1, 0]},
         ],
         "name": "the-name",
         "extraOptions": ["an-extra-option"],
-        "value": 9000
+        "value": 9000,
     }
     validate_intermediate_results(test_result)
 
@@ -45,11 +42,11 @@ def test_results_with_subtest_perfherder_options():
     test_result = {
         "results": [
             {"name": "metric-1", "shouldAlert": True, "values": [0, 1, 1, 0]},
-            {"name": "metric-2", "alertThreshold": 1.0, "values": [0, 1, 1, 0]}
+            {"name": "metric-2", "alertThreshold": 1.0, "values": [0, 1, 1, 0]},
         ],
         "name": "the-name",
         "extraOptions": ["an-extra-option"],
-        "value": 9000
+        "value": 9000,
     }
     validate_intermediate_results(test_result)
 
@@ -58,7 +55,7 @@ def test_results_with_bad_suite_property():
     test_result = {
         "results": "path-to-results",
         "name": "the-name",
-        "I'll cause a failure,": "an expected failure"
+        "I'll cause a failure,": "an expected failure",
     }
     with pytest.raises(ValidationError):
         validate_intermediate_results(test_result)
@@ -69,11 +66,11 @@ def test_results_with_bad_subtest_property():
         "results": [
             # Error is in "shouldalert", it should be "shouldAlert"
            {"name": "metric-1", "shouldalert": True, "values": [0, 1, 1, 0]},
-            {"name": "metric-2", "alertThreshold": 1.0, "values": [0, 1, 1, 0]}
+            {"name": "metric-2", "alertThreshold": 1.0, "values": [0, 1, 1, 0]},
         ],
         "name": "the-name",
         "extraOptions": ["an-extra-option"],
-        "value": 9000
+        "value": 9000,
     }
     with pytest.raises(ValidationError):
         validate_intermediate_results(test_result)
@@ -96,7 +93,7 @@ def test_results_with_missing_subtest_property():
         ],
         "name": "the-name",
         "extraOptions": ["an-extra-option"],
-        "value": 9000
+        "value": 9000,
     }
     with pytest.raises(ValidationError):
         validate_intermediate_results(test_result)
@@ -24,9 +24,11 @@ def test_metrics():
     mach_cmd.run_process = _run_process
     metrics = env.layers[METRICS]
     env.set_arg("tests", [EXAMPLE_TEST])
-    metadata.add_result({
-        "results": str(pathlib.Path(HERE, "browsertime-results")),
-        "name": "browsertime"}
+    metadata.add_result(
+        {
+            "results": str(pathlib.Path(HERE, "browsertime-results")),
+            "name": "browsertime",
+        }
     )
 
     with temp_file() as output: