From 756d14a209cbd7fee4acf09721a196e826d840c8 Mon Sep 17 00:00:00 2001
From: Gregory Mierzwinski
Date: Tue, 1 Sep 2020 15:29:41 +0000
Subject: [PATCH] Bug 1662278 - Simplify mozperftest metric names. r=tarek

This patch adds two new options to mozperftest. The --simplify-names
argument can be provided to enable simplification of metric names, and
the --simplify-exclude option lets users select which metrics to skip
during simplification. A bug relating to setting the default of a list
option is also fixed here.

Differential Revision: https://phabricator.services.mozilla.com/D88917
---
 python/mozperftest/mozperftest/argparser.py   |   2 -
 .../mozperftest/mozperftest/metrics/common.py |  46 ++++++
 .../mozperftest/metrics/consoleoutput.py      |   2 +
 .../mozperftest/metrics/notebookupload.py     |   2 +
 .../mozperftest/metrics/perfherder.py         |   2 +
 .../mozperftest/tests/test_argparser.py       |   7 +
 .../mozperftest/tests/test_perfherder.py      | 147 ++++++++++++++++++
 7 files changed, 206 insertions(+), 2 deletions(-)

diff --git a/python/mozperftest/mozperftest/argparser.py b/python/mozperftest/mozperftest/argparser.py
index 29743ea477af..e68acd38d065 100644
--- a/python/mozperftest/mozperftest/argparser.py
+++ b/python/mozperftest/mozperftest/argparser.py
@@ -121,8 +121,6 @@ class PerftestArgumentParser(ArgumentParser):
         if not self.app:
             self.app = "generic"
         for name, options in Options.args.items():
-            if "default" in options and isinstance(options["default"], list):
-                options["default"] = []
             self.add_argument(name, **options)

         mozlog.commandline.add_logging_group(self)
diff --git a/python/mozperftest/mozperftest/metrics/common.py b/python/mozperftest/mozperftest/metrics/common.py
index 31f3801f17f5..9d437f7578b1 100644
--- a/python/mozperftest/mozperftest/metrics/common.py
+++ b/python/mozperftest/mozperftest/metrics/common.py
@@ -27,6 +27,20 @@ COMMON_ARGS = {
         "using browserScripts.pageinfo.url will split the data by the unique "
         "URLs that are found.",
     },
+    "simplify-names": {
+        "action": "store_true",
+        "default": False,
+        "help": "If set, metric names will be simplified to a single word. The PerftestETL "
+        "combines dictionary keys with `.`, and the final key contains the value of the data. "
+        "That final key becomes the new name of the metric.",
+    },
+    "simplify-exclude": {
+        "nargs": "*",
+        "default": ["statistics"],
+        "help": "When renaming/simplifying metric names, entries with these strings "
+        "will be ignored and won't get simplified. These options are only used when "
+        "--simplify-names is set.",
+    },
 }


@@ -172,6 +186,8 @@ class MetricsStorage(object):
         metrics=None,
         exclude=None,
         split_by=None,
+        simplify_names=False,
+        simplify_exclude=["statistics"],
     ):
         """Filters the metrics to only those that were requested by `metrics`.

@@ -183,6 +199,11 @@ class MetricsStorage(object):
         contain this string in their name will be kept.

         :param metrics list: List of metrics to keep.
+        :param exclude list: List of string matchers to exclude from the metrics
+            gathered/reported.
+        :param split_by str: The name of a metric to use to split up data by.
+        :param simplify_exclude list: List of string matchers to exclude
+            from the naming simplification process.
         :return dict: Standardized notebook data containing the requested metrics.
""" @@ -193,6 +214,8 @@ class MetricsStorage(object): return results if not exclude: exclude = [] + if not simplify_exclude: + simplify_exclude = [] # Get the field to split the results by (if any) if split_by is not None: @@ -218,6 +241,25 @@ class MetricsStorage(object): newresults.append(res) filtered[data_type] = newresults + # Simplify the filtered metric names + if simplify_names: + previous = [] + for data_type, data_info in filtered.items(): + for res in data_info: + if any([met in res["subtest"] for met in simplify_exclude]): + continue + + new = res["subtest"].split(".")[-1] + if new in previous: + self.logger.warning( + f"Another metric which ends with `{new}` was already found. " + f"{res['subtest']} will not be simplified." + ) + continue + + res["subtest"] = new + previous.append(new) + # Split the filtered results if split_by is not None: newfilt = {} @@ -257,6 +299,8 @@ def filtered_metrics( settings=False, exclude=None, split_by=None, + simplify_names=False, + simplify_exclude=["statistics"], ): """Returns standardized data extracted from the metadata instance. @@ -276,6 +320,8 @@ def filtered_metrics( metrics=metrics, exclude=exclude, split_by=split_by, + simplify_names=simplify_names, + simplify_exclude=simplify_exclude, ) # XXX returning two different types is a problem diff --git a/python/mozperftest/mozperftest/metrics/consoleoutput.py b/python/mozperftest/mozperftest/metrics/consoleoutput.py index d7bad05103a7..3c7fac0783c3 100644 --- a/python/mozperftest/mozperftest/metrics/consoleoutput.py +++ b/python/mozperftest/mozperftest/metrics/consoleoutput.py @@ -32,6 +32,8 @@ class ConsoleOutput(Layer): self.get_arg("prefix"), metrics=self.get_arg("metrics"), split_by=self.get_arg("split-by"), + simplify_names=self.get_arg("simplify-names"), + simplify_exclude=self.get_arg("simplify-exclude"), ) if not results: diff --git a/python/mozperftest/mozperftest/metrics/notebookupload.py b/python/mozperftest/mozperftest/metrics/notebookupload.py index 8d5d123804b1..614a9a4d929f 100644 --- a/python/mozperftest/mozperftest/metrics/notebookupload.py +++ b/python/mozperftest/mozperftest/metrics/notebookupload.py @@ -87,6 +87,8 @@ class Notebook(Layer): metrics=self.get_arg("metrics"), exclude=exclusions, split_by=self.get_arg("split-by"), + simplify_names=self.get_arg("simplify-names"), + simplify_exclude=self.get_arg("simplify-exclude"), ) if not results: diff --git a/python/mozperftest/mozperftest/metrics/perfherder.py b/python/mozperftest/mozperftest/metrics/perfherder.py index bd1fa7b98923..aba753122981 100644 --- a/python/mozperftest/mozperftest/metrics/perfherder.py +++ b/python/mozperftest/mozperftest/metrics/perfherder.py @@ -89,6 +89,8 @@ class Perfherder(Layer): settings=True, exclude=exclusions, split_by=self.get_arg("split-by"), + simplify_names=self.get_arg("simplify-names"), + simplify_exclude=self.get_arg("simplify-exclude"), ) if not any([results[name] for name in results]): diff --git a/python/mozperftest/mozperftest/tests/test_argparser.py b/python/mozperftest/mozperftest/tests/test_argparser.py index 73a5c71d8cac..15dad7a1c7b2 100644 --- a/python/mozperftest/mozperftest/tests/test_argparser.py +++ b/python/mozperftest/mozperftest/tests/test_argparser.py @@ -16,6 +16,13 @@ def test_argparser(): assert res.tests == ["test_one.js"] +def test_argparser_defaults(): + parser = PerftestArgumentParser() + args = ["test_one.js"] + res = parser.parse_args(args) + assert res.console_simplify_exclude == ["statistics"] + + def test_options(): assert 
     assert Options.args["--proxy"]["help"] == "Activates the proxy layer"
     assert Options.args["--no-browsertime"]["help"] == (
diff --git a/python/mozperftest/mozperftest/tests/test_perfherder.py b/python/mozperftest/mozperftest/tests/test_perfherder.py
index 571297e7298f..0e4660f705e8 100644
--- a/python/mozperftest/mozperftest/tests/test_perfherder.py
+++ b/python/mozperftest/mozperftest/tests/test_perfherder.py
@@ -64,6 +64,153 @@ def test_perfherder():
         assert "firstPaint" in subtest["name"]


+def test_perfherder_simple_names():
+    options = {
+        "perfherder": True,
+        "perfherder-stats": True,
+        "perfherder-prefix": "",
+        "perfherder-metrics": [metric_fields("firstPaint"), metric_fields("resource")],
+        "perfherder-simplify-names": True,
+        "perfherder-simplify-exclude": ["statistics"],
+    }
+
+    metrics, metadata, env = setup_env(options)
+
+    with temp_file() as output:
+        env.set_arg("output", output)
+        with metrics as m, silence():
+            m(metadata)
+        output_file = metadata.get_output()
+        with open(output_file) as f:
+            output = json.loads(f.read())
+
+    # Check some metadata
+    assert output["application"]["name"] == "firefox"
+    assert output["framework"]["name"] == "browsertime"
+
+    # Check some numbers in our data
+    assert len(output["suites"]) == 1
+    assert output["suites"][0]["value"] > 0
+
+    # Check if only firstPaint/resource metrics were obtained and
+    # that simplifications occurred
+    assert all(
+        [
+            "firstPaint" in subtest["name"]
+            or "duration" in subtest["name"]
+            or "count" in subtest["name"]
+            for subtest in output["suites"][0]["subtests"]
+        ]
+    )
+
+    found_all = {"firstPaint": False, "count": False, "duration": False}
+    for subtest in output["suites"][0]["subtests"]:
+        if subtest["name"] in found_all:
+            found_all[subtest["name"]] = True
+            continue
+        assert any([name in subtest["name"] for name in found_all.keys()])
+        # Statistics are not simplified so any metric that isn't
+        # in the list of known metrics must be a statistic
+        assert "statistics" in subtest["name"]
+
+    for entry, value in found_all.items():
+        assert found_all[entry], f"Failed finding metric simplification for {entry}"
+
+    # Statistics are not simplified by default
+    assert (
+        len(
+            [
+                subtest
+                for subtest in output["suites"][0]["subtests"]
+                if "statistics" in subtest["name"]
+            ]
+        )
+        == 27
+    )
+    assert (
+        len(
+            [
+                subtest
+                for subtest in output["suites"][0]["subtests"]
+                if "statistics" not in subtest["name"]
+            ]
+        )
+        == 3
+    )
+
+
+def test_perfherder_names_simplified_with_no_exclusions():
+    options = {
+        "perfherder": True,
+        "perfherder-stats": True,
+        "perfherder-prefix": "",
+        "perfherder-metrics": [metric_fields("firstPaint"), metric_fields("resource")],
+        "perfherder-simplify-names": True,
+    }
+
+    metrics, metadata, env = setup_env(options)
+
+    with temp_file() as output:
+        env.set_arg("output", output)
+        with metrics as m, silence():
+            m(metadata)
+        output_file = metadata.get_output()
+        with open(output_file) as f:
+            output = json.loads(f.read())
+
+    # Check some metadata
+    assert output["application"]["name"] == "firefox"
+    assert output["framework"]["name"] == "browsertime"
+
+    # Check some numbers in our data
+    assert len(output["suites"]) == 1
+    assert output["suites"][0]["value"] > 0
+
+    # In this case, some metrics will be called "median", "mean", etc.
+    # since those are the simplifications of the first statistics entries
+    # that were found.
+    assert not all(
+        [
+            "firstPaint" in subtest["name"]
+            or "duration" in subtest["name"]
+            or "count" in subtest["name"]
+            for subtest in output["suites"][0]["subtests"]
+        ]
+    )
+
+    found_all = {"firstPaint": False, "count": False, "duration": False}
+    for subtest in output["suites"][0]["subtests"]:
+        if subtest["name"] in found_all:
+            found_all[subtest["name"]] = True
+            continue
+
+    for entry, value in found_all.items():
+        assert found_all[entry], f"Failed finding metric simplification for {entry}"
+
+    # Only a portion of the metrics should still have statistics in
+    # their name due to a naming conflict that only emits a warning
+    assert (
+        len(
+            [
+                subtest
+                for subtest in output["suites"][0]["subtests"]
+                if "statistics" in subtest["name"]
+            ]
+        )
+        == 18
+    )
+    assert (
+        len(
+            [
+                subtest
+                for subtest in output["suites"][0]["subtests"]
+                if "statistics" not in subtest["name"]
+            ]
+        )
+        == 12
+    )
+
+
 def test_perfherder_with_extra_options():
     options = {
         "perfherder": True,
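
For reference, the renaming that MetricsStorage.filtered_metrics performs when
simplify_names is set can be reproduced with a small standalone sketch. This is
assumed, self-contained code, not part of the patch: only the split/exclude/dedup
logic mirrors the diff above, and the helper name simplify_subtests is invented
for illustration.

    # Standalone sketch (not part of the patch): the PerftestETL builds each
    # "subtest" name by joining nested dictionary keys with `.`; simplification
    # keeps only the final key, skipping excluded names and duplicates.
    import logging

    logger = logging.getLogger(__name__)

    def simplify_subtests(subtests, simplify_exclude=("statistics",)):
        previous = []
        for res in subtests:
            # Entries matching an exclusion string are left untouched.
            if any(met in res["subtest"] for met in simplify_exclude):
                continue
            # The final dot-joined key becomes the new metric name.
            new = res["subtest"].split(".")[-1]
            if new in previous:
                # A conflicting final key only warns; the full name is kept.
                logger.warning(
                    "Another metric which ends with `%s` was already found. "
                    "%s will not be simplified.",
                    new,
                    res["subtest"],
                )
                continue
            res["subtest"] = new
            previous.append(new)
        return subtests

    data = [
        {"subtest": "browserScripts.timings.firstPaint"},
        {"subtest": "statistics.browserScripts.timings.firstPaint.median"},
    ]
    simplify_subtests(data)
    # -> the first entry becomes "firstPaint"; the statistics entry keeps
    #    its full name because it matches the default exclusion.

This is why test_perfherder_simple_names expects the 27 statistics subtests to
keep their long names by default, and why removing the exclusion in the second
test yields only a partial, warning-based simplification.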
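Usage note (not part of the patch): because these options live in COMMON_ARGS,
each metrics layer appears to register its own prefixed flags, e.g.
--perfherder-simplify-names and --console-simplify-exclude (the latter's
["statistics"] default is what test_argparser_defaults asserts above). A
hypothetical invocation, with an illustrative test path, might look like:

    ./mach perftest testing/performance/example.js \
        --perfherder \
        --perfherder-simplify-names \
        --perfherder-simplify-exclude statistics

Passing the exclusion explicitly is redundant here since "statistics" is
already the default; it is shown only to illustrate the flag.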