From c5fd4c93ff63075ebe528727f4d566f6fd54d56b Mon Sep 17 00:00:00 2001 From: Gregory Mierzwinski Date: Thu, 4 Jul 2019 15:36:40 +0000 Subject: [PATCH] Bug 1549033 - Only dump PERFHERDER_DATA when scenario tests are run with a resource usage flag. r=perftest-reviewers,stephendonner,rwood For the Raptor 'scenario' test type, this patch prevents PERFHERDER_DATA from being output when `--power-test`, `--cpu-test`, or `--memory-test` are not used. Differential Revision: https://phabricator.services.mozilla.com/D31665 --HG-- extra : moz-landing-system : lando --- .../mozharness/mozilla/testing/raptor.py | 74 +++++------------ testing/raptor/raptor/output.py | 29 +++++-- testing/raptor/raptor/raptor.py | 3 +- testing/raptor/raptor/results.py | 82 ++++++++++++++++++- 4 files changed, 123 insertions(+), 65 deletions(-) diff --git a/testing/mozharness/mozharness/mozilla/testing/raptor.py b/testing/mozharness/mozharness/mozilla/testing/raptor.py index 06e30e989fd5..1d7926fac320 100644 --- a/testing/mozharness/mozharness/mozilla/testing/raptor.py +++ b/testing/mozharness/mozharness/mozilla/testing/raptor.py @@ -6,7 +6,6 @@ from __future__ import absolute_import, print_function, unicode_literals import argparse import copy -import json import os import re import sys @@ -522,34 +521,6 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin): self.info("installing requirements for the view-gecko-profile tool") self.install_module(requirements=[view_gecko_profile_req]) - def _validate_treeherder_data(self, parser): - # late import is required, because install is done in create_virtualenv - import jsonschema - - expected_perfherder = 1 - if self.config.get('power_test', None): - expected_perfherder += 1 - if self.config.get('memory_test', None): - expected_perfherder += 1 - if self.config.get('cpu_test', None): - expected_perfherder += 1 - if len(parser.found_perf_data) != expected_perfherder: - self.critical("PERFHERDER_DATA was seen %d times, expected %d." 
- % (len(parser.found_perf_data), expected_perfherder)) - return - - schema_path = os.path.join(external_tools_path, - 'performance-artifact-schema.json') - self.info("Validating PERFHERDER_DATA against %s" % schema_path) - try: - with open(schema_path) as f: - schema = json.load(f) - data = json.loads(parser.found_perf_data[0]) - jsonschema.validate(data, schema) - except Exception as e: - self.exception("Error while validating PERFHERDER_DATA") - self.info(str(e)) - def _artifact_perf_data(self, src, dest): if not os.path.isdir(os.path.dirname(dest)): # create upload dir if it doesn't already exist @@ -646,35 +617,32 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin): for item in parser.minidump_output: self.run_command(["ls", "-l", item]) - elif '--no-upload-results' not in options: - if not self.gecko_profile: - self._validate_treeherder_data(parser) - if not self.run_local: - # copy results to upload dir so they are included as an artifact - self.info("copying raptor results to upload dir:") + elif not self.run_local: + # copy results to upload dir so they are included as an artifact + self.info("copying raptor results to upload dir:") - src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor.json') - dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json') - self.info(str(dest)) + src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor.json') + dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json') + self.info(str(dest)) + self._artifact_perf_data(src, dest) + + if self.power_test: + src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-power.json') self._artifact_perf_data(src, dest) - if self.power_test: - src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-power.json') - self._artifact_perf_data(src, dest) + if self.memory_test: + src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-memory.json') + self._artifact_perf_data(src, dest) - if self.memory_test: - src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-memory.json') - self._artifact_perf_data(src, dest) + if self.cpu_test: + src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-cpu.json') + self._artifact_perf_data(src, dest) - if self.cpu_test: - src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-cpu.json') - self._artifact_perf_data(src, dest) - - src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'screenshots.html') - if os.path.exists(src): - dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'screenshots.html') - self.info(str(dest)) - self._artifact_perf_data(src, dest) + src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'screenshots.html') + if os.path.exists(src): + dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'screenshots.html') + self.info(str(dest)) + self._artifact_perf_data(src, dest) class RaptorOutputParser(OutputParser): diff --git a/testing/raptor/raptor/output.py b/testing/raptor/raptor/output.py index 0aec63381ef5..58375c551863 100644 --- a/testing/raptor/raptor/output.py +++ b/testing/raptor/raptor/output.py @@ -839,27 +839,40 @@ class Output(object): # now that we've checked for screen captures too, if there were no actual # test results we can bail out here if self.summarized_results == {}: - return False + return False, 0 # when gecko_profiling, we don't want results ingested by Perfherder extra_opts = self.summarized_results['suites'][0].get('extraOptions', []) - if 'gecko_profile' not in extra_opts: + test_type = 
self.summarized_results['suites'][0].get('type', '') + + output_perf_data = True + not_posting = '- not posting regular test results for perfherder' + if 'gecko_profile' in extra_opts: + LOG.info("gecko profiling enabled %s" % not_posting) + output_perf_data = False + elif test_type == 'scenario': + # if a resource-usage flag was supplied the perfherder data + # will still be output from output_supporting_data + LOG.info("scenario test type was run %s" % not_posting) + output_perf_data = False + + total_perfdata = 0 + if output_perf_data: # if we have supporting data i.e. power, we ONLY want those measurements # dumped out. TODO: Bug 1515406 - Add option to output both supplementary # data (i.e. power) and the regular Raptor test result # Both are already available as separate PERFHERDER_DATA json blobs if len(self.summarized_supporting_data) == 0: LOG.info("PERFHERDER_DATA: %s" % json.dumps(self.summarized_results)) + total_perfdata = 1 else: LOG.info("supporting data measurements exist - only posting those to perfherder") - else: - LOG.info("gecko profiling enabled - not posting results for perfherder") json.dump(self.summarized_results, open(results_path, 'w'), indent=2, sort_keys=True) LOG.info("results can also be found locally at: %s" % results_path) - return True + return True, total_perfdata def output_supporting_data(self, test_names): ''' @@ -874,8 +887,9 @@ class Output(object): if len(self.summarized_supporting_data) == 0: LOG.error("no summarized supporting data found for %s" % ', '.join(test_names)) - return False + return False, 0 + total_perfdata = 0 for next_data_set in self.summarized_supporting_data: data_type = next_data_set['suites'][0]['type'] @@ -894,8 +908,9 @@ class Output(object): # the output that treeherder expects to find LOG.info("PERFHERDER_DATA: %s" % json.dumps(next_data_set)) LOG.info("%s results can also be found locally at: %s" % (data_type, results_path)) + total_perfdata += 1 - return True + return True, total_perfdata @classmethod def v8_Metric(cls, val_list): diff --git a/testing/raptor/raptor/raptor.py b/testing/raptor/raptor/raptor.py index 665adac63b70..0bb64fe87514 100644 --- a/testing/raptor/raptor/raptor.py +++ b/testing/raptor/raptor/raptor.py @@ -136,7 +136,7 @@ class Raptor(object): LOG.info("main raptor init, config is: %s" % str(self.config)) # setup the control server - self.results_handler = RaptorResultsHandler() + self.results_handler = RaptorResultsHandler(self.config) self.start_control_server() self.build_browser_profile() @@ -185,7 +185,6 @@ class Raptor(object): self.run_test(test, timeout=int(test.get('page_timeout'))) return self.process_results(test_names) - finally: self.clean_up() diff --git a/testing/raptor/raptor/results.py b/testing/raptor/raptor/results.py index 24f545bff3db..237c783a1efb 100644 --- a/testing/raptor/raptor/results.py +++ b/testing/raptor/raptor/results.py @@ -6,6 +6,9 @@ # received from the raptor control server from __future__ import absolute_import +import json +import os + from logger.logger import RaptorLogger from output import Output @@ -15,7 +18,8 @@ LOG = RaptorLogger(component='raptor-results-handler') class RaptorResultsHandler(): """Handle Raptor test results""" - def __init__(self): + def __init__(self, config=None): + self.config = config self.results = [] self.page_timeout_list = [] self.images = [] @@ -71,6 +75,70 @@ class RaptorResultsHandler(): self.supporting_data = [] self.supporting_data.append(supporting_data) + def _get_expected_perfherder(self, output): + expected_perfherder = 1 + + 
def is_resource_test(): + if self.config.get('power_test', None) or \ + self.config.get('cpu_test', None) or \ + self.config.get('memory_test', None): + return True + return False + + if not is_resource_test() and \ + (output.summarized_supporting_data or output.summarized_results): + data = output.summarized_supporting_data + if not data: + data = [output.summarized_results] + + for next_data_set in data: + data_type = next_data_set['suites'][0]['type'] + if data_type == 'scenario': + return None + + if self.config.get('power_test', None): + expected_perfherder += 1 + if self.config.get('memory_test', None): + expected_perfherder += 1 + if self.config.get('cpu_test', None): + expected_perfherder += 1 + + return expected_perfherder + + def _validate_treeherder_data(self, output, output_perfdata): + # late import is required, because install is done in create_virtualenv + import jsonschema + + expected_perfherder = self._get_expected_perfherder(output) + if expected_perfherder is None: + LOG.info( + "Skipping PERFHERDER_DATA check " + "because no perfherder data output is expected" + ) + return True + elif output_perfdata != expected_perfherder: + LOG.critical("PERFHERDER_DATA was seen %d times, expected %d." + % (output_perfdata, expected_perfherder)) + return False + + external_tools_path = os.environ['EXTERNALTOOLSPATH'] + schema_path = os.path.join(external_tools_path, + 'performance-artifact-schema.json') + LOG.info("Validating PERFHERDER_DATA against %s" % schema_path) + try: + with open(schema_path) as f: + schema = json.load(f) + if output.summarized_results: + data = output.summarized_results + else: + data = output.summarized_supporting_data[0] + jsonschema.validate(data, schema) + except Exception as e: + LOG.exception("Error while validating PERFHERDER_DATA") + LOG.info(str(e)) + return False + return True + def summarize_and_output(self, test_config, test_names): # summarize the result data, write to file and output PERFHERDER_DATA LOG.info("summarizing raptor test results") @@ -81,10 +149,18 @@ class RaptorResultsHandler(): output.combine_browser_cycles() output.summarize_screenshots(self.images) # only dump out supporting data (i.e. power) if actual Raptor test completed + out_sup_perfdata = 0 if self.supporting_data is not None and len(self.results) != 0: output.summarize_supporting_data() - output.output_supporting_data(test_names) - return output.output(test_names) + res, out_sup_perfdata = output.output_supporting_data(test_names) + res, out_perfdata = output.output(test_names) + + if not self.config['gecko_profile']: + # res will remain True if no problems are encountered + # during schema validation and perferder_data counting + res = self._validate_treeherder_data(output, out_sup_perfdata + out_perfdata) + + return res class RaptorTestResult():
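
Illustrative note: the snippet below is a minimal, standalone sketch of the perfherder-count bookkeeping that the results.py hunk above introduces in RaptorResultsHandler._get_expected_perfherder. The function name expected_perfherder_count and the explicit test_type parameter are hypothetical simplifications (the patched code derives the type from the summarized data sets instead); the config keys power_test, cpu_test, memory_test and the 'scenario' test type come from the patch itself.

    # Simplified, self-contained mirror of _get_expected_perfherder (sketch only).
    def expected_perfherder_count(config, test_type):
        """Return how many PERFHERDER_DATA blobs a run is expected to emit,
        or None when no blob is expected (a 'scenario' test run without any
        resource-usage flag)."""
        resource_flags = ('power_test', 'cpu_test', 'memory_test')
        is_resource_test = any(config.get(flag) for flag in resource_flags)

        if test_type == 'scenario' and not is_resource_test:
            # scenario runs only emit perfherder data through the
            # resource-usage (supporting data) path
            return None

        # one blob for the regular test results, plus one per enabled
        # resource-usage measurement
        return 1 + sum(1 for flag in resource_flags if config.get(flag))


    if __name__ == '__main__':
        assert expected_perfherder_count({}, 'scenario') is None
        assert expected_perfherder_count({'power_test': True}, 'scenario') == 2
        assert expected_perfherder_count({}, 'pageload') == 1
        assert expected_perfherder_count({'cpu_test': True, 'memory_test': True}, 'pageload') == 3

The count produced this way is what summarize_and_output compares against out_sup_perfdata + out_perfdata before running the jsonschema validation, which is why the 'scenario' case returns None rather than zero: it signals that the PERFHERDER_DATA check should be skipped entirely.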