Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1549033 - Only dump PERFHERDER_DATA when scenario tests are run with a resource usage flag. r=perftest-reviewers,stephendonner,rwood
For the Raptor 'scenario' test type, this patch prevents PERFHERDER_DATA from being output when `--power-test`, `--cpu-test`, or `--memory-test` are not used.

Differential Revision: https://phabricator.services.mozilla.com/D31665

--HG--
extra : moz-landing-system : lando
Parent: 3b33f02a03
Commit: c5fd4c93ff
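The counting rule this patch enforces is easiest to see in isolation. Below is a hypothetical restatement (the function name and shape are mine, not the patch's) of the `_get_expected_perfherder` helper added to results.py further down: one PERFHERDER_DATA blob is expected for the regular test result, plus one per resource-usage flag, and the count check is skipped entirely for a scenario test run without any such flag.

def expected_perfherder_blobs(config, suites):
    # Hypothetical restatement of _get_expected_perfherder (results.py hunk
    # below); None means no PERFHERDER_DATA is expected and the check is skipped.
    flags = [config.get(k) for k in ('power_test', 'cpu_test', 'memory_test')]
    if not any(flags):
        for suite in suites:
            if suite.get('type') == 'scenario':
                return None
    return 1 + sum(1 for f in flags if f)

assert expected_perfherder_blobs({}, [{'type': 'scenario'}]) is None
assert expected_perfherder_blobs({'power_test': True}, [{'type': 'scenario'}]) == 2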
--- a/testing/mozharness/mozharness/mozilla/testing/raptor.py
+++ b/testing/mozharness/mozharness/mozilla/testing/raptor.py
@@ -6,7 +6,6 @@ from __future__ import absolute_import, print_function, unicode_literals
 
 import argparse
 import copy
-import json
 import os
 import re
 import sys
@@ -522,34 +521,6 @@ class Raptor(TestingMixin, MercurialScript, CodeCoverageMixin, AndroidMixin):
         self.info("installing requirements for the view-gecko-profile tool")
         self.install_module(requirements=[view_gecko_profile_req])
 
-    def _validate_treeherder_data(self, parser):
-        # late import is required, because install is done in create_virtualenv
-        import jsonschema
-
-        expected_perfherder = 1
-        if self.config.get('power_test', None):
-            expected_perfherder += 1
-        if self.config.get('memory_test', None):
-            expected_perfherder += 1
-        if self.config.get('cpu_test', None):
-            expected_perfherder += 1
-        if len(parser.found_perf_data) != expected_perfherder:
-            self.critical("PERFHERDER_DATA was seen %d times, expected %d."
-                          % (len(parser.found_perf_data), expected_perfherder))
-            return
-
-        schema_path = os.path.join(external_tools_path,
-                                   'performance-artifact-schema.json')
-        self.info("Validating PERFHERDER_DATA against %s" % schema_path)
-        try:
-            with open(schema_path) as f:
-                schema = json.load(f)
-            data = json.loads(parser.found_perf_data[0])
-            jsonschema.validate(data, schema)
-        except Exception as e:
-            self.exception("Error while validating PERFHERDER_DATA")
-            self.info(str(e))
-
     def _artifact_perf_data(self, src, dest):
         if not os.path.isdir(os.path.dirname(dest)):
             # create upload dir if it doesn't already exist
@@ -646,35 +617,32 @@
             for item in parser.minidump_output:
                 self.run_command(["ls", "-l", item])
 
-        elif '--no-upload-results' not in options:
-            if not self.gecko_profile:
-                self._validate_treeherder_data(parser)
-            if not self.run_local:
-                # copy results to upload dir so they are included as an artifact
-                self.info("copying raptor results to upload dir:")
+        elif not self.run_local:
+            # copy results to upload dir so they are included as an artifact
+            self.info("copying raptor results to upload dir:")
 
-                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor.json')
-                dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
-                self.info(str(dest))
-                self._artifact_perf_data(src, dest)
+            src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor.json')
+            dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
+            self.info(str(dest))
+            self._artifact_perf_data(src, dest)
 
-                if self.power_test:
-                    src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-power.json')
-                    self._artifact_perf_data(src, dest)
+            if self.power_test:
+                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-power.json')
+                self._artifact_perf_data(src, dest)
 
-                if self.memory_test:
-                    src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-memory.json')
-                    self._artifact_perf_data(src, dest)
+            if self.memory_test:
+                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-memory.json')
+                self._artifact_perf_data(src, dest)
 
-                if self.cpu_test:
-                    src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-cpu.json')
-                    self._artifact_perf_data(src, dest)
+            if self.cpu_test:
+                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-cpu.json')
+                self._artifact_perf_data(src, dest)
 
-                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'screenshots.html')
-                if os.path.exists(src):
-                    dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'screenshots.html')
-                    self.info(str(dest))
-                    self._artifact_perf_data(src, dest)
+            src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'screenshots.html')
+            if os.path.exists(src):
+                dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'screenshots.html')
+                self.info(str(dest))
+                self._artifact_perf_data(src, dest)
 
 
 class RaptorOutputParser(OutputParser):
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -839,27 +839,40 @@ class Output(object):
         # now that we've checked for screen captures too, if there were no actual
         # test results we can bail out here
         if self.summarized_results == {}:
-            return False
+            return False, 0
 
         # when gecko_profiling, we don't want results ingested by Perfherder
         extra_opts = self.summarized_results['suites'][0].get('extraOptions', [])
-        if 'gecko_profile' not in extra_opts:
+        test_type = self.summarized_results['suites'][0].get('type', '')
+
+        output_perf_data = True
+        not_posting = '- not posting regular test results for perfherder'
+        if 'gecko_profile' in extra_opts:
+            LOG.info("gecko profiling enabled %s" % not_posting)
+            output_perf_data = False
+        elif test_type == 'scenario':
+            # if a resource-usage flag was supplied the perfherder data
+            # will still be output from output_supporting_data
+            LOG.info("scenario test type was run %s" % not_posting)
+            output_perf_data = False
+
+        total_perfdata = 0
+        if output_perf_data:
             # if we have supporting data i.e. power, we ONLY want those measurements
             # dumped out. TODO: Bug 1515406 - Add option to output both supplementary
             # data (i.e. power) and the regular Raptor test result
             # Both are already available as separate PERFHERDER_DATA json blobs
             if len(self.summarized_supporting_data) == 0:
                 LOG.info("PERFHERDER_DATA: %s" % json.dumps(self.summarized_results))
+                total_perfdata = 1
             else:
                 LOG.info("supporting data measurements exist - only posting those to perfherder")
-        else:
-            LOG.info("gecko profiling enabled - not posting results for perfherder")
 
         json.dump(self.summarized_results, open(results_path, 'w'), indent=2,
                   sort_keys=True)
         LOG.info("results can also be found locally at: %s" % results_path)
 
-        return True
+        return True, total_perfdata
 
     def output_supporting_data(self, test_names):
         '''
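Read as a decision rule, the rewritten `output()` posts regular test results only when neither gate fires; a minimal sketch (the helper name is mine, not the patch's):

def should_post_regular_results(extra_opts, test_type):
    # when gecko profiling, we don't want results ingested by Perfherder
    if 'gecko_profile' in extra_opts:
        return False
    # scenario results reach Perfherder only via output_supporting_data,
    # and only when a resource-usage flag produced supporting data
    if test_type == 'scenario':
        return False
    return True

Even when this is True, `total_perfdata` only becomes 1 if there is no supporting data; supporting-data blobs are counted separately in `output_supporting_data()` below.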
@@ -874,8 +887,9 @@ class Output(object):
         if len(self.summarized_supporting_data) == 0:
             LOG.error("no summarized supporting data found for %s" %
                       ', '.join(test_names))
-            return False
+            return False, 0
 
+        total_perfdata = 0
         for next_data_set in self.summarized_supporting_data:
             data_type = next_data_set['suites'][0]['type']
 
@@ -894,8 +908,9 @@ class Output(object):
             # the output that treeherder expects to find
             LOG.info("PERFHERDER_DATA: %s" % json.dumps(next_data_set))
             LOG.info("%s results can also be found locally at: %s" % (data_type, results_path))
+            total_perfdata += 1
 
-        return True
+        return True, total_perfdata
 
     @classmethod
     def v8_Metric(cls, val_list):
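Both methods now return a `(success, blob_count)` pair rather than a bare boolean, so the caller can total how many PERFHERDER_DATA blobs were actually printed. A stubbed sketch of the contract (the stub bodies stand in for the real implementations in the hunks above):

class Output(object):
    def output(self, test_names):
        # returns (success, blobs_printed); blobs_printed is 1 only when the
        # regular PERFHERDER_DATA blob was dumped, otherwise 0
        return True, 0

    def output_supporting_data(self, test_names):
        # returns (success, blobs_printed); one count per supporting data set
        return True, 0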
--- a/testing/raptor/raptor/raptor.py
+++ b/testing/raptor/raptor/raptor.py
@@ -136,7 +136,7 @@ class Raptor(object):
         LOG.info("main raptor init, config is: %s" % str(self.config))
 
         # setup the control server
-        self.results_handler = RaptorResultsHandler()
+        self.results_handler = RaptorResultsHandler(self.config)
        self.start_control_server()
 
         self.build_browser_profile()
@@ -185,7 +185,6 @@ class Raptor(object):
                 self.run_test(test, timeout=int(test.get('page_timeout')))
 
             return self.process_results(test_names)
-
         finally:
             self.clean_up()
 
--- a/testing/raptor/raptor/results.py
+++ b/testing/raptor/raptor/results.py
@@ -6,6 +6,9 @@
 # received from the raptor control server
 from __future__ import absolute_import
 
+import json
+import os
+
 from logger.logger import RaptorLogger
 from output import Output
 
@@ -15,7 +18,8 @@ LOG = RaptorLogger(component='raptor-results-handler')
 class RaptorResultsHandler():
     """Handle Raptor test results"""
 
-    def __init__(self):
+    def __init__(self, config=None):
+        self.config = config
         self.results = []
         self.page_timeout_list = []
         self.images = []
@@ -71,6 +75,70 @@ class RaptorResultsHandler():
             self.supporting_data = []
         self.supporting_data.append(supporting_data)
 
+    def _get_expected_perfherder(self, output):
+        expected_perfherder = 1
+
+        def is_resource_test():
+            if self.config.get('power_test', None) or \
+               self.config.get('cpu_test', None) or \
+               self.config.get('memory_test', None):
+                return True
+            return False
+
+        if not is_resource_test() and \
+           (output.summarized_supporting_data or output.summarized_results):
+            data = output.summarized_supporting_data
+            if not data:
+                data = [output.summarized_results]
+
+            for next_data_set in data:
+                data_type = next_data_set['suites'][0]['type']
+                if data_type == 'scenario':
+                    return None
+
+        if self.config.get('power_test', None):
+            expected_perfherder += 1
+        if self.config.get('memory_test', None):
+            expected_perfherder += 1
+        if self.config.get('cpu_test', None):
+            expected_perfherder += 1
+
+        return expected_perfherder
+
+    def _validate_treeherder_data(self, output, output_perfdata):
+        # late import is required, because install is done in create_virtualenv
+        import jsonschema
+
+        expected_perfherder = self._get_expected_perfherder(output)
+        if expected_perfherder is None:
+            LOG.info(
+                "Skipping PERFHERDER_DATA check "
+                "because no perfherder data output is expected"
+            )
+            return True
+        elif output_perfdata != expected_perfherder:
+            LOG.critical("PERFHERDER_DATA was seen %d times, expected %d."
+                         % (output_perfdata, expected_perfherder))
+            return False
+
+        external_tools_path = os.environ['EXTERNALTOOLSPATH']
+        schema_path = os.path.join(external_tools_path,
+                                   'performance-artifact-schema.json')
+        LOG.info("Validating PERFHERDER_DATA against %s" % schema_path)
+        try:
+            with open(schema_path) as f:
+                schema = json.load(f)
+            if output.summarized_results:
+                data = output.summarized_results
+            else:
+                data = output.summarized_supporting_data[0]
+            jsonschema.validate(data, schema)
+        except Exception as e:
+            LOG.exception("Error while validating PERFHERDER_DATA")
+            LOG.info(str(e))
+            return False
+        return True
+
     def summarize_and_output(self, test_config, test_names):
         # summarize the result data, write to file and output PERFHERDER_DATA
         LOG.info("summarizing raptor test results")
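A hypothetical smoke test of the new helper, covering the case this bug is about (assumes the raptor package is importable; `FakeOutput` is mine, not part of the patch):

from results import RaptorResultsHandler

class FakeOutput(object):
    # minimal stand-in for output.Output: one summarized scenario suite
    summarized_results = {'suites': [{'type': 'scenario'}]}
    summarized_supporting_data = None

# scenario run without resource flags: no PERFHERDER_DATA expected, check skipped
handler = RaptorResultsHandler(config={})
assert handler._get_expected_perfherder(FakeOutput()) is None

# with a resource flag the count check is active: 1 regular + 1 power blob
handler = RaptorResultsHandler(config={'power_test': True})
assert handler._get_expected_perfherder(FakeOutput()) == 2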
@@ -81,10 +149,18 @@ class RaptorResultsHandler():
         output.combine_browser_cycles()
         output.summarize_screenshots(self.images)
         # only dump out supporting data (i.e. power) if actual Raptor test completed
+        out_sup_perfdata = 0
         if self.supporting_data is not None and len(self.results) != 0:
             output.summarize_supporting_data()
-            output.output_supporting_data(test_names)
-        return output.output(test_names)
+            res, out_sup_perfdata = output.output_supporting_data(test_names)
+        res, out_perfdata = output.output(test_names)
+
+        if not self.config['gecko_profile']:
+            # res will remain True if no problems are encountered
+            # during schema validation and perfherder data counting
+            res = self._validate_treeherder_data(output, out_sup_perfdata + out_perfdata)
+
+        return res
 
 
 class RaptorTestResult():
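End to end, the new control flow condenses to the sketch below (the wrapper function is hypothetical; the names inside it are reused from the patch):

def summarize_and_output_tail(handler, output, test_names):
    # supporting data (e.g. power) is dumped first, then regular results;
    # each call reports how many PERFHERDER_DATA blobs it printed
    out_sup_perfdata = 0
    if handler.supporting_data is not None and len(handler.results) != 0:
        output.summarize_supporting_data()
        res, out_sup_perfdata = output.output_supporting_data(test_names)
    res, out_perfdata = output.output(test_names)
    if not handler.config['gecko_profile']:
        # success now also requires the printed blob count to match the
        # expected count and the data to validate against the schema
        res = handler._validate_treeherder_data(output, out_sup_perfdata + out_perfdata)
    return res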