Bug 1520130 - [raptor] Improve Raptor "no raptor test results were found" message, r=rwood.

Bob Clary 2019-01-31 20:35:56 -08:00
Parent d6dd350c66
Commit 5ccc06113c
3 changed files with 21 additions and 15 deletions

View file

@@ -32,7 +32,7 @@ class Output(object):
self.summarized_screenshots = []
self.subtest_alert_on = subtest_alert_on
-def summarize(self):
+def summarize(self, test_names):
suites = []
test_results = {
'framework': {
@@ -43,7 +43,8 @@ class Output(object):
# check if we actually have any results
if len(self.results) == 0:
-LOG.error("error: no raptor test results found!")
+LOG.error("error: no raptor test results found for %s" %
+          ', '.join(test_names))
return
for test in self.results:
@@ -134,7 +135,8 @@ class Output(object):
suite['subtests'] = subtests
else:
-LOG.error("output.summarize received unsupported test results type")
+LOG.error("output.summarize received unsupported test results type for %s" %
+          test.name)
return
# for benchmarks there is generally more than one subtest in each cycle
@@ -602,10 +604,11 @@ class Output(object):
self.summarized_screenshots.append("""</table></body> </html>""")
-def output(self):
+def output(self, test_names):
"""output to file and perfherder data json """
if self.summarized_results == {}:
-LOG.error("error: no summarized raptor results found!")
+LOG.error("error: no summarized raptor results found for %s" %
+          ', '.join(test_names))
return False
if os.environ['MOZ_UPLOAD_DIR']:
@@ -650,7 +653,7 @@ class Output(object):
return True
-def output_supporting_data(self):
+def output_supporting_data(self, test_names):
'''
Supporting data was gathered outside of the main raptor test; it has already
been summarized, now output it appropriately.
@@ -661,7 +664,8 @@ class Output(object):
from the actual Raptor test that was run when the supporting data was gathered.
'''
if len(self.summarized_supporting_data) == 0:
-LOG.error("error: no summarized supporting data found!")
+LOG.error("error: no summarized supporting data found for %s" %
+          ', '.join(test_names))
return False
for next_data_set in self.summarized_supporting_data:

View file

@@ -464,7 +464,7 @@ class Raptor(object):
self.config,
test)
-def process_results(self):
+def process_results(self, test_names):
# when running locally output results in build/raptor.json; when running
# in production output to a local.json to be turned into tc job artifact
if self.config.get('run_local', False):
@@ -477,7 +477,7 @@ class Raptor(object):
raptor_json_path = os.path.join(os.getcwd(), 'local.json')
self.config['raptor_json_path'] = raptor_json_path
-return self.results_handler.summarize_and_output(self.config)
+return self.results_handler.summarize_and_output(self.config, test_names)
def get_page_timeout_list(self):
return self.results_handler.page_timeout_list
@@ -589,6 +589,7 @@ def main(args=sys.argv[1:]):
# if a test name specified on command line, and it exists, just run that one
# otherwise run all available raptor tests that are found for this browser
raptor_test_list = get_raptor_test_list(args, mozinfo.os)
+raptor_test_names = [raptor_test['name'] for raptor_test in raptor_test_list]
# ensure we have at least one valid test to run
if len(raptor_test_list) == 0:
@@ -622,12 +623,13 @@
raptor.run_test(next_test, timeout=int(next_test['page_timeout']))
-success = raptor.process_results()
+success = raptor.process_results(raptor_test_names)
raptor.clean_up()
if not success:
# didn't get test results; test timed out or crashed, etc. we want job to fail
-LOG.critical("TEST-UNEXPECTED-FAIL: no raptor test results were found")
+LOG.critical("TEST-UNEXPECTED-FAIL: no raptor test results were found for %s" %
+             ', '.join(raptor_test_names))
os.sys.exit(1)
# if we have results but one test page timed out (i.e. one tp6 test page didn't load

View file

@@ -66,17 +66,17 @@ class RaptorResultsHandler():
self.supporting_data = []
self.supporting_data.append(supporting_data)
-def summarize_and_output(self, test_config):
+def summarize_and_output(self, test_config, test_names):
# summarize the result data, write to file and output PERFHERDER_DATA
LOG.info("summarizing raptor test results")
output = Output(self.results, self.supporting_data, test_config['subtest_alert_on'])
-output.summarize()
+output.summarize(test_names)
output.summarize_screenshots(self.images)
# only dump out supporting data (i.e. power) if actual Raptor test completed
if self.supporting_data is not None and len(self.results) != 0:
output.summarize_supporting_data()
-output.output_supporting_data()
-return output.output()
+output.output_supporting_data(test_names)
+return output.output(test_names)
class RaptorTestResult():
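
For context, here is a minimal, self-contained sketch (not part of this commit) of the pattern the diff introduces: the list of test names collected in main() is threaded through to the points that report missing results, so the "no results" error names the tests that were expected to produce data. The stub class and test names below are hypothetical stand-ins for the real Raptor classes, which live in testing/raptor/raptor.

import logging

LOG = logging.getLogger("raptor-sketch")
logging.basicConfig(level=logging.ERROR)

class Output(object):
    # Hypothetical stand-in for raptor's Output class; only the error path
    # touched by this commit is mirrored here.
    def __init__(self, results):
        self.results = results

    def summarize(self, test_names):
        # With the test names passed in, the error message can identify
        # which tests produced no result data.
        if len(self.results) == 0:
            LOG.error("error: no raptor test results found for %s" %
                      ', '.join(test_names))
            return
        # ... real summarization would happen here ...

if __name__ == "__main__":
    raptor_test_names = ['raptor-tp6-1-firefox', 'raptor-speedometer-firefox']
    Output(results=[]).summarize(raptor_test_names)
    # Logs: error: no raptor test results found for
    #       raptor-tp6-1-firefox, raptor-speedometer-firefox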