Refactor android test results logging.

BUG=165529
Review URL: https://codereview.chromium.org/11616010

git-svn-id: http://src.chromium.org/svn/trunk/src/build@173795 4ff67af0-8c30-449e-8e8b-ad334ec8d88c

This commit is contained in:
Parent: ffa29f9f47
Commit: 796dd1c3ff
@@ -73,6 +73,10 @@ def AddTestRunnerOptions(option_parser, default_timeout=60):
                            dest='tool',
                            help='Run the test under a tool '
                                 '(use --tool help to list them)')
+  option_parser.add_option('--flakiness-dashboard-server',
+                           dest='flakiness_dashboard_server',
+                           help=('Address of the server that is hosting the '
+                                 'Chrome for Android flakiness dashboard.'))
   AddBuildTypeOption(option_parser)
@@ -126,10 +130,6 @@ def AddInstrumentationOptions(option_parser):
                                  'kept. When this is run via a sharder '
                                  'the test server ports should be kept and '
                                  'should not be reset.')
-  option_parser.add_option('--flakiness-dashboard-server',
-                           dest='flakiness_dashboard_server',
-                           help=('Address of the server that is hosting the '
-                                 'Chrome for Android flakiness dashboard.'))
   option_parser.add_option('--buildbot-step-failure',
                            action='store_true',
                            help=('If present, will set the buildbot status '
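Note (editorial, not part of the change): the two hunks above move --flakiness-dashboard-server from the instrumentation-only options into the shared AddTestRunnerOptions. A minimal sketch of how the relocated flag parses, assuming a bare optparse parser as a stand-in and a made-up server address:

# Hypothetical usage sketch: any runner built on AddTestRunnerOptions can now
# accept the flag and forward options.flakiness_dashboard_server to LogFull.
import optparse

option_parser = optparse.OptionParser()
option_parser.add_option('--flakiness-dashboard-server',
                         dest='flakiness_dashboard_server',
                         help=('Address of the server that is hosting the '
                               'Chrome for Android flakiness dashboard.'))
options, _ = option_parser.parse_args(
    ['--flakiness-dashboard-server=test-results.example.com'])
print options.flakiness_dashboard_server  # prints the made-up address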
@@ -11,6 +11,7 @@ import traceback
 import buildbot_report
 import constants
+import flakiness_dashboard_results_uploader


 class BaseTestResult(object):
@@ -125,11 +126,77 @@ class TestResults(object):
     """Returns the all broken tests including failed, crashed, unknown."""
     return self.failed + self.crashed + self.unknown

-  def LogFull(self, test_group, test_suite, build_type, tests_to_run):
-    """Output broken test logs, summarize in a log file and the test output."""
+  def _LogToFile(self, test_type, test_suite, build_type):
+    """Log results to local files which can be used for aggregation later."""
+    # TODO(frankf): Report tests that failed to run here too.
+    log_file_path = os.path.join(constants.CHROME_DIR, 'out',
+                                 build_type, 'test_logs')
+    if not os.path.exists(log_file_path):
+      os.mkdir(log_file_path)
+    full_file_name = os.path.join(log_file_path, test_type)
+    if not os.path.exists(full_file_name):
+      with open(full_file_name, 'w') as log_file:
+        print >> log_file, '\n%s results for %s build %s:' % (
+            test_type, os.environ.get('BUILDBOT_BUILDERNAME'),
+            os.environ.get('BUILDBOT_BUILDNUMBER'))
+    logging.info('Writing results to %s.' % full_file_name)
+    log_contents = [' %s result : %d tests ran' % (test_suite,
+                                                   len(self.ok) +
+                                                   len(self.failed) +
+                                                   len(self.crashed) +
+                                                   len(self.unknown))]
+    content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)),
+                     ('crashed', len(self.crashed))]
+    for (result, count) in content_pairs:
+      if count:
+        log_contents.append(', %d tests %s' % (count, result))
+    with open(full_file_name, 'a') as log_file:
+      print >> log_file, ''.join(log_contents)
+    logging.info('Writing results to %s.' % full_file_name)
+    content = {'test_group': test_type,
+               'ok': [t.name for t in self.ok],
+               'failed': [t.name for t in self.failed],
+               'crashed': [t.name for t in self.failed],
+               'unknown': [t.name for t in self.unknown],}
+    json_file_path = os.path.join(log_file_path, 'results.json')
+    with open(json_file_path, 'a') as json_file:
+      print >> json_file, json.dumps(content)
+    logging.info('Writing results to %s.' % json_file_path)
+
+  def _LogToFlakinessDashboard(self, test_type, test_package, flakiness_server):
+    """Upload results to the flakiness dashboard"""
+    # TODO(frankf): Fix upstream/downstream reporting for both test types.
+    logging.info('Upload %s %s to %s' % (test_type, test_package,
+                                         flakiness_server))
+    flakiness_dashboard_results_uploader.Upload(
+        flakiness_server, 'Chromium_Android_Instrumentation', self)
+
+  def LogFull(self, test_type, test_package, annotation=None,
+              build_type='Debug', all_tests=None, flakiness_server=None):
+    """Log the tests results for the test suite.
+
+    The results will be logged three different ways:
+      1. Log to stdout.
+      2. Log to local files for aggregating multiple test steps
+         (on buildbots only).
+      3. Log to flakiness dashboard (on buildbots only).
+
+    Args:
+      test_type: Type of the test (e.g. 'Instrumentation', 'Unit test', etc.).
+      test_package: Test package name (e.g. 'ipc_tests' for gtests,
+                    'ContentShellTest' for instrumentation tests)
+      annotation: If instrumenation test type, this is a list of annotations
+                  (e.g. ['Smoke', 'SmallTest']).
+      build_type: Release/Debug
+      all_tests: A list of all tests that were supposed to run.
+                 This is used to determine which tests have failed to run.
+                 If None, we assume all tests ran.
+      flakiness_server: If provider, upload the results to flakiness dashboard
+                        with this URL.
+    """
     # Output all broken tests or 'passed' if none broken.
     logging.critical('*' * 80)
-    logging.critical('Final result')
+    logging.critical('Final result:')
     if self.failed:
       logging.critical('Failed:')
       self._Log(sorted(self.failed))
@@ -141,44 +208,12 @@ class TestResults(object):
       self._Log(sorted(self.unknown))
     if not self.GetAllBroken():
       logging.critical('Passed')
     logging.critical('*' * 80)

-    # Summarize in a log file, if tests are running on bots.
-    if test_group and test_suite and os.environ.get('BUILDBOT_BUILDERNAME'):
-      log_file_path = os.path.join(constants.CHROME_DIR, 'out',
-                                   build_type, 'test_logs')
-      if not os.path.exists(log_file_path):
-        os.mkdir(log_file_path)
-      full_file_name = os.path.join(log_file_path, test_group)
-      if not os.path.exists(full_file_name):
-        with open(full_file_name, 'w') as log_file:
-          print >> log_file, '\n%s results for %s build %s:' % (
-              test_group, os.environ.get('BUILDBOT_BUILDERNAME'),
-              os.environ.get('BUILDBOT_BUILDNUMBER'))
-      log_contents = [' %s result : %d tests ran' % (test_suite,
-                                                     len(self.ok) +
-                                                     len(self.failed) +
-                                                     len(self.crashed) +
-                                                     len(self.unknown))]
-      content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)),
-                       ('crashed', len(self.crashed))]
-      for (result, count) in content_pairs:
-        if count:
-          log_contents.append(', %d tests %s' % (count, result))
-      with open(full_file_name, 'a') as log_file:
-        print >> log_file, ''.join(log_contents)
-      content = {'test_group': test_group,
-                 'ok': [t.name for t in self.ok],
-                 'failed': [t.name for t in self.failed],
-                 'crashed': [t.name for t in self.failed],
-                 'unknown': [t.name for t in self.unknown],}
-      with open(os.path.join(log_file_path, 'results.json'), 'a') as json_file:
-        print >> json_file, json.dumps(content)
-
-    # Summarize in the test output.
-    logging.critical('*' * 80)
     summary = ['Summary:\n']
-    if tests_to_run:
-      summary += ['TESTS_TO_RUN=%d\n' % (len(tests_to_run))]
+    if all_tests:
+      summary += ['TESTS_TO_RUN=%d\n' % len(all_tests)]
     num_tests_ran = (len(self.ok) + len(self.failed) +
                      len(self.crashed) + len(self.unknown))
     tests_passed = [t.name for t in self.ok]
@@ -190,16 +225,28 @@ class TestResults(object):
                 'FAILED=%d %s\n' % (len(tests_failed), tests_failed),
                 'CRASHED=%d %s\n' % (len(tests_crashed), tests_crashed),
                 'UNKNOWN=%d %s\n' % (len(tests_unknown), tests_unknown)]
-    if tests_to_run and num_tests_ran != len(tests_to_run):
+    if all_tests and num_tests_ran != len(all_tests):
       # Add the list of tests we failed to run.
-      tests_failed_to_run = list(set(tests_to_run) - set(tests_passed) -
+      tests_failed_to_run = list(set(all_tests) - set(tests_passed) -
                                  set(tests_failed) - set(tests_crashed) -
                                  set(tests_unknown))
       summary += ['FAILED_TO_RUN=%d %s\n' % (len(tests_failed_to_run),
                                              tests_failed_to_run)]
     summary_string = ''.join(summary)
     logging.critical(summary_string)
-    return summary_string
+    logging.critical('*' * 80)
+
+    if os.environ.get('BUILDBOT_BUILDERNAME'):
+      # It is possible to have multiple buildbot steps for the same
+      # instrumenation test package using different annotations.
+      if annotation and len(annotation) == 1:
+        test_suite = annotation[0]
+      else:
+        test_suite = test_package
+      self._LogToFile(test_type, test_suite, build_type)
+
+    if flakiness_server:
+      self._LogToFlakinessDashboard(test_type, test_package, flakiness_server)

   def PrintAnnotation(self):
     """Print buildbot annotations for test results."""
|
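Note (editorial, not part of the change): the new _LogToFile above appends one JSON object per test step to <CHROME_DIR>/out/<build_type>/test_logs/results.json. A minimal sketch of the record shape it writes, with hypothetical test names:

# Illustrative only: mirrors the 'content' dict passed to json.dumps() above.
import json

content = {'test_group': 'Unit test',
           'ok': ['FooTest.Bar'],
           'failed': ['FooTest.Baz'],
           'crashed': [],
           'unknown': []}
print json.dumps(content)  # one such line is appended per LogFull call on bots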
@@ -14,7 +14,6 @@ import time
 from pylib import apk_info
 from pylib import buildbot_report
 from pylib import constants
-from pylib import flakiness_dashboard_results_uploader
 from pylib import ports
 from pylib import run_java_tests
 from pylib import run_python_tests
@@ -23,26 +22,6 @@ from pylib import test_options_parser
 from pylib.test_result import TestResults


-def SummarizeResults(java_results, python_results, annotation, build_type):
-  """Summarize the results from the various test types.
-
-  Args:
-    java_results: a TestResults object with java test case results.
-    python_results: a TestResults object with python test case results.
-    annotation: the annotation used for these results.
-    build_type: 'Release' or 'Debug'.
-
-  Returns:
-    A tuple (all_results, summary_string, num_failing)
-  """
-  all_results = TestResults.FromTestResults([java_results, python_results])
-  summary_string = all_results.LogFull('Instrumentation', annotation,
-                                       build_type, [])
-  num_failing = (len(all_results.failed) + len(all_results.crashed) +
-                 len(all_results.unknown))
-  return all_results, summary_string, num_failing
-
-
 def DispatchInstrumentationTests(options):
   """Dispatches the Java and Python instrumentation tests, sharding if possible.
@@ -55,7 +34,7 @@ def DispatchInstrumentationTests(options):
     options: command-line options for running the Java and Python tests.

   Returns:
-    An integer representing the number of failing tests.
+    An integer representing the number of broken tests.
   """
   if not options.keep_test_server_ports:
     # Reset the test port allocation. It's important to do it before starting
@@ -74,15 +53,16 @@ def DispatchInstrumentationTests(options):
   if options.run_python_tests:
     python_results = run_python_tests.DispatchPythonTests(options)

-  all_results, summary_string, num_failing = SummarizeResults(
-      java_results, python_results, options.annotation, options.build_type)
+  all_results = TestResults.FromTestResults([java_results, python_results])

-  if options.flakiness_dashboard_server:
-    flakiness_dashboard_results_uploader.Upload(
-        options.flakiness_dashboard_server, 'Chromium_Android_Instrumentation',
-        TestResults.FromTestResults([java_results, python_results]))
+  all_results.LogFull(
+      test_type='Instrumentation',
+      test_package=options.test_apk,
+      annotation=options.annotation,
+      build_type=options.build_type,
+      flakiness_server=options.flakiness_dashboard_server)

-  return num_failing
+  return len(all_results.GetAllBroken())


 def main(argv):
@@ -108,7 +108,10 @@ def DispatchPythonTests(options):
   sharder = python_test_sharder.PythonTestSharder(
       attached_devices, available_tests, options)
   result = sharder.RunShardedTests()
-  result.LogFull('Monkey', 'Monkey', options.build_type, available_tests)
+  result.LogFull(
+      test_type='Monkey',
+      test_package='Monkey',
+      build_type=options.build_type)
   result.PrintAnnotation()
@@ -174,10 +174,10 @@ class TestSharder(BaseTestSharder):

   def __init__(self, attached_devices, test_suite, gtest_filter,
                test_arguments, timeout, cleanup_test_files, tool,
-               log_dump_name, fast_and_loose, build_type, in_webkit_checkout):
+               log_dump_name, fast_and_loose, build_type, in_webkit_checkout,
+               flakiness_server=None):
     BaseTestSharder.__init__(self, attached_devices, build_type)
     self.test_suite = test_suite
     self.test_suite_basename = os.path.basename(test_suite)
     self.gtest_filter = gtest_filter or ''
     self.test_arguments = test_arguments
     self.timeout = timeout
@@ -186,6 +186,7 @@ class TestSharder(BaseTestSharder):
     self.log_dump_name = log_dump_name
     self.fast_and_loose = fast_and_loose
     self.in_webkit_checkout = in_webkit_checkout
+    self.flakiness_server = flakiness_server
     self.all_tests = []
     if not self.gtest_filter:
       # No filter has been specified, let's add all tests then.
@@ -272,9 +273,14 @@ class TestSharder(BaseTestSharder):

   def OnTestsCompleted(self, test_runners, test_results):
     """Notifies that we completed the tests."""
-    test_results.LogFull('Unit test', os.path.basename(self.test_suite),
-                         self.build_type, self.all_tests)
+    test_results.LogFull(
+        test_type='Unit test',
+        test_package=test_runners[0].test_package.test_suite_basename,
+        build_type=self.build_type,
+        all_tests=self.all_tests,
+        flakiness_server=self.flakiness_server)
     test_results.PrintAnnotation()

     if self.log_dump_name:
       # Zip all debug info outputs into a file named by log_dump_name.
       debug_info.GTestDebugInfo.ZipAndCleanResults(
@@ -347,7 +353,8 @@ def _RunATestSuite(options):
                          options.log_dump,
                          options.fast_and_loose,
                          options.build_type,
-                         options.webkit)
+                         options.webkit,
+                         options.flakiness_dashboard_server)
   test_results = sharder.RunShardedTests()

   for buildbot_emulator in buildbot_emulators:
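Note (editorial, not part of the change): with SummarizeResults gone, the dispatchers now report brokenness as len(all_results.GetAllBroken()) rather than a separately computed num_failing. A self-contained sketch of that convention, using a hypothetical stand-in for pylib.test_result.TestResults:

# Illustrative only: FakeResults is a made-up stand-in; GetAllBroken mirrors
# the real method shown in the test_result.py hunk above.
class FakeResults(object):
  def __init__(self, failed, crashed, unknown):
    self.failed = failed
    self.crashed = crashed
    self.unknown = unknown

  def GetAllBroken(self):
    # Broken means failed, crashed, or unknown.
    return self.failed + self.crashed + self.unknown

results = FakeResults(failed=['FooTest.Bar'], crashed=[], unknown=['FooTest.Baz'])
print len(results.GetAllBroken())  # -> 2, used as the dispatcher's return value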