[Android] Allow instrumentation test skipping.

This patch also removes a call to AdbInterface.StartInstrumentation and
implements some of the functionality of am_instrument_parser.py.

BUG=408585

Review URL: https://codereview.chromium.org/558883003

Cr-Original-Commit-Position: refs/heads/master@{#295491}
Cr-Mirrored-From: https://chromium.googlesource.com/chromium/src
Cr-Mirrored-Commit: cbcc115def0fe5d2cf4c2018fccbbc16fd4c5aba
jbudorick 2014-09-18 10:50:59 -07:00 committed by Commit bot
Parent f552e35119
Commit 83cb1b3099
5 changed files with 462 additions and 93 deletions
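
For context, the runner now builds and runs the instrumentation command itself over adb shell instead of delegating to AdbInterface.StartInstrumentation, and parses the raw INSTRUMENTATION_* output in-tree. A minimal sketch of the command it constructs (the package, runner, and argument names here are placeholders, mirroring the diff and the new unit test below):

cmd = ['am', 'instrument', '-r',
       '-e', 'test_arg_key', "'test_arg_value'",
       '-e', 'class', "'org.example.FooTest#testBar'",
       '-w', 'org.example.tests/org.example.MyTestRunner']
# device.RunShellCommand(cmd, timeout=timeout, retries=0) returns the raw
# output lines that _ParseAmInstrumentRawOutput consumes.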

View file

@@ -63,6 +63,7 @@ def CommonChecks(input_api, output_api):
unit_tests=[
J('pylib', 'device', 'device_utils_test.py'),
J('pylib', 'gtest', 'test_package_test.py'),
J('pylib', 'instrumentation', 'test_runner_test.py'),
],
env=pylib_test_env))
output.extend(_CheckDeletionsOnlyFiles(input_api, output_api))

View file

@@ -7,6 +7,7 @@
class ResultType(object):
"""Class enumerating test types."""
PASS = 'PASS'
SKIP = 'SKIP'
FAIL = 'FAIL'
CRASH = 'CRASH'
TIMEOUT = 'TIMEOUT'
@@ -15,8 +16,8 @@ class ResultType(object):
@staticmethod
def GetTypes():
"""Get a list of all test types."""
return [ResultType.PASS, ResultType.FAIL, ResultType.CRASH,
ResultType.TIMEOUT, ResultType.UNKNOWN]
return [ResultType.PASS, ResultType.SKIP, ResultType.FAIL,
ResultType.CRASH, ResultType.TIMEOUT, ResultType.UNKNOWN]
class BaseTestResult(object):
@@ -97,19 +98,26 @@ class TestRunResults(object):
s.append('[==========] %s ran.' % (tests(len(self.GetAll()))))
s.append('[ PASSED ] %s.' % (tests(len(self.GetPass()))))
not_passed = self.GetNotPass()
if len(not_passed) > 0:
s.append('[ FAILED ] %s, listed below:' % tests(len(self.GetNotPass())))
for t in self.GetFail():
skipped = self.GetSkip()
if skipped:
s.append('[ SKIPPED ] Skipped %s, listed below:' % tests(len(skipped)))
for t in sorted(skipped):
s.append('[ SKIPPED ] %s' % str(t))
all_failures = self.GetFail().union(self.GetCrash(), self.GetTimeout(),
self.GetUnknown())
if all_failures:
s.append('[ FAILED ] %s, listed below:' % tests(len(all_failures)))
for t in sorted(self.GetFail()):
s.append('[ FAILED ] %s' % str(t))
for t in self.GetCrash():
for t in sorted(self.GetCrash()):
s.append('[ FAILED ] %s (CRASHED)' % str(t))
for t in self.GetTimeout():
for t in sorted(self.GetTimeout()):
s.append('[ FAILED ] %s (TIMEOUT)' % str(t))
for t in self.GetUnknown():
for t in sorted(self.GetUnknown()):
s.append('[ FAILED ] %s (UNKNOWN)' % str(t))
s.append('')
s.append(plural(len(not_passed), 'FAILED TEST', 'FAILED TESTS'))
s.append(plural(len(all_failures), 'FAILED TEST', 'FAILED TESTS'))
return '\n'.join(s)
def GetShortForm(self):
@@ -163,6 +171,10 @@ class TestRunResults(object):
"""Get the set of all passed test results."""
return self._GetType(ResultType.PASS)
def GetSkip(self):
"""Get the set of all skipped test results."""
return self._GetType(ResultType.SKIP)
def GetFail(self):
"""Get the set of all failed test results."""
return self._GetType(ResultType.FAIL)
@@ -185,4 +197,5 @@ class TestRunResults(object):
def DidRunPass(self):
"""Return whether the test run was successful."""
return not self.GetNotPass()
return not (self.GetNotPass() - self.GetSkip())
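
With SKIP added as a result type, a run whose only non-passing results are skips now counts as successful. A quick illustrative sketch (the test name is a placeholder, and this assumes BaseTestResult can be constructed directly with a name and a result type):

from pylib.base import base_test_result

results = base_test_result.TestRunResults()
results.AddResult(base_test_result.BaseTestResult(
    'org.example.FooTest#testBar', base_test_result.ResultType.SKIP))
# The SKIP result is in GetNotPass() but also in GetSkip(), so it no longer
# fails the run.
assert results.DidRunPass()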

View file

@@ -10,7 +10,6 @@ import re
import sys
import time
from pylib import android_commands
from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
@@ -20,8 +19,7 @@ from pylib.device import device_errors
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
sys.path.append(os.path.join(sys.path[0],
os.pardir, os.pardir, 'build', 'util', 'lib',
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
import perf_tests_results_helper # pylint: disable=F0401
@@ -219,7 +217,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
self.device.old_interface.StartMonitoringLogcat()
def TestTeardown(self, test, raw_result):
def TestTeardown(self, test, result):
"""Cleans up the test harness after running a particular test.
Depending on the options of this TestRunner this might handle performance
@@ -227,13 +225,13 @@ class TestRunner(base_test_runner.BaseTestRunner):
Args:
test: The name of the test that was just run.
raw_result: result for this test.
result: result for this test.
"""
self.tool.CleanUpEnvironment()
# The logic below relies on the test passing.
if not raw_result or raw_result.GetStatusCode():
if not result or not result.DidRunPass():
return
self.TearDownPerfMonitoring(test)
@@ -351,54 +349,139 @@ class TestRunner(base_test_runner.BaseTestRunner):
timeout: Timeout time in seconds.
Returns:
An instance of am_instrument_parser.TestResult object.
An instance of InstrumentationTestResult
"""
# Build the 'am instrument' command
instrumentation_path = (
'%s/%s' % (test_package, self.options.test_runner))
args_with_filter = dict(instr_args)
args_with_filter['class'] = test
logging.info(args_with_filter)
(raw_results, _) = self.device.old_interface.Adb().StartInstrumentation(
instrumentation_path=instrumentation_path,
instrumentation_args=args_with_filter,
timeout_time=timeout)
assert len(raw_results) == 1
return raw_results[0]
cmd = ['am', 'instrument', '-r']
for k, v in instr_args.iteritems():
cmd.extend(['-e', k, "'%s'" % v])
cmd.extend(['-e', 'class', "'%s'" % test])
cmd.extend(['-w', instrumentation_path])
def _RunTest(self, test, timeout):
time_ms = lambda: int(time.time() * 1000)
# Run the test.
start_ms = time_ms()
try:
return self.RunInstrumentationTest(
test, self.test_pkg.GetPackageName(),
self._GetInstrumentationArgs(), timeout)
except (device_errors.CommandTimeoutError,
# TODO(jbudorick) Remove this once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.WaitForResponseTimedOutError):
logging.info('Ran the test with timeout of %ds.' % timeout)
raise
instr_output = self.device.RunShellCommand(
cmd, timeout=timeout, retries=0)
except device_errors.CommandTimeoutError:
return test_result.InstrumentationTestResult(
test, base_test_result.ResultType.TIMEOUT, start_ms,
time_ms() - start_ms)
duration_ms = time_ms() - start_ms
#override
def RunTest(self, test):
raw_result = None
start_date_ms = None
results = base_test_result.TestRunResults()
timeout = (self._GetIndividualTestTimeoutSecs(test) *
self._GetIndividualTestTimeoutScale(test) *
self.tool.GetTimeoutScale())
try:
self.TestSetup(test)
start_date_ms = int(time.time()) * 1000
raw_result = self._RunTest(test, timeout)
duration_ms = int(time.time()) * 1000 - start_date_ms
status_code = raw_result.GetStatusCode()
if status_code:
if self.options.screenshot_failures:
self._TakeScreenshot(test)
log = raw_result.GetFailureReason()
if not log:
log = 'No information.'
# Parse the test output
_, _, statuses = self._ParseAmInstrumentRawOutput(instr_output)
return self._GenerateTestResult(test, statuses, start_ms, duration_ms)
@staticmethod
def _ParseAmInstrumentRawOutput(raw_output):
"""Parses the output of an |am instrument -r| call.
Args:
raw_output: the output of an |am instrument -r| call as a list of lines
Returns:
A 3-tuple containing:
- the instrumentation code as an integer
- the instrumentation result as a list of lines
- the instrumentation statuses received as a list of 2-tuples
containing:
- the status code as an integer
- the bundle dump as a dict mapping string keys to a list of
strings, one for each line.
"""
INSTR_STATUS = 'INSTRUMENTATION_STATUS: '
INSTR_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: '
INSTR_RESULT = 'INSTRUMENTATION_RESULT: '
INSTR_CODE = 'INSTRUMENTATION_CODE: '
last = None
instr_code = None
instr_result = []
instr_statuses = []
bundle = {}
for line in raw_output:
if line.startswith(INSTR_STATUS):
instr_var = line[len(INSTR_STATUS):]
if '=' in instr_var:
k, v = instr_var.split('=', 1)
bundle[k] = [v]
last = INSTR_STATUS
last_key = k
else:
logging.debug('Unknown "%s" line: %s' % (INSTR_STATUS, line))
elif line.startswith(INSTR_STATUS_CODE):
instr_status = line[len(INSTR_STATUS_CODE):]
instr_statuses.append((int(instr_status), bundle))
bundle = {}
last = INSTR_STATUS_CODE
elif line.startswith(INSTR_RESULT):
instr_result.append(line[len(INSTR_RESULT):])
last = INSTR_RESULT
elif line.startswith(INSTR_CODE):
instr_code = int(line[len(INSTR_CODE):])
last = INSTR_CODE
elif last == INSTR_STATUS:
bundle[last_key].append(line)
elif last == INSTR_RESULT:
instr_result.append(line)
return (instr_code, instr_result, instr_statuses)
def _GenerateTestResult(self, test, instr_statuses, start_ms, duration_ms):
"""Generate the result of |test| from |instr_statuses|.
Args:
instr_statuses: A list of 2-tuples containing:
- the status code as an integer
- the bundle dump as a dict mapping string keys to string values
Note that this is the same as the third item in the 3-tuple returned by
|_ParseAmInstrumentRawOutput|.
start_ms: The start time of the test in milliseconds.
duration_ms: The duration of the test in milliseconds.
Returns:
An InstrumentationTestResult object.
"""
INSTR_STATUS_CODE_START = 1
INSTR_STATUS_CODE_OK = 0
INSTR_STATUS_CODE_ERROR = -1
INSTR_STATUS_CODE_FAIL = -2
log = ''
result_type = base_test_result.ResultType.UNKNOWN
for status_code, bundle in instr_statuses:
if status_code == INSTR_STATUS_CODE_START:
pass
elif status_code == INSTR_STATUS_CODE_OK:
bundle_test = '%s#%s' % (
''.join(bundle.get('class', [''])),
''.join(bundle.get('test', [''])))
skipped = ''.join(bundle.get('test_skipped', ['']))
if (test == bundle_test and
result_type == base_test_result.ResultType.UNKNOWN):
result_type = base_test_result.ResultType.PASS
elif skipped.lower() in ('true', '1', 'yes'):
result_type = base_test_result.ResultType.SKIP
logging.info('Skipped ' + test)
else:
if status_code not in (INSTR_STATUS_CODE_ERROR,
INSTR_STATUS_CODE_FAIL):
logging.info('Unrecognized status code %d. Handling as an error.',
status_code)
result_type = base_test_result.ResultType.FAIL
if 'stack' in bundle:
log = '\n'.join(bundle['stack'])
# Dismiss any error dialogs. Limit the number in case we have an error
# loop or we are failing to dismiss.
for _ in xrange(10):
@@ -409,32 +492,29 @@ class TestRunner(base_test_runner.BaseTestRunner):
if package in self.test_pkg.GetPackageName():
result_type = base_test_result.ResultType.CRASH
break
result = test_result.InstrumentationTestResult(
test, result_type, start_date_ms, duration_ms, log=log)
else:
result = test_result.InstrumentationTestResult(
test, base_test_result.ResultType.PASS, start_date_ms, duration_ms)
return test_result.InstrumentationTestResult(
test, result_type, start_ms, duration_ms, log=log)
#override
def RunTest(self, test):
results = base_test_result.TestRunResults()
timeout = (self._GetIndividualTestTimeoutSecs(test) *
self._GetIndividualTestTimeoutScale(test) *
self.tool.GetTimeoutScale())
try:
self.TestSetup(test)
result = self.RunInstrumentationTest(
test, self.test_pkg.GetPackageName(), self._GetInstrumentationArgs(),
timeout)
results.AddResult(result)
# Catch exceptions thrown by StartInstrumentation().
# See ../../third_party/android/testrunner/adb_interface.py
except (device_errors.CommandTimeoutError,
device_errors.DeviceUnreachableError,
# TODO(jbudorick) Remove these once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.WaitForResponseTimedOutError,
android_commands.errors.DeviceUnresponsiveError,
android_commands.errors.InstrumentationError), e:
if start_date_ms:
duration_ms = int(time.time()) * 1000 - start_date_ms
else:
start_date_ms = int(time.time()) * 1000
duration_ms = 0
device_errors.DeviceUnreachableError) as e:
message = str(e)
if not message:
message = 'No information.'
results.AddResult(test_result.InstrumentationTestResult(
test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms,
test, base_test_result.ResultType.CRASH, int(time.time() * 1000), 0,
log=message))
raw_result = None
self.TestTeardown(test, raw_result)
self.TestTeardown(test, results)
return (results, None if results.DidRunPass() else test)
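
Putting the two new helpers together, a rough end-to-end sketch for a skipped test (class and test names are placeholders; runner stands for a TestRunner instance constructed with mocks as in the unit tests below, and the raw lines mirror those tests):

raw_output = [
    'INSTRUMENTATION_STATUS: class=org.example.FooTest',
    'INSTRUMENTATION_STATUS: test=testBar',
    'INSTRUMENTATION_STATUS_CODE: 1',
    'INSTRUMENTATION_STATUS: test_skipped=true',
    'INSTRUMENTATION_STATUS_CODE: 0',
    'INSTRUMENTATION_CODE: -1',
]
_, _, statuses = runner._ParseAmInstrumentRawOutput(raw_output)
result = runner._GenerateTestResult(
    'org.example.FooTest#testBar', statuses, 0, 1000)
# result.GetType() should be base_test_result.ResultType.SKIP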

View file

@@ -0,0 +1,275 @@
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for instrumentation.TestRunner."""
# pylint: disable=W0212
import os
import sys
import unittest
from pylib import constants
from pylib.base import base_test_result
from pylib.instrumentation import test_runner
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class InstrumentationTestRunnerTest(unittest.TestCase):
def setUp(self):
options = mock.Mock()
options.tool = ''
package = mock.Mock()
self.instance = test_runner.TestRunner(options, None, 0, package)
def testParseAmInstrumentRawOutput_nothing(self):
code, result, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(['']))
self.assertEqual(None, code)
self.assertEqual([], result)
self.assertEqual([], statuses)
def testParseAmInstrumentRawOutput_noMatchingStarts(self):
raw_output = [
'',
'this.is.a.test.package.TestClass:.',
'Test result for =.',
'Time: 1.234',
'',
'OK (1 test)',
]
code, result, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
self.assertEqual(None, code)
self.assertEqual([], result)
self.assertEqual([], statuses)
def testParseAmInstrumentRawOutput_resultAndCode(self):
raw_output = [
'INSTRUMENTATION_RESULT: foo',
'bar',
'INSTRUMENTATION_CODE: -1',
]
code, result, _ = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
self.assertEqual(-1, code)
self.assertEqual(['foo', 'bar'], result)
def testParseAmInstrumentRawOutput_oneStatus(self):
raw_output = [
'INSTRUMENTATION_STATUS: foo=1',
'INSTRUMENTATION_STATUS: bar=hello',
'INSTRUMENTATION_STATUS: world=false',
'INSTRUMENTATION_STATUS: class=this.is.a.test.package.TestClass',
'INSTRUMENTATION_STATUS: test=testMethod',
'INSTRUMENTATION_STATUS_CODE: 0',
]
_, _, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
expected = [
(0, {
'foo': ['1'],
'bar': ['hello'],
'world': ['false'],
'class': ['this.is.a.test.package.TestClass'],
'test': ['testMethod'],
})
]
self.assertEqual(expected, statuses)
def testParseAmInstrumentRawOutput_multiStatus(self):
raw_output = [
'INSTRUMENTATION_STATUS: class=foo',
'INSTRUMENTATION_STATUS: test=bar',
'INSTRUMENTATION_STATUS_CODE: 1',
'INSTRUMENTATION_STATUS: test_skipped=true',
'INSTRUMENTATION_STATUS_CODE: 0',
'INSTRUMENTATION_STATUS: class=hello',
'INSTRUMENTATION_STATUS: test=world',
'INSTRUMENTATION_STATUS: stack=',
'foo/bar.py (27)',
'hello/world.py (42)',
'test/file.py (1)',
'INSTRUMENTATION_STATUS_CODE: -1',
]
_, _, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
expected = [
(1, {'class': ['foo'], 'test': ['bar'],}),
(0, {'test_skipped': ['true']}),
(-1, {
'class': ['hello'],
'test': ['world'],
'stack': ['', 'foo/bar.py (27)', 'hello/world.py (42)',
'test/file.py (1)'],
}),
]
self.assertEqual(expected, statuses)
def testParseAmInstrumentRawOutput_statusResultAndCode(self):
raw_output = [
'INSTRUMENTATION_STATUS: class=foo',
'INSTRUMENTATION_STATUS: test=bar',
'INSTRUMENTATION_STATUS_CODE: 1',
'INSTRUMENTATION_RESULT: hello',
'world',
'',
'',
'INSTRUMENTATION_CODE: 0',
]
code, result, statuses = (
test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
self.assertEqual(0, code)
self.assertEqual(['hello', 'world', '', ''], result)
self.assertEqual([(1, {'class': ['foo'], 'test': ['bar']})], statuses)
def testGenerateTestResult_noStatus(self):
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', [], 0, 1000)
self.assertEqual('test.package.TestClass#testMethod', result.GetName())
self.assertEqual(base_test_result.ResultType.UNKNOWN, result.GetType())
self.assertEqual('', result.GetLog())
self.assertEqual(1000, result.GetDur())
def testGenerateTestResult_testPassed(self):
statuses = [
(1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(0, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
]
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.PASS, result.GetType())
def testGenerateTestResult_testSkipped_first(self):
statuses = [
(0, {
'test_skipped': ['true'],
}),
(1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(0, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
]
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.SKIP, result.GetType())
def testGenerateTestResult_testSkipped_last(self):
statuses = [
(1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(0, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(0, {
'test_skipped': ['true'],
}),
]
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.SKIP, result.GetType())
def testGenerateTestResult_testSkipped_false(self):
statuses = [
(0, {
'test_skipped': ['false'],
}),
(1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(0, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
]
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.PASS, result.GetType())
def testGenerateTestResult_testFailed(self):
statuses = [
(1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(-2, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
]
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.FAIL, result.GetType())
def testGenerateTestResult_testCrashed(self):
self.instance.test_pkg.GetPackageName = mock.Mock(
return_value='generate.test.result.test.package')
self.instance.device.old_interface.DismissCrashDialogIfNeeded = mock.Mock(
return_value='generate.test.result.test.package')
statuses = [
(1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
}),
(-1, {
'class': ['test.package.TestClass'],
'test': ['testMethod'],
'stack': ['', 'foo/bar.py (27)', 'hello/world.py (42)'],
}),
]
result = self.instance._GenerateTestResult(
'test.package.TestClass#testMethod', statuses, 0, 1000)
self.assertEqual(base_test_result.ResultType.CRASH, result.GetType())
self.assertEqual('\nfoo/bar.py (27)\nhello/world.py (42)', result.GetLog())
def testRunInstrumentationTest_verifyAdbShellCommand(self):
self.instance.options.test_runner = 'MyTestRunner'
self.instance.device.RunShellCommand = mock.Mock()
self.instance._GenerateTestResult = mock.Mock()
with mock.patch('pylib.instrumentation.test_runner.'
'TestRunner._ParseAmInstrumentRawOutput',
return_value=(mock.Mock(), mock.Mock(), mock.Mock())):
self.instance.RunInstrumentationTest(
'test.package.TestClass#testMethod',
'test.package',
{'test_arg_key': 'test_arg_value'},
100)
self.instance.device.RunShellCommand.assert_called_with(
['am', 'instrument', '-r',
'-e', 'test_arg_key', "'test_arg_value'",
'-e', 'class', "'test.package.TestClass#testMethod'",
'-w', 'test.package/MyTestRunner'],
timeout=100, retries=0)
if __name__ == '__main__':
unittest.main(verbosity=2)

View file

@@ -175,21 +175,21 @@ def AddJavaTestOptions(option_parser):
'-E', '--exclude-annotation', dest='exclude_annotation_str',
help=('Comma-separated list of annotations. Exclude tests with these '
'annotations.'))
option_parser.add_option('--screenshot', dest='screenshot_failures',
action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option('--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
option_parser.add_option('--official-build', action='store_true',
help='Run official build tests.')
option_parser.add_option('--test_data', action='append', default=[],
help=('Each instance defines a directory of test '
'data that should be copied to the target(s) '
'before running the tests. The argument '
'should be of the form <target>:<source>, '
'<target> is relative to the device data'
'directory, and <source> is relative to the '
'chromium build directory.'))
option_parser.add_option(
'--screenshot', dest='screenshot_failures', action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option(
'--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
option_parser.add_option(
'--official-build', action='store_true', help='Run official build tests.')
option_parser.add_option(
'--test_data', '--test-data', action='append', default=[],
help=('Each instance defines a directory of test data that should be '
'copied to the target(s) before running the tests. The argument '
'should be of the form <target>:<source>, <target> is relative to '
'the device data directory, and <source> is relative to the '
'chromium build directory.'))
def ProcessJavaTestOptions(options):