Increase Android test robustness.
Use of the emulator is probably a lost cause, but maybe this can hold us for now.

BUG=None
TEST=
Review URL: http://codereview.chromium.org/9185043
git-svn-id: http://src.chromium.org/svn/trunk/src/build@117458 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
This commit is contained in:
Parent
5402752b39
Commit
a45f7bc4cb
|
@ -92,7 +92,14 @@ class Emulator(object):
|
|||
# Time to wait for a "wait for boot complete" (property set on device).
|
||||
_WAITFORBOOT_TIMEOUT = 300
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, fast_and_loose=False):
|
||||
"""Init an Emulator.
|
||||
|
||||
Args:
|
||||
fast_and_loose: Loosen up the rules for reliable running for speed.
|
||||
Intended for quick testing or re-testing.
|
||||
|
||||
"""
|
||||
try:
|
||||
android_sdk_root = os.environ['ANDROID_SDK_ROOT']
|
||||
except KeyError:
|
||||
|
@ -102,6 +109,7 @@ class Emulator(object):
|
|||
self.emulator = os.path.join(android_sdk_root, 'tools', 'emulator')
|
||||
self.popen = None
|
||||
self.device = None
|
||||
self.fast_and_loose = fast_and_loose
|
||||
|
||||
def _DeviceName(self):
|
||||
"""Return our device name."""
|
||||
|
@ -114,7 +122,8 @@ class Emulator(object):
|
|||
If fails, an exception will be raised.
|
||||
"""
|
||||
_KillAllEmulators() # just to be sure
|
||||
self._AggressiveImageCleanup()
|
||||
if not self.fast_and_loose:
|
||||
self._AggressiveImageCleanup()
|
||||
(self.device, port) = self._DeviceName()
|
||||
emulator_command = [
|
||||
self.emulator,
|
||||
|
@ -123,13 +132,16 @@ class Emulator(object):
|
|||
# The default /data size is 64M.
|
||||
# That's not enough for 4 unit test bundles and their data.
|
||||
'-partition-size', '256',
|
||||
# ALWAYS wipe the data. We've seen cases where an emulator
|
||||
# gets 'stuck' if we don't do this (every thousand runs or
|
||||
# so).
|
||||
'-wipe-data',
|
||||
# Use a familiar name and port.
|
||||
'-avd', 'buildbot',
|
||||
'-port', str(port)]
|
||||
if not self.fast_and_loose:
|
||||
emulator_command.extend([
|
||||
# Wipe the data. We've seen cases where an emulator
|
||||
# gets 'stuck' if we don't do this (every thousand runs or
|
||||
# so).
|
||||
'-wipe-data',
|
||||
])
|
||||
logging.info('Emulator launch command: %s', ' '.join(emulator_command))
|
||||
self.popen = subprocess.Popen(args=emulator_command,
|
||||
stderr=subprocess.STDOUT)
|
||||
|
|
|
@ -132,7 +132,7 @@ class Xvfb(object):
|
|||
|
||||
def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
|
||||
timeout, performance_test, cleanup_test_files, tool,
|
||||
log_dump_name):
|
||||
log_dump_name, fast_and_loose=False):
|
||||
"""Runs the tests.
|
||||
|
||||
Args:
|
||||
|
@ -146,6 +146,8 @@ def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
|
|||
cleanup_test_files: Whether or not to cleanup test files on device.
|
||||
tool: Name of the Valgrind tool.
|
||||
log_dump_name: Name of log dump file.
|
||||
fast_and_loose: should we go extra-fast but sacrifice stability
|
||||
and/or correctness? Intended for quick cycle testing; not for bots!
|
||||
|
||||
Returns:
|
||||
A TestResults object.
|
||||
|
@ -172,7 +174,8 @@ def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
|
|||
for t in _TEST_SUITES:
|
||||
test = SingleTestRunner(device, t, gtest_filter, test_arguments,
|
||||
timeout, rebaseline, performance_test,
|
||||
cleanup_test_files, tool, not not log_dump_name)
|
||||
cleanup_test_files, tool, not not log_dump_name,
|
||||
fast_and_loose=fast_and_loose)
|
||||
test.RunTests()
|
||||
results += [test.test_results]
|
||||
# Collect debug info.
|
||||
|
@ -190,6 +193,7 @@ def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
|
|||
log_dump_name, [d for d in debug_info_list if d])
|
||||
return TestResults.FromTestResults(results)
|
||||
|
||||
|
||||
def Dispatch(options):
|
||||
"""Dispatches the tests, sharding if possible.
|
||||
|
||||
|
@ -214,7 +218,7 @@ def Dispatch(options):
|
|||
|
||||
if options.use_emulator:
|
||||
t = TimeProfile('Emulator launch')
|
||||
buildbot_emulator = emulator.Emulator()
|
||||
buildbot_emulator = emulator.Emulator(options.fast_and_loose)
|
||||
buildbot_emulator.Launch()
|
||||
t.Stop()
|
||||
attached_devices.append(buildbot_emulator.device)
|
||||
|
@ -230,13 +234,25 @@ def Dispatch(options):
|
|||
options.rebaseline, options.timeout,
|
||||
options.performance_test,
|
||||
options.cleanup_test_files, options.tool,
|
||||
options.log_dump)
|
||||
options.log_dump,
|
||||
fast_and_loose=options.fast_and_loose)
|
||||
if buildbot_emulator:
|
||||
buildbot_emulator.Shutdown()
|
||||
if options.use_xvfb:
|
||||
xvfb.Stop()
|
||||
|
||||
return len(test_results.failed)
|
||||
# Another chance if we timed out? At this point It is safe(r) to
|
||||
# run fast and loose since we just uploaded all the test data and
|
||||
# binary.
|
||||
if test_results.timed_out and options.repeat:
|
||||
logging.critical('Timed out; repeating in fast_and_loose mode.')
|
||||
options.fast_and_loose = True
|
||||
options.repeat = options.repeat - 1
|
||||
logging.critical('Repeats left: ' + str(options.repeat))
|
||||
return Dispatch(options)
|
||||
else:
|
||||
return len(test_results.failed)
|
||||
|
||||
|
||||
def ListTestSuites():
|
||||
"""Display a list of available test suites
|
||||
|
@ -256,7 +272,7 @@ def main(argv):
|
|||
help='Rebaseline and update *testsuite_disabled',
|
||||
action='store_true',
|
||||
default=False)
|
||||
option_parser.add_option('-f', dest='gtest_filter',
|
||||
option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
|
||||
help='gtest filter')
|
||||
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
|
||||
help='Additional arguments to pass to the test')
|
||||
|
@ -275,6 +291,17 @@ def main(argv):
|
|||
option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
|
||||
action='store_true', default=False,
|
||||
help='Use Xvfb around tests (ignored if not Linux)')
|
||||
option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
|
||||
action='store_true', default=False,
|
||||
help='Go faster (but be less stable), '
|
||||
'for quick testing. Example: when tracking down '
|
||||
'tests that hang to add to the disabled list, '
|
||||
'there is no need to redeploy the test binary '
|
||||
'or data to the device again. '
|
||||
'Don\'t use on bots by default!')
|
||||
option_parser.add_option('--repeat', dest='repeat', type='int',
|
||||
default=2,
|
||||
help='Repeat count on test timeout')
|
||||
options, args = option_parser.parse_args(argv)
|
||||
if len(args) > 1:
|
||||
print 'Unknown argument:', args[1:]
|
||||
|
|
|
@ -31,7 +31,8 @@ class SingleTestRunner(BaseTestRunner):
|
|||
|
||||
def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
|
||||
rebaseline, performance_test, cleanup_test_files, tool,
|
||||
dump_debug_info=False):
|
||||
dump_debug_info=False,
|
||||
fast_and_loose=False):
|
||||
BaseTestRunner.__init__(self, device)
|
||||
self._running_on_emulator = self.device.startswith('emulator')
|
||||
self._gtest_filter = gtest_filter
|
||||
|
@ -42,6 +43,7 @@ class SingleTestRunner(BaseTestRunner):
|
|||
os.path.basename(test_suite), gtest_filter)
|
||||
else:
|
||||
self.dump_debug_info = None
|
||||
self.fast_and_loose = fast_and_loose
|
||||
|
||||
self.test_package = TestPackageExecutable(self.adb, device,
|
||||
test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
|
||||
|
@ -205,7 +207,7 @@ class SingleTestRunner(BaseTestRunner):
|
|||
self.test_package.StripAndCopyExecutable()
|
||||
self.test_package.tool.CopyFiles()
|
||||
test_data = self.GetDataFilesForTestSuite()
|
||||
if test_data:
|
||||
if test_data and not self.fast_and_loose:
|
||||
if self.test_package.test_suite_basename == 'page_cycler_tests':
|
||||
# Since the test data for page cycler are huge (around 200M), we use
|
||||
# sdcard to store the data and create symbol links to map them to
|
||||
|
|
|
@ -128,6 +128,7 @@ class TestPackage(object):
|
|||
"""
|
||||
ok_tests = []
|
||||
failed_tests = []
|
||||
timed_out = False
|
||||
re_run = re.compile('\[ RUN \] ?(.*)\r\n')
|
||||
re_fail = re.compile('\[ FAILED \] ?(.*)\r\n')
|
||||
re_ok = re.compile('\[ OK \] ?(.*)\r\n')
|
||||
|
@ -152,6 +153,7 @@ class TestPackage(object):
|
|||
if found == 3: # pexpect.TIMEOUT
|
||||
logging.error('Test terminated after %d second timeout.',
|
||||
self.timeout)
|
||||
timed_out = True
|
||||
break
|
||||
p.close()
|
||||
if not self.rebaseline and ready_to_continue:
|
||||
|
@ -163,4 +165,4 @@ class TestPackage(object):
|
|||
'\npexpect.after: %s'
|
||||
% (p.before,
|
||||
p.after))]
|
||||
return TestResults.FromOkAndFailed(ok_tests, failed_tests)
|
||||
return TestResults.FromOkAndFailed(ok_tests, failed_tests, timed_out)
|
||||
|
|
|
@ -43,6 +43,7 @@ class TestPackageExecutable(TestPackage):
|
|||
|
||||
def _GetGTestReturnCode(self):
|
||||
ret = None
|
||||
ret_code = 1 # Assume failure if we can't find it
|
||||
ret_code_file = tempfile.NamedTemporaryFile()
|
||||
try:
|
||||
if not self.adb.Adb().Pull(
|
||||
|
@ -105,6 +106,9 @@ class TestPackageExecutable(TestPackage):
|
|||
cmd_helper.RunCmd(['chmod', '+x', sh_script_file.name])
|
||||
self.adb.PushIfNeeded(sh_script_file.name,
|
||||
'/data/local/chrome_test_runner.sh')
|
||||
logging.info('Conents of the test runner script: ')
|
||||
for line in open(sh_script_file.name).readlines():
|
||||
logging.info(' ' + line.rstrip())
|
||||
|
||||
def RunTestsAndListResults(self):
|
||||
"""Runs all the tests and checks for failures.
|
||||
|
|
|
@ -57,12 +57,14 @@ class TestResults(object):
|
|||
self.unknown = []
|
||||
self.disabled = []
|
||||
self.unexpected_pass = []
|
||||
self.timed_out = False
|
||||
|
||||
@staticmethod
|
||||
def FromOkAndFailed(ok, failed):
|
||||
def FromOkAndFailed(ok, failed, timed_out=False):
|
||||
ret = TestResults()
|
||||
ret.ok = ok
|
||||
ret.failed = failed
|
||||
ret.timed_out = timed_out
|
||||
return ret
|
||||
|
||||
@staticmethod
|
||||
|
@ -76,6 +78,8 @@ class TestResults(object):
|
|||
ret.unknown += t.unknown
|
||||
ret.disabled += t.disabled
|
||||
ret.unexpected_pass += t.unexpected_pass
|
||||
if t.timed_out:
|
||||
ret.timed_out = True
|
||||
return ret
|
||||
|
||||
def _Log(self, sorted_list):
|
||||
|
|
Loading…
Reference in new issue