#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the native unit tests.
|
|
|
|
|
|
|
|
1. Copy over test binary to /data/local on device.
|
|
|
|
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
|
2012-09-11 19:41:56 +04:00
|
|
|
to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
|
|
|
|
base dir (which maps to Context.getExternalFilesDir()).
|
2011-10-24 22:59:20 +04:00
|
|
|
3. Environment:
|
|
|
|
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
|
2012-09-11 19:41:56 +04:00
|
|
|
$EXTERNAL_STORAGE + /chrome/test/data
|
2011-10-24 22:59:20 +04:00
|
|
|
3.2. page_cycler_tests have following requirements,
|
|
|
|
3.2.1 the following data on host:
|
|
|
|
<chrome_src_dir>/tools/page_cycler
|
|
|
|
<chrome_src_dir>/data/page_cycler
|
|
|
|
3.2.2. two data directories to store above test data on device named:
|
2012-09-11 19:41:56 +04:00
|
|
|
$EXTERNAL_STORAGE + /tools/ (for database perf test)
|
|
|
|
$EXTERNAL_STORAGE + /data/ (for other perf tests)
|
2011-10-24 22:59:20 +04:00
|
|
|
3.2.3. a http server to serve http perf tests.
|
|
|
|
The http root is host's <chrome_src_dir>/data/page_cycler/, port 8000.
|
|
|
|
3.2.4 a tool named forwarder is also required to run on device to
|
|
|
|
forward the http request/response between host and device.
|
|
|
|
3.2.5 Chrome is installed on device.
|
|
|
|
4. Run the binary in the device and stream the log to the host.
|
|
|
|
4.1. Optionally, filter specific tests.
|
|
|
|
4.2. Optionally, rebaseline: run the available tests and update the
|
|
|
|
suppressions file for failures.
|
|
|
|
4.3. If we're running a single test suite and we have multiple devices
|
|
|
|
connected, we'll shard the tests.
|
|
|
|
5. Clean up the device.
|
|
|
|
|
|
|
|
Suppressions:
|
|
|
|
|
|
|
|
Individual tests in a test binary can be suppressed by listing it in
|
|
|
|
the gtest_filter directory in a file of the same name as the test binary,
|
|
|
|
one test per line. Here is an example:
|
|
|
|
|
|
|
|
$ cat gtest_filter/base_unittests_disabled
|
|
|
|
DataPackTest.Load
|
|
|
|
ReadOnlyFileUtilTest.ContentsEqual
|
|
|
|
|
|
|
|
This file is generated by the tests running on devices. If running on emulator,
|
|
|
|
additonal filter file which lists the tests only failed in emulator will be
|
|
|
|
loaded. We don't care about the rare testcases which succeeded on emuatlor, but
|
|
|
|
failed on device.
|
|
|
|
"""

import copy
import fnmatch
import logging
import optparse
import os
import signal
import subprocess
import sys
import time

from pylib import android_commands
from pylib.base_test_sharder import BaseTestSharder
from pylib import buildbot_report
from pylib import constants
from pylib import debug_info
import emulator
from pylib import ports
from pylib import run_tests_helper
from pylib import test_options_parser
from pylib.single_test_runner import SingleTestRunner
from pylib.test_result import BaseTestResult, TestResults


_TEST_SUITES = ['base_unittests',
                'cc_unittests',
                'content_unittests',
                'gpu_unittests',
                'ipc_tests',
                'media_unittests',
                'net_unittests',
                'sql_unittests',
                'sync_unit_tests',
                'ui_unittests',
                'unit_tests',
                'webkit_compositor_bindings_unittests',
                ]


def TestSuiteDir(build_type):
  """Return the base directory of test suites."""
  return os.path.abspath(os.path.join(constants.CHROME_DIR, 'out', build_type))


def FullyQualifiedTestSuites(exe, option_test_suite, build_type):
  """Return a fully qualified list of test suite paths to run.

  Args:
    exe: if True, use the executable-based test runner.
    option_test_suite: the test_suite specified as an option.
    build_type: 'Release' or 'Debug'.
  """
  test_suite_dir = TestSuiteDir(build_type)
  if option_test_suite:
    all_test_suites = [option_test_suite]
  else:
    all_test_suites = _TEST_SUITES

  if exe:
    qualified_test_suites = [os.path.join(test_suite_dir, t)
                             for t in all_test_suites]
  else:
    # out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk
    qualified_test_suites = [os.path.join(test_suite_dir,
                                          t + '_apk',
                                          t + '-debug.apk')
                             for t in all_test_suites]
  for t, q in zip(all_test_suites, qualified_test_suites):
    if not os.path.exists(q):
      logging.critical('Test suite %s not found in %s.\n'
                       'Supported test suites:\n %s\n'
                       'Ensure it has been built.\n',
                       t, q, _TEST_SUITES)
      return []
  return qualified_test_suites


class TimeProfile(object):
  """Class for simple profiling of action, with logging of cost."""

  def __init__(self, description):
    self._description = description
    self.Start()

  def Start(self):
    self._starttime = time.time()

  def Stop(self):
    """Stop profiling and dump a log."""
    if self._starttime:
      stoptime = time.time()
      logging.info('%fsec to perform %s',
                   stoptime - self._starttime, self._description)
      self._starttime = None


class Xvfb(object):
  """Class to start and stop Xvfb if relevant. Nop if not Linux."""

  def __init__(self):
    self._pid = 0

  def _IsLinux(self):
    """Return True if on Linux; else False."""
    return sys.platform.startswith('linux')

  def Start(self):
    """Start Xvfb and set an appropriate DISPLAY environment. Linux only.

    Copied from tools/code_coverage/coverage_posix.py
    """
    if not self._IsLinux():
      return
    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
                             '-ac'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    self._pid = proc.pid
    if not self._pid:
      raise Exception('Could not start Xvfb')
    os.environ['DISPLAY'] = ':9'

    # Now confirm, giving a chance for it to start if needed.
    for _ in range(10):
      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
      _, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.25)
    if retcode != 0:
      raise Exception('Could not confirm Xvfb happiness')

  def Stop(self):
    """Stop Xvfb if needed. Linux only."""
    if self._pid:
      try:
        os.kill(self._pid, signal.SIGKILL)
      except:
        pass
      del os.environ['DISPLAY']
      self._pid = 0


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, test_suite, gtest_filter,
               test_arguments, timeout, rebaseline, performance_test,
               cleanup_test_files, tool, log_dump_name, fast_and_loose,
               build_type):
    BaseTestSharder.__init__(self, attached_devices, build_type)
    self.test_suite = test_suite
    self.test_suite_basename = os.path.basename(test_suite)
    self.gtest_filter = gtest_filter or ''
    self.test_arguments = test_arguments
    self.timeout = timeout
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    self.log_dump_name = log_dump_name
    self.fast_and_loose = fast_and_loose
    self.build_type = build_type
    self.tests = []
    if not self.gtest_filter:
      # No filter has been specified; add all tests.
      self.tests, self.attached_devices = self._GetTests()

  def _GetTests(self):
    """Returns a tuple of (all_tests, available_devices).

    Tries to obtain the list of available tests.
    Raises Exception if all devices failed.
    """
    available_devices = list(self.attached_devices)
    while available_devices:
      try:
        logging.info('Obtaining tests from %s', available_devices[-1])
        all_tests = self._GetTestsFromDevice(available_devices[-1])
        return all_tests, available_devices
      except Exception as e:
        logging.info('Failed obtaining tests from %s %s',
                     available_devices[-1], e)
        available_devices.pop()
    raise Exception('No device available to get the list of tests.')

  def _GetTestsFromDevice(self, device):
    test = SingleTestRunner(device, self.test_suite, self.gtest_filter,
                            self.test_arguments, self.timeout, self.rebaseline,
                            self.performance_test, self.cleanup_test_files,
                            self.tool, 0,
                            not not self.log_dump_name, self.fast_and_loose,
                            self.build_type)
    # The executable/apk needs to be copied before we can call GetAllTests.
    test.test_package.StripAndCopyExecutable()
    all_tests = test.test_package.GetAllTests()
    if not self.rebaseline:
      disabled_list = test.GetDisabledTests()
      # Only include tests that do not match any pattern in the disabled list.
      all_tests = filter(lambda t:
                         not any([fnmatch.fnmatch(t, disabled_pattern)
                                  for disabled_pattern in disabled_list]),
                         all_tests)
    return all_tests

  def CreateShardedTestRunner(self, device, index):
    """Creates a suite-specific test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A SingleTestRunner object.
    """
    device_num = len(self.attached_devices)
    shard_size = (len(self.tests) + device_num - 1) / device_num
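    # Ceiling division, so every device gets at most shard_size tests; e.g.
    # (illustrative) 10 tests on 3 devices gives shard_size 4 and shards of
    # 4, 4 and 2 tests.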
    shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
    test_filter = ':'.join(shard_test_list) + self.gtest_filter
    return SingleTestRunner(device, self.test_suite,
                            test_filter, self.test_arguments, self.timeout,
                            self.rebaseline, self.performance_test,
                            self.cleanup_test_files, self.tool, index,
                            not not self.log_dump_name, self.fast_and_loose,
                            self.build_type)

  def OnTestsCompleted(self, test_runners, test_results):
    """Notifies that we completed the tests."""
    test_results.LogFull('Unit test', os.path.basename(self.test_suite),
                         self.build_type, self.tests)
    test_results.PrintAnnotation()
    if test_results.failed and self.rebaseline:
      test_runners[0].UpdateFilter(test_results.failed)
    if self.log_dump_name:
      # Zip all debug info outputs into a file named by log_dump_name.
      debug_info.GTestDebugInfo.ZipAndCleanResults(
          os.path.join(TestSuiteDir(self.build_type), 'debug_info_dumps'),
          self.log_dump_name)


def _RunATestSuite(options):
  """Run a single test suite.

  Helper for Dispatch() to allow stop/restart of the emulator across
  test bundles. If using the emulator, we start it on entry and stop
  it on exit.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
  buildbot_report.PrintNamedStep(step_name)
  attached_devices = []
  buildbot_emulators = []

  if options.use_emulator:
    for n in range(options.emulator_count):
      t = TimeProfile('Emulator launch %d' % n)
      avd_name = None
      if n > 0:
        # Creates a temporary AVD for the extra emulators.
        avd_name = 'run_tests_avd_%d' % n
      buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
      buildbot_emulator.Launch(kill_all_emulators=n == 0)
      t.Stop()
      buildbot_emulators.append(buildbot_emulator)
      attached_devices.append(buildbot_emulator.device)
    # Wait for all emulators to finish booting.
    map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
        buildbot_emulators)
  elif options.test_device:
    attached_devices = [options.test_device]
  else:
    attached_devices = android_commands.GetAttachedDevices()

  if not attached_devices:
    logging.critical('A device must be attached and online.')
    buildbot_report.PrintError()
    return 1

  # Reset the test port allocation. It's important to do it before starting
  # to dispatch any tests.
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if options.performance_test or options.gtest_filter:
    # These configurations can't be split across multiple devices.
    attached_devices = [attached_devices[0]]
  sharder = TestSharder(attached_devices, options.test_suite,
                        options.gtest_filter, options.test_arguments,
                        options.timeout, options.rebaseline,
                        options.performance_test,
                        options.cleanup_test_files, options.tool,
                        options.log_dump, options.fast_and_loose,
                        options.build_type)
  test_results = sharder.RunShardedTests()

  for buildbot_emulator in buildbot_emulators:
    buildbot_emulator.Shutdown()

  return len(test_results.failed)


def Dispatch(options):
  """Dispatches the tests, sharding if possible.

  If options.use_emulator is True, all tests will be run in a new emulator
  instance.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  if options.test_suite == 'help':
    ListTestSuites()
    return 0

  if options.use_xvfb:
    xvfb = Xvfb()
    xvfb.Start()

  all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite,
                                             options.build_type)
  failures = 0
  for suite in all_test_suites:
    # Give each test suite its own copy of options.
    test_options = copy.deepcopy(options)
    test_options.test_suite = suite
    failures += _RunATestSuite(test_options)

  if options.use_xvfb:
    xvfb.Stop()
  return failures


def ListTestSuites():
  """Display a list of available test suites."""
  print 'Available test suites are:'
  for test_suite in _TEST_SUITES:
    print test_suite


def main(argv):
  option_parser = optparse.OptionParser()
  test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0)
  option_parser.add_option('-s', '--suite', dest='test_suite',
                           help='Executable name of the test suite to run '
                           '(use -s help to list them)')
  option_parser.add_option('-d', '--device', dest='test_device',
                           help='Target device for the test suite to run on.')
  option_parser.add_option('-r', dest='rebaseline',
                           help='Rebaseline and update *testsuite_disabled',
                           action='store_true')
  option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
                           help='gtest filter')
  option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                           help='Additional arguments to pass to the test')
  option_parser.add_option('-p', dest='performance_test',
                           help='Indicator of performance test',
                           action='store_true')
  option_parser.add_option('-L', dest='log_dump',
                           help='File name of the log dump, which will be put '
                           'in the subfolder debug_info_dumps under the same '
                           'directory in which the test_suite exists.')
  option_parser.add_option('-e', '--emulator', dest='use_emulator',
                           action='store_true',
                           help='Run tests in a new instance of emulator')
  option_parser.add_option('-n', '--emulator_count',
                           type='int', default=1,
                           help='Number of emulators to launch for running '
                           'the tests.')
  option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
                           action='store_true',
                           help='Use Xvfb around tests (ignored if not Linux)')
  option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
                           action='store_true',
                           help='Go faster (but be less stable), '
                           'for quick testing. Example: when tracking down '
                           'tests that hang to add to the disabled list, '
                           'there is no need to redeploy the test binary '
                           'or data to the device again. '
                           'Don\'t use on bots by default!')
  option_parser.add_option('--repeat', dest='repeat', type='int',
                           default=2,
                           help='Repeat count on test timeout')
  option_parser.add_option('--exit_code', action='store_true',
                           help='If set, the exit code will be the total '
                           'number of failures.')
  option_parser.add_option('--exe', action='store_true',
                           help='If set, use the exe test runner instead of '
                           'the APK.')

  options, args = option_parser.parse_args(argv)
  if len(args) > 1:
    print 'Unknown argument:', args[1:]
    option_parser.print_usage()
    sys.exit(1)
  run_tests_helper.SetLogLevel(options.verbose_count)
  emulator.DeleteAllTempAVDs()
  failed_tests_count = Dispatch(options)

  # Failures of individual test suites are communicated by printing a
  # STEP_FAILURE message.
  # Returning a success exit status also prevents the buildbot from incorrectly
  # marking the last suite as failed if there were failures in other suites in
  # the batch (this happens because the exit status is a sum of all failures
  # from all suites, but the buildbot associates the exit status only with the
  # most recent step).
  if options.exit_code:
    return failed_tests_count
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv))