Creates a new test runner script, test_runner.py

This new script serves as a unified entry point for all tests. The
existing scripts are now just thin wrappers around the new script, so
old commands should still work, but the new script can also be invoked
directly to run the various types of tests (see the example
invocations below).

There are a few TODOs left:
* Add options to run Monkey tests.

Miscellaneous notes:
* --python_test_root is now a required flag when Python host-driven
  tests are being run.
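
Example invocations (taken from the usage strings built into the new
script; flags shown are illustrative, not exhaustive):

  test_runner.py gtest -s base_unittests
  test_runner.py content_browsertests
  test_runner.py instrumentation -I --test-apk=ChromiumTestShellTest
  test_runner.py uiautomator \
      --test-jar=chromium_testshell_uiautomator_tests \
      --package-name=org.chromium.chrome.testshell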

BUG=248351

Review URL: https://chromiumcodereview.appspot.com/15942016

git-svn-id: http://src.chromium.org/svn/trunk/src/build@210035 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
gkanwar@google.com 2013-07-03 20:31:43 +00:00
Parent 251cdcc175
Commit 40f6a425e6
14 changed files: 662 additions and 480 deletions

View File

@@ -4,6 +4,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to install APKs from the command line quickly."""
import multiprocessing
import optparse
import os
@@ -15,6 +17,32 @@ from pylib.utils import apk_helper
from pylib.utils import test_options_parser
def AddInstallAPKOption(option_parser):
"""Adds apk option used to install the APK to the OptionParser."""
test_options_parser.AddBuildTypeOption(option_parser)
option_parser.add_option('--apk',
help=('The name of the apk containing the '
' application (with the .apk extension).'))
option_parser.add_option('--apk_package',
help=('The package name used by the apk containing '
'the application.'))
option_parser.add_option('--keep_data',
action='store_true',
default=False,
help=('Keep the package data when installing '
'the application.'))
def ValidateInstallAPKOption(option_parser, options):
"""Validates the apk option and potentially qualifies the path."""
if not options.apk:
option_parser.error('--apk is mandatory.')
if not os.path.exists(options.apk):
options.apk = os.path.join(constants.DIR_SOURCE_ROOT,
'out', options.build_type,
'apks', options.apk)
def _InstallApk(args):
apk_path, apk_package, keep_data, device = args
result = android_commands.AndroidCommands(device=device).ManagedInstall(
@@ -25,9 +53,9 @@ def _InstallApk(args):
def main(argv):
parser = optparse.OptionParser()
test_options_parser.AddInstallAPKOption(parser)
AddInstallAPKOption(parser)
options, args = parser.parse_args(argv)
test_options_parser.ValidateInstallAPKOption(parser, options)
ValidateInstallAPKOption(parser, options)
if len(args) > 1:
raise Exception('Error: Unknown argument:', args[1:])

View File

@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dispatches content_browsertests."""
import logging
import os
import sys
@@ -17,10 +19,12 @@ from pylib.utils import report_results
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib'))
from common import unittest_util
def Dispatch(options):
"""Dispatches all content_browsertests."""
attached_devices = []
if options.test_device:
attached_devices = [options.test_device]
@@ -61,9 +65,9 @@ def Dispatch(options):
# Get tests and split them up based on the number of devices.
all_enabled = gtest_dispatch.GetAllEnabledTests(RunnerFactory,
attached_devices)
if options.gtest_filter:
if options.test_filter:
all_tests = unittest_util.FilterTestNames(all_enabled,
options.gtest_filter)
options.test_filter)
else:
all_tests = _FilterTests(all_enabled)
@@ -84,17 +88,22 @@ def Dispatch(options):
flakiness_server=options.flakiness_dashboard_server)
report_results.PrintAnnotation(test_results)
return len(test_results.GetNotPass())
def _FilterTests(all_enabled_tests):
"""Filters out tests and fixtures starting with PRE_ and MANUAL_."""
return [t for t in all_enabled_tests if _ShouldRunOnBot(t)]
def _ShouldRunOnBot(test):
fixture, case = test.split('.', 1)
if _StartsWith(fixture, case, "PRE_"):
if _StartsWith(fixture, case, 'PRE_'):
return False
if _StartsWith(fixture, case, "MANUAL_"):
if _StartsWith(fixture, case, 'MANUAL_'):
return False
return True
def _StartsWith(a, b, prefix):
return a.startswith(prefix) or b.startswith(prefix)

View File

@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dispatches GTests."""
import copy
import fnmatch
import logging
@@ -65,6 +67,8 @@ def GetTestsFromDevice(runner):
Args:
runner: a TestRunner.
Returns:
All non-disabled tests on the device.
"""
# The executable/apk needs to be copied before we can call GetAllTests.
runner.test_package.StripAndCopyExecutable()
@@ -155,8 +159,8 @@ def _RunATestSuite(options, suite_name):
constants.GTEST_COMMAND_LINE_FILE)
# Get tests and split them up based on the number of devices.
if options.gtest_filter:
all_tests = [t for t in options.gtest_filter.split(':') if t]
if options.test_filter:
all_tests = [t for t in options.test_filter.split(':') if t]
else:
all_tests = GetAllEnabledTests(RunnerFactory, attached_devices)
num_devices = len(attached_devices)
@@ -210,7 +214,7 @@ def Dispatch(options):
framebuffer.Start()
all_test_suites = _FullyQualifiedTestSuites(options.exe, options.test_suite,
options.build_type)
options.build_type)
failures = 0
for suite_name, suite_path in all_test_suites:
# Give each test suite its own copy of options.

View File

@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines TestPackageApk to help run APK-based native tests."""
import logging
import os
@@ -96,7 +97,7 @@ class TestPackageApk(TestPackage):
self._StartActivity()
# Wait for native test to complete.
p = self._WatchFifo(timeout=30 * self.tool.GetTimeoutScale())
p.expect("<<ScopedMainEntryLogger")
p.expect('<<ScopedMainEntryLogger')
p.close()
finally:
self.tool.CleanUpEnvironment()
@@ -105,8 +106,8 @@ class TestPackageApk(TestPackage):
ret = self._ParseGTestListTests(content)
return ret
def CreateTestRunnerScript(self, gtest_filter, test_arguments):
self._CreateTestRunnerScript('--gtest_filter=%s %s' % (gtest_filter,
def CreateTestRunnerScript(self, test_filter, test_arguments):
self._CreateTestRunnerScript('--gtest_filter=%s %s' % (test_filter,
test_arguments))
def RunTestsAndListResults(self):

View File

@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines TestPackageExecutable to help run stand-alone executables."""
import logging
import os
@@ -89,11 +90,11 @@ class TestPackageExecutable(TestPackage):
self.test_suite_basename))
return self._ParseGTestListTests(all_tests)
def CreateTestRunnerScript(self, gtest_filter, test_arguments):
def CreateTestRunnerScript(self, test_filter, test_arguments):
"""Creates a test runner script and pushes to the device.
Args:
gtest_filter: A gtest_filter flag.
test_filter: A test_filter flag.
test_arguments: Additional arguments to pass to the test binary.
"""
tool_wrapper = self.tool.GetTestWrapper()
@@ -108,13 +109,13 @@ class TestPackageExecutable(TestPackage):
self._AddNativeCoverageExports(),
tool_wrapper, constants.TEST_EXECUTABLE_DIR,
self.test_suite_basename,
gtest_filter, test_arguments,
test_filter, test_arguments,
TestPackageExecutable._TEST_RUNNER_RET_VAL_FILE))
sh_script_file.flush()
cmd_helper.RunCmd(['chmod', '+x', sh_script_file.name])
self.adb.PushIfNeeded(
sh_script_file.name,
constants.TEST_EXECUTABLE_DIR + '/chrome_test_runner.sh')
sh_script_file.name,
constants.TEST_EXECUTABLE_DIR + '/chrome_test_runner.sh')
logging.info('Contents of the test runner script: ')
for line in open(sh_script_file.name).readlines():
logging.info(' ' + line.rstrip())

View File

@@ -10,13 +10,12 @@ import sys
import types
from pylib import android_commands
from pylib import constants
from pylib.base import base_test_result
from pylib.instrumentation import test_package
from pylib.instrumentation import test_runner
from pylib.utils import report_results
import python_test_base
from python_test_caller import CallPythonTest
from python_test_sharder import PythonTestSharder
from test_info_collection import TestInfoCollection
@@ -56,13 +55,16 @@ def DispatchPythonTests(options):
Returns:
A list of test results.
Raises:
Exception: If there are no attached devices.
"""
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
raise Exception('You have no devices attached or visible!')
if options.device:
attached_devices = [options.device]
if options.test_device:
attached_devices = [options.test_device]
test_collection = TestInfoCollection()
all_tests = _GetAllTests(options.python_test_root, options.official_build)

View File

@@ -10,6 +10,7 @@ import os
from pylib import android_commands
from pylib.base import base_test_result
from pylib.base import shard
from pylib.utils import report_results
import test_package
import test_runner
@@ -25,7 +26,7 @@ def Dispatch(options):
options: Command line options.
Returns:
A TestRunResults object holding the results of the Java tests.
Test results in a base_test_result.TestRunResults object.
Raises:
Exception: when there are no attached devices.
@@ -42,9 +43,9 @@ def Dispatch(options):
if not attached_devices:
raise Exception('There are no devices online.')
if options.device:
assert options.device in attached_devices
attached_devices = [options.device]
if options.test_device:
assert options.test_device in attached_devices
attached_devices = [options.test_device]
if len(attached_devices) > 1 and options.wait_for_debugger:
logging.warning('Debugger can not be sharded, using first available device')

View File

@@ -10,6 +10,7 @@ import os
from pylib import android_commands
from pylib.base import base_test_result
from pylib.base import shard
from pylib.utils import report_results
import test_package
import test_runner
@@ -25,7 +26,7 @@ def Dispatch(options):
options: Command line options.
Returns:
A TestRunResults object holding the results of the Java tests.
Test results in a base_test_result.TestRunResults object.
Raises:
Exception: when there are no attached devices.
@@ -42,13 +43,14 @@ def Dispatch(options):
if not attached_devices:
raise Exception('There are no devices online.')
if options.device:
assert options.device in attached_devices
attached_devices = [options.device]
if options.test_device:
assert options.test_device in attached_devices
attached_devices = [options.test_device]
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
options, device, shard_index, test_pkg, [])
return shard.ShardAndRunTests(TestRunnerFactory, attached_devices, tests,
options.build_type)
return shard.ShardAndRunTests(TestRunnerFactory, attached_devices,
tests, options.build_type,
num_retries=options.num_retries)

View File

@@ -4,16 +4,12 @@
"""Parses options for the instrumentation tests."""
#TODO(craigdh): pylib/utils/ should not depend on pylib/.
from pylib import constants
import optparse
import os
import sys
_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out')
# TODO(gkanwar): Some downstream scripts current rely on these functions
# existing. This dependency should be removed, and this file deleted, in the
# future.
def AddBuildTypeOption(option_parser):
"""Decorates OptionParser with build type option."""
default_build_type = 'Debug'
@@ -22,36 +18,11 @@ def AddBuildTypeOption(option_parser):
option_parser.add_option('--debug', action='store_const', const='Debug',
dest='build_type', default=default_build_type,
help='If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug')
'Default is env var BUILDTYPE or Debug')
option_parser.add_option('--release', action='store_const', const='Release',
dest='build_type',
help='If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.')
def AddInstallAPKOption(option_parser):
"""Decorates OptionParser with apk option used to install the APK."""
AddBuildTypeOption(option_parser)
option_parser.add_option('--apk',
help=('The name of the apk containing the '
' application (with the .apk extension).'))
option_parser.add_option('--apk_package',
help=('The package name used by the apk containing '
'the application.'))
option_parser.add_option('--keep_data',
action='store_true',
default=False,
help=('Keep the package data when installing '
'the application.'))
def ValidateInstallAPKOption(option_parser, options):
if not options.apk:
option_parser.error('--apk is mandatory.')
if not os.path.exists(options.apk):
options.apk = os.path.join(constants.DIR_SOURCE_ROOT,
'out', options.build_type,
'apks', options.apk)
'Default is env var BUILDTYPE or Debug.')
def AddTestRunnerOptions(option_parser, default_timeout=60):
@@ -91,206 +62,7 @@ def AddTestRunnerOptions(option_parser, default_timeout=60):
'Chrome for Android flakiness dashboard.'))
option_parser.add_option('--skip-deps-push', dest='push_deps',
action='store_false', default=True,
help='Do not push data dependencies to the device. '
'Use this at own risk for speeding up test '
'execution on local machine.')
help='Do not push dependencies to the device. '
'Use this at own risk for speeding up test '
'execution on local machine.')
AddBuildTypeOption(option_parser)
def AddGTestOptions(option_parser):
"""Decorates OptionParser with GTest tests options."""
AddTestRunnerOptions(option_parser, default_timeout=0)
option_parser.add_option('-s', '--suite', dest='test_suite',
help='Executable name of the test suite to run '
'(use -s help to list them).')
option_parser.add_option('--out-directory', dest='out_directory',
help='Path to the out/ directory, irrespective of '
'the build type. Only for non-Chromium uses.')
option_parser.add_option('-d', '--device', dest='test_device',
help='Target device for the test suite to run on.')
option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
help='gtest filter.')
#TODO(craigdh): Replace _ with - in arguments for consistency.
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
help='Additional arguments to pass to the test.')
option_parser.add_option('-e', '--emulator', dest='use_emulator',
action='store_true',
help='Run tests in a new instance of emulator.')
option_parser.add_option('-n', '--emulator_count',
type='int', default=1,
help='Number of emulators to launch for running the '
'tests.')
option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
action='store_true',
help='Use Xvfb around tests (ignored if not Linux).')
option_parser.add_option('--webkit', action='store_true',
help='Run the tests from a WebKit checkout.')
option_parser.add_option('--exit_code', action='store_true',
help='If set, the exit code will be total number '
'of failures.')
option_parser.add_option('--exe', action='store_true',
help='If set, use the exe test runner instead of '
'the APK.')
option_parser.add_option('--abi', default='armeabi-v7a',
help='Platform of emulators to launch.')
def AddCommonInstrumentationOptions(option_parser):
"""Decorates OptionParser with base instrumentation tests options."""
AddTestRunnerOptions(option_parser)
option_parser.add_option('-f', '--test_filter',
help='Test filter (if not fully qualified, '
'will run all matches).')
option_parser.add_option(
'-A', '--annotation', dest='annotation_str',
help=('Comma-separated list of annotations. Run only tests with any of '
'the given annotations. An annotation can be either a key or a '
'key-values pair. A test that has no annotation is considered '
'"SmallTest".'))
option_parser.add_option(
'-E', '--exclude-annotation', dest='exclude_annotation_str',
help=('Comma-separated list of annotations. Exclude tests with these '
'annotations.'))
option_parser.add_option('-j', '--java_only', action='store_true',
help='Run only the Java tests.')
option_parser.add_option('-p', '--python_only', action='store_true',
help='Run only the Python tests.')
option_parser.add_option('--screenshot', dest='screenshot_failures',
action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option('--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
option_parser.add_option('--shard_retries', type=int, default=1,
help=('Number of times to retry each failure when '
'sharding.'))
option_parser.add_option('--official-build', help='Run official build tests.')
option_parser.add_option('--device',
help='Serial number of device we should use.')
option_parser.add_option('--python_test_root',
help='Root of the python-driven tests.')
option_parser.add_option('--keep_test_server_ports',
action='store_true',
help='Indicates the test server ports must be '
'kept. When this is run via a sharder '
'the test server ports should be kept and '
'should not be reset.')
option_parser.add_option('--buildbot-step-failure',
action='store_true',
help=('If present, will set the buildbot status '
'as STEP_FAILURE, otherwise as STEP_WARNINGS '
'when test(s) fail.'))
option_parser.add_option('--disable_assertions', action='store_true',
help='Run with java assertions disabled.')
option_parser.add_option('--test_data', action='append', default=[],
help=('Each instance defines a directory of test '
'data that should be copied to the target(s) '
'before running the tests. The argument '
'should be of the form <target>:<source>, '
'<target> is relative to the device data'
'directory, and <source> is relative to the '
'chromium build directory.'))
def AddInstrumentationOptions(option_parser):
"""Decorates OptionParser with instrumentation tests options."""
AddCommonInstrumentationOptions(option_parser)
option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true', help='Wait for debugger.')
option_parser.add_option('-I', dest='install_apk',
help='Install APK.', action='store_true')
option_parser.add_option(
'--test-apk', dest='test_apk',
help=('The name of the apk containing the tests (without the .apk '
'extension; e.g. "ContentShellTest"). Alternatively, this can '
'be a full path to the apk.'))
def AddUIAutomatorOptions(option_parser):
"""Decorates OptionParser with uiautomator tests options."""
AddCommonInstrumentationOptions(option_parser)
option_parser.add_option(
'--package-name',
help=('The package name used by the apk containing the application.'))
option_parser.add_option(
'--test-jar', dest='test_jar',
help=('The name of the dexed jar containing the tests (without the '
'.dex.jar extension). Alternatively, this can be a full path to '
'the jar.'))
def ValidateCommonInstrumentationOptions(option_parser, options, args):
"""Validate common options/arguments and populate options with defaults."""
if len(args) > 1:
option_parser.print_help(sys.stderr)
option_parser.error('Unknown arguments: %s' % args[1:])
if options.java_only and options.python_only:
option_parser.error('Options java_only (-j) and python_only (-p) '
'are mutually exclusive.')
options.run_java_tests = True
options.run_python_tests = True
if options.java_only:
options.run_python_tests = False
elif options.python_only:
options.run_java_tests = False
if options.annotation_str:
options.annotations = options.annotation_str.split(',')
elif options.test_filter:
options.annotations = []
else:
options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest']
if options.exclude_annotation_str:
options.exclude_annotations = options.exclude_annotation_str.split(',')
else:
options.exclude_annotations = []
def ValidateInstrumentationOptions(option_parser, options, args):
"""Validate options/arguments and populate options with defaults."""
ValidateCommonInstrumentationOptions(option_parser, options, args)
if not options.test_apk:
option_parser.error('--test-apk must be specified.')
if os.path.exists(options.test_apk):
# The APK is fully qualified, assume the JAR lives along side.
options.test_apk_path = options.test_apk
options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] +
'.jar')
else:
options.test_apk_path = os.path.join(_SDK_OUT_DIR,
options.build_type,
constants.SDK_BUILD_APKS_DIR,
'%s.apk' % options.test_apk)
options.test_apk_jar_path = os.path.join(
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % options.test_apk)
def ValidateUIAutomatorOptions(option_parser, options, args):
"""Validate uiautomator options/arguments."""
ValidateCommonInstrumentationOptions(option_parser, options, args)
if not options.package_name:
option_parser.error('--package-name must be specified.')
if not options.test_jar:
option_parser.error('--test-jar must be specified.')
if os.path.exists(options.test_jar):
# The dexed JAR is fully qualified, assume the info JAR lives along side.
options.uiautomator_jar = options.test_jar
else:
options.uiautomator_jar = os.path.join(
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR,
'%s.dex.jar' % options.test_jar)
options.uiautomator_info_jar = (
options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
'_java.jar')

View File

@@ -4,26 +4,21 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs content browser tests."""
"""Runs content_browsertests."""
import optparse
import logging
import os
import sys
from pylib.browsertests import dispatch
from pylib.utils import run_tests_helper
from pylib.utils import test_options_parser
def main(argv):
option_parser = optparse.OptionParser()
test_options_parser.AddGTestOptions(option_parser)
options, args = option_parser.parse_args(argv)
if len(args) > 1:
option_parser.error('Unknown argument: %s' % args[1:])
run_tests_helper.SetLogLevel(options.verbose_count)
return dispatch.Dispatch(options)
from pylib import cmd_helper
if __name__ == '__main__':
sys.exit(main(sys.argv))
args = ['python',
os.path.join(os.path.dirname(__file__), 'test_runner.py'),
'content_browsertests'] + sys.argv[1:]
logging.warning('*' * 80)
logging.warning('This script is deprecated and will be removed soon.')
logging.warning('Use the following instead: %s', ' '.join(args))
logging.warning('*' * 80)
sys.exit(cmd_helper.RunCmd(args))

View File

@@ -6,73 +6,19 @@
"""Runs both the Python and Java instrumentation tests."""
import optparse
import logging
import os
import sys
from pylib import buildbot_report
from pylib import ports
from pylib.base import base_test_result
from pylib.host_driven import run_python_tests
from pylib.instrumentation import dispatch
from pylib.utils import report_results
from pylib.utils import run_tests_helper
from pylib.utils import test_options_parser
def DispatchInstrumentationTests(options):
"""Dispatches the Java and Python instrumentation tests, sharding if possible.
Uses the logging module to print the combined final results and
summary of the Java and Python tests. If the java_only option is set, only
the Java tests run. If the python_only option is set, only the python tests
run. If neither are set, run both Java and Python tests.
Args:
options: command-line options for running the Java and Python tests.
Returns:
An integer representing the number of broken tests.
"""
if not options.keep_test_server_ports:
# Reset the test port allocation. It's important to do it before starting
# to dispatch any tests.
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
all_results = base_test_result.TestRunResults()
if options.run_java_tests:
all_results.AddTestRunResults(dispatch.Dispatch(options))
if options.run_python_tests:
all_results.AddTestRunResults(run_python_tests.DispatchPythonTests(options))
report_results.LogFull(
results=all_results,
test_type='Instrumentation',
test_package=os.path.basename(options.test_apk),
annotation=options.annotations,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
return len(all_results.GetNotPass())
def main(argv):
option_parser = optparse.OptionParser()
test_options_parser.AddInstrumentationOptions(option_parser)
options, args = option_parser.parse_args(argv)
test_options_parser.ValidateInstrumentationOptions(option_parser, options,
args)
run_tests_helper.SetLogLevel(options.verbose_count)
ret = 1
try:
ret = DispatchInstrumentationTests(options)
finally:
buildbot_report.PrintStepResultIfNeeded(options, ret)
return ret
from pylib import cmd_helper
if __name__ == '__main__':
sys.exit(main(sys.argv))
args = ['python',
os.path.join(os.path.dirname(__file__), 'test_runner.py'),
'instrumentation'] + sys.argv[1:]
logging.warning('*' * 80)
logging.warning('This script is deprecated and will be removed soon.')
logging.warning('Use the following instead: %s', ' '.join(args))
logging.warning('*' * 80)
sys.exit(cmd_helper.RunCmd(args))

View File

@@ -4,76 +4,21 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the native unit tests.
"""Runs all the native unit tests."""
1. Copy over test binary to /data/local on device.
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
base dir (which maps to Context.getExternalFilesDir()).
3. Environment:
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
$EXTERNAL_STORAGE + /chrome/test/data
4. Run the binary in the device and stream the log to the host.
4.1. Optionally, filter specific tests.
4.2. If we're running a single test suite and we have multiple devices
connected, we'll shard the tests.
5. Clean up the device.
Suppressions:
Individual tests in a test binary can be suppressed by listing it in
the gtest_filter directory in a file of the same name as the test binary,
one test per line. Here is an example:
$ cat gtest_filter/base_unittests_disabled
DataPackTest.Load
ReadOnlyFileUtilTest.ContentsEqual
This file is generated by the tests running on devices. If running on emulator,
additional filter file which lists the tests only failed in emulator will be
loaded. We don't care about the rare testcases which succeeded on emulator, but
failed on device.
"""
import optparse
import logging
import os
import sys
from pylib import cmd_helper
from pylib.gtest import dispatch
from pylib.utils import emulator
from pylib.utils import run_tests_helper
from pylib.utils import test_options_parser
def main(argv):
option_parser = optparse.OptionParser()
test_options_parser.AddGTestOptions(option_parser)
options, args = option_parser.parse_args(argv)
if len(args) > 1:
option_parser.error('Unknown argument: %s' % args[1:])
run_tests_helper.SetLogLevel(options.verbose_count)
if options.out_directory:
cmd_helper.OutDirectory.set(options.out_directory)
if options.use_emulator:
emulator.DeleteAllTempAVDs()
failed_tests_count = dispatch.Dispatch(options)
# Failures of individual test suites are communicated by printing a
# STEP_FAILURE message.
# Returning a success exit status also prevents the buildbot from incorrectly
# marking the last suite as failed if there were failures in other suites in
# the batch (this happens because the exit status is a sum of all failures
# from all suites, but the buildbot associates the exit status only with the
# most recent step).
if options.exit_code:
return failed_tests_count
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
args = ['python',
os.path.join(os.path.dirname(__file__), 'test_runner.py'),
'gtest'] + sys.argv[1:]
logging.warning('*' * 80)
logging.warning('This script is deprecated and will be removed soon.')
logging.warning('Use the following instead: %s', ' '.join(args))
logging.warning('*' * 80)
sys.exit(cmd_helper.RunCmd(args))

View File

@@ -6,74 +6,19 @@
"""Runs both the Python and Java UIAutomator tests."""
import optparse
import logging
import os
import sys
import time
from pylib import buildbot_report
from pylib import constants
from pylib import ports
from pylib.base import base_test_result
from pylib.host_driven import run_python_tests
from pylib.uiautomator import dispatch
from pylib.utils import report_results
from pylib.utils import run_tests_helper
from pylib.utils import test_options_parser
def DispatchUIAutomatorTests(options):
"""Dispatches the UIAutomator tests, sharding if possible.
Uses the logging module to print the combined final results and
summary of the Java and Python tests. If the java_only option is set, only
the Java tests run. If the python_only option is set, only the python tests
run. If neither are set, run both Java and Python tests.
Args:
options: command-line options for running the Java and Python tests.
Returns:
An integer representing the number of broken tests.
"""
if not options.keep_test_server_ports:
# Reset the test port allocation. It's important to do it before starting
# to dispatch any tests.
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
all_results = base_test_result.TestRunResults()
if options.run_java_tests:
all_results.AddTestRunResults(dispatch.Dispatch(options))
if options.run_python_tests:
all_results.AddTestRunResults(run_python_tests.DispatchPythonTests(options))
report_results.LogFull(
results=all_results,
test_type='UIAutomator',
test_package=os.path.basename(options.test_jar),
annotation=options.annotations,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
return len(all_results.GetNotPass())
def main(argv):
option_parser = optparse.OptionParser()
test_options_parser.AddUIAutomatorOptions(option_parser)
options, args = option_parser.parse_args(argv)
test_options_parser.ValidateUIAutomatorOptions(option_parser, options, args)
run_tests_helper.SetLogLevel(options.verbose_count)
ret = 1
try:
ret = DispatchUIAutomatorTests(options)
finally:
buildbot_report.PrintStepResultIfNeeded(options, ret)
return ret
from pylib import cmd_helper
if __name__ == '__main__':
sys.exit(main(sys.argv))
args = ['python',
os.path.join(os.path.dirname(__file__), 'test_runner.py'),
'uiautomator'] + sys.argv[1:]
logging.warning('*' * 80)
logging.warning('This script is deprecated and will be removed soon.')
logging.warning('Use the following instead: %s', ' '.join(args))
logging.warning('*' * 80)
sys.exit(cmd_helper.RunCmd(args))

android/test_runner.py (new executable file, 531 lines)
View File

@@ -0,0 +1,531 @@
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all types of tests from one unified interface.
TODO(gkanwar):
* Add options to run Monkey tests.
"""
import collections
import optparse
import os
import sys
from pylib import cmd_helper
from pylib import constants
from pylib import ports
from pylib.base import base_test_result
from pylib.browsertests import dispatch as browsertests_dispatch
from pylib.gtest import dispatch as gtest_dispatch
from pylib.host_driven import run_python_tests as python_dispatch
from pylib.instrumentation import dispatch as instrumentation_dispatch
from pylib.uiautomator import dispatch as uiautomator_dispatch
from pylib.utils import emulator, report_results, run_tests_helper
_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out')
def AddBuildTypeOption(option_parser):
"""Adds the build type option to |option_parser|."""
default_build_type = 'Debug'
if 'BUILDTYPE' in os.environ:
default_build_type = os.environ['BUILDTYPE']
option_parser.add_option('--debug', action='store_const', const='Debug',
dest='build_type', default=default_build_type,
help=('If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug.'))
option_parser.add_option('--release', action='store_const',
const='Release', dest='build_type',
help=('If set, run test suites under out/Release.'
' Default is env var BUILDTYPE or Debug.'))
def AddEmulatorOptions(option_parser):
"""Adds all emulator-related options to |option_parser|."""
# TODO(gkanwar): Figure out what we're doing with the emulator setup
# and determine whether these options should be deprecated/removed.
option_parser.add_option('-e', '--emulator', dest='use_emulator',
action='store_true',
help='Run tests in a new instance of emulator.')
option_parser.add_option('-n', '--emulator-count',
type='int', default=1,
help=('Number of emulators to launch for '
'running the tests.'))
option_parser.add_option('--abi', default='armeabi-v7a',
help='Platform of emulators to launch.')
def ProcessEmulatorOptions(options):
"""Processes emulator options."""
if options.use_emulator:
emulator.DeleteAllTempAVDs()
def AddCommonOptions(option_parser):
"""Adds all common options to |option_parser|."""
AddBuildTypeOption(option_parser)
option_parser.add_option('--out-directory', dest='out_directory',
help=('Path to the out/ directory, irrespective of '
'the build type. Only for non-Chromium uses.'))
option_parser.add_option('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true')
option_parser.add_option('--num_retries', dest='num_retries', type='int',
default=2,
help=('Number of retries for a test before '
'giving up.'))
option_parser.add_option('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
'traceview']
option_parser.add_option('--profiler', dest='profilers', action='append',
choices=profilers,
help=('Profiling tool to run during test. Pass '
'multiple times to run multiple profilers. '
'Available profilers: %s' % profilers))
option_parser.add_option('--tool',
dest='tool',
help=('Run the test under a tool '
'(use --tool help to list them)'))
option_parser.add_option('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
option_parser.add_option('--skip-deps-push', dest='push_deps',
action='store_false', default=True,
help=('Do not push dependencies to the device. '
'Use this at own risk for speeding up test '
'execution on local machine.'))
# TODO(gkanwar): This option is deprecated. Remove it in the future.
option_parser.add_option('--exit-code', action='store_true',
help=('(DEPRECATED) If set, the exit code will be '
'total number of failures.'))
# TODO(gkanwar): This option is deprecated. It is currently used to run tests
# with the FlakyTest annotation to prevent the bots going red downstream. We
# should instead use exit codes and let the Buildbot scripts deal with test
# failures appropriately. See crbug.com/170477.
option_parser.add_option('--buildbot-step-failure',
action='store_true',
help=('(DEPRECATED) If present, will set the '
'buildbot status as STEP_FAILURE, otherwise '
'as STEP_WARNINGS when test(s) fail.'))
option_parser.add_option('-d', '--device', dest='test_device',
help=('Target device for the test suite '
'to run on.'))
def ProcessCommonOptions(options):
"""Processes and handles all common options."""
if options.out_directory:
cmd_helper.OutDirectory.set(options.out_directory)
run_tests_helper.SetLogLevel(options.verbose_count)
def AddContentBrowserTestOptions(option_parser):
"""Adds Content Browser test options to |option_parser|."""
option_parser.usage = '%prog content_browsertests [options]'
option_parser.command_list = []
option_parser.example = '%prog content_browsertests'
AddCommonOptions(option_parser)
# TODO(gkanwar): Consolidate and clean up test filtering for gtests and
# content_browsertests.
option_parser.add_option('--gtest_filter', dest='test_filter',
help='Filter GTests by name.')
def AddGTestOptions(option_parser, default_timeout=60):
"""Adds gtest options to |option_parser|."""
option_parser.usage = '%prog gtest [options]'
option_parser.command_list = []
option_parser.example = '%prog gtest -s base_unittests'
# TODO(gkanwar): Consolidate and clean up test filtering for gtests and
# content_browsertests.
option_parser.add_option('--gtest_filter', dest='test_filter',
help='Filter GTests by name.')
option_parser.add_option('-s', '--suite', dest='test_suite',
help=('Executable name of the test suite to run '
'(use -s help to list them).'))
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
help='Additional arguments to pass to the test.')
# TODO(gkanwar): Most likely deprecate/remove this option once we've pinned
# down what we're doing with the emulator setup.
option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
action='store_true',
help='Use Xvfb around tests (ignored if not Linux).')
# TODO(gkanwar): Possible deprecate this flag. Waiting on word from Peter
# Beverloo.
option_parser.add_option('--webkit', action='store_true',
help='Run the tests from a WebKit checkout.')
option_parser.add_option('--exe', action='store_true',
help='If set, use the exe test runner instead of '
'the APK.')
option_parser.add_option('-t', dest='timeout',
help='Timeout to wait for each test',
type='int',
default=default_timeout)
# TODO(gkanwar): Move these to Common Options once we have the plumbing
# in our other test types to handle these commands
AddEmulatorOptions(option_parser)
AddCommonOptions(option_parser)
def AddJavaTestOptions(option_parser):
"""Adds the Java test options to |option_parser|."""
option_parser.add_option('-f', '--test_filter', dest='test_filter',
help=('Test filter (if not fully qualified, '
'will run all matches).'))
option_parser.add_option(
'-A', '--annotation', dest='annotation_str',
help=('Comma-separated list of annotations. Run only tests with any of '
'the given annotations. An annotation can be either a key or a '
'key-values pair. A test that has no annotation is considered '
'"SmallTest".'))
option_parser.add_option(
'-E', '--exclude-annotation', dest='exclude_annotation_str',
help=('Comma-separated list of annotations. Exclude tests with these '
'annotations.'))
option_parser.add_option('-j', '--java_only', action='store_true',
default=False, help='Run only the Java tests.')
option_parser.add_option('-p', '--python_only', action='store_true',
default=False,
help='Run only the host-driven tests.')
option_parser.add_option('--screenshot', dest='screenshot_failures',
action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option('--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
# TODO(gkanwar): Remove this option. It is not used anywhere.
option_parser.add_option('--shard_retries', type=int, default=1,
help=('Number of times to retry each failure when '
'sharding.'))
option_parser.add_option('--official-build', help='Run official build tests.')
option_parser.add_option('--python_test_root',
help='Root of the host-driven tests.')
option_parser.add_option('--keep_test_server_ports',
action='store_true',
help=('Indicates the test server ports must be '
'kept. When this is run via a sharder '
'the test server ports should be kept and '
'should not be reset.'))
# TODO(gkanwar): This option is deprecated. Remove it in the future.
option_parser.add_option('--disable_assertions', action='store_true',
help=('(DEPRECATED) Run with java assertions '
'disabled.'))
option_parser.add_option('--test_data', action='append', default=[],
help=('Each instance defines a directory of test '
'data that should be copied to the target(s) '
'before running the tests. The argument '
'should be of the form <target>:<source>, '
'<target> is relative to the device data'
'directory, and <source> is relative to the '
'chromium build directory.'))
def ProcessJavaTestOptions(options, error_func):
"""Processes options/arguments and populates |options| with defaults."""
if options.java_only and options.python_only:
error_func('Options java_only (-j) and python_only (-p) '
'are mutually exclusive.')
options.run_java_tests = True
options.run_python_tests = True
if options.java_only:
options.run_python_tests = False
elif options.python_only:
options.run_java_tests = False
if not options.python_test_root:
options.run_python_tests = False
if options.annotation_str:
options.annotations = options.annotation_str.split(',')
elif options.test_filter:
options.annotations = []
else:
options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest']
if options.exclude_annotation_str:
options.exclude_annotations = options.exclude_annotation_str.split(',')
else:
options.exclude_annotations = []
if not options.keep_test_server_ports:
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
def AddInstrumentationTestOptions(option_parser):
"""Adds Instrumentation test options to |option_parser|."""
option_parser.usage = '%prog instrumentation [options]'
option_parser.command_list = []
option_parser.example = ('%prog instrumentation -I '
'--test-apk=ChromiumTestShellTest')
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true',
help='Wait for debugger.')
option_parser.add_option('-I', dest='install_apk', action='store_true',
help='Install test APK.')
option_parser.add_option(
'--test-apk', dest='test_apk',
help=('The name of the apk containing the tests '
'(without the .apk extension; e.g. "ContentShellTest"). '
'Alternatively, this can be a full path to the apk.'))
def ProcessInstrumentationOptions(options, error_func):
"""Processes options/arguments and populate |options| with defaults."""
ProcessJavaTestOptions(options, error_func)
if not options.test_apk:
error_func('--test-apk must be specified.')
if os.path.exists(options.test_apk):
# The APK is fully qualified, assume the JAR lives along side.
options.test_apk_path = options.test_apk
options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] +
'.jar')
else:
options.test_apk_path = os.path.join(_SDK_OUT_DIR,
options.build_type,
constants.SDK_BUILD_APKS_DIR,
'%s.apk' % options.test_apk)
options.test_apk_jar_path = os.path.join(
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % options.test_apk)
def AddUIAutomatorTestOptions(option_parser):
"""Adds UI Automator test options to |option_parser|."""
option_parser.usage = '%prog uiautomator [options]'
option_parser.command_list = []
option_parser.example = (
'%prog uiautomator --test-jar=chromium_testshell_uiautomator_tests'
' --package-name=org.chromium.chrome.testshell')
option_parser.add_option(
'--package-name',
help='The package name used by the apk containing the application.')
option_parser.add_option(
'--test-jar', dest='test_jar',
help=('The name of the dexed jar containing the tests (without the '
'.dex.jar extension). Alternatively, this can be a full path '
'to the jar.'))
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
def ProcessUIAutomatorOptions(options, error_func):
"""Processes UIAutomator options/arguments."""
ProcessJavaTestOptions(options, error_func)
if not options.package_name:
error_func('--package-name must be specified.')
if not options.test_jar:
error_func('--test-jar must be specified.')
if os.path.exists(options.test_jar):
# The dexed JAR is fully qualified, assume the info JAR lives along side.
options.uiautomator_jar = options.test_jar
else:
options.uiautomator_jar = os.path.join(
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR,
'%s.dex.jar' % options.test_jar)
options.uiautomator_info_jar = (
options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
'_java.jar')
def RunTestsCommand(command, options, args, option_parser):
"""Checks test type and dispatches to the appropriate function.
Args:
command: String indicating the command that was received to trigger
this function.
options: optparse options dictionary.
args: List of extra args from optparse.
option_parser: optparse.OptionParser object.
Returns:
Integer indicating exit code.
"""
ProcessCommonOptions(options)
total_failed = 0
if command == 'gtest':
# TODO(gkanwar): See the emulator TODO above -- this call should either go
# away or become generalized.
ProcessEmulatorOptions(options)
total_failed = gtest_dispatch.Dispatch(options)
elif command == 'content_browsertests':
total_failed = browsertests_dispatch.Dispatch(options)
elif command == 'instrumentation':
ProcessInstrumentationOptions(options, option_parser.error)
results = base_test_result.TestRunResults()
if options.run_java_tests:
results.AddTestRunResults(instrumentation_dispatch.Dispatch(options))
if options.run_python_tests:
results.AddTestRunResults(python_dispatch.DispatchPythonTests(options))
report_results.LogFull(
results=results,
test_type='Instrumentation',
test_package=os.path.basename(options.test_apk),
annotation=options.annotations,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
total_failed += len(results.GetNotPass())
elif command == 'uiautomator':
ProcessUIAutomatorOptions(options, option_parser.error)
results = base_test_result.TestRunResults()
if options.run_java_tests:
results.AddTestRunResults(uiautomator_dispatch.Dispatch(options))
if options.run_python_tests:
results.AddTestRunResults(python_dispatch.Dispatch(options))
report_results.LogFull(
results=results,
test_type='UIAutomator',
test_package=os.path.basename(options.test_jar),
annotation=options.annotations,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
total_failed += len(results.GetNotPass())
else:
raise Exception('Unknown test type state')
return total_failed
def HelpCommand(command, options, args, option_parser):
"""Display help for a certain command, or overall help.
Args:
command: String indicating the command that was received to trigger
this function.
options: optparse options dictionary.
args: List of extra args from optparse.
option_parser: optparse.OptionParser object.
Returns:
Integer indicating exit code.
"""
# If we don't have any args, display overall help
if len(args) < 3:
option_parser.print_help()
return 0
command = args[2]
if command not in VALID_COMMANDS:
option_parser.error('Unrecognized command.')
# Treat the help command as a special case. We don't care about showing a
# specific help page for itself.
if command == 'help':
option_parser.print_help()
return 0
VALID_COMMANDS[command].add_options_func(option_parser)
option_parser.usage = '%prog ' + command + ' [options]'
option_parser.command_list = None
option_parser.print_help()
return 0
# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
VALID_COMMANDS = {
'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
'content_browsertests': CommandFunctionTuple(
AddContentBrowserTestOptions, RunTestsCommand),
'instrumentation': CommandFunctionTuple(
AddInstrumentationTestOptions, RunTestsCommand),
'uiautomator': CommandFunctionTuple(
AddUIAutomatorTestOptions, RunTestsCommand),
'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
}
class CommandOptionParser(optparse.OptionParser):
"""Wrapper class for OptionParser to help with listing commands."""
def __init__(self, *args, **kwargs):
self.command_list = kwargs.pop('command_list', [])
self.example = kwargs.pop('example', '')
optparse.OptionParser.__init__(self, *args, **kwargs)
#override
def get_usage(self):
normal_usage = optparse.OptionParser.get_usage(self)
command_list = self.get_command_list()
example = self.get_example()
return self.expand_prog_name(normal_usage + example + command_list)
#override
def get_command_list(self):
if self.command_list:
return '\nCommands:\n %s\n' % '\n '.join(sorted(self.command_list))
return ''
def get_example(self):
if self.example:
return '\nExample:\n %s\n' % self.example
return ''
def main(argv):
option_parser = CommandOptionParser(
usage='Usage: %prog <command> [options]',
command_list=VALID_COMMANDS.keys())
if len(argv) < 2 or argv[1] not in VALID_COMMANDS:
option_parser.print_help()
return 0
command = argv[1]
VALID_COMMANDS[command].add_options_func(option_parser)
options, args = option_parser.parse_args(argv)
exit_code = VALID_COMMANDS[command].run_command_func(
command, options, args, option_parser)
# Failures of individual test suites are communicated by printing a
# STEP_FAILURE message.
# Returning a success exit status also prevents the buildbot from incorrectly
# marking the last suite as failed if there were failures in other suites in
# the batch (this happens because the exit status is a sum of all failures
# from all suites, but the buildbot associates the exit status only with the
# most recent step).
return exit_code
if __name__ == '__main__':
sys.exit(main(sys.argv))
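
For reference, here is how a new test type would slot into the command table
above, using the Monkey TODO as the example. This is a hypothetical sketch,
not part of this change: AddMonkeyTestOptions does not exist yet, the option
names are made up, and RunTestsCommand would additionally need an elif branch
calling a Monkey dispatcher. The sketch assumes it lives inside
test_runner.py, so it can reuse AddCommonOptions, CommandFunctionTuple and
RunTestsCommand defined there.

def AddMonkeyTestOptions(option_parser):
  """Adds Monkey test options to |option_parser| (hypothetical)."""
  option_parser.usage = '%prog monkey [options]'
  option_parser.command_list = []
  option_parser.example = ('%prog monkey '
                           '--package-name=org.chromium.chrome.testshell')
  option_parser.add_option('--package-name',
                           help='Package name of the app under test.')
  option_parser.add_option('--event-count', type='int', default=10000,
                           help='Number of Monkey events to inject.')
  AddCommonOptions(option_parser)

# Registration mirrors the existing entries:
# VALID_COMMANDS['monkey'] = CommandFunctionTuple(
#     AddMonkeyTestOptions, RunTestsCommand)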