Converts host-driven tests to the common test_dispatcher

Also renames several files in pylib/host_driven to match the general file
naming scheme. This change will break existing host-driven tests downstream
which are run through scripts other than test_runner.

NOTRY=True
BUG=176323
Review URL: https://chromiumcodereview.appspot.com/19537004

git-svn-id: http://src.chromium.org/svn/trunk/src/build@215944 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
This commit is contained in:
Parent: ff739899f3
Commit: db7c9d502b
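For orientation, the flow stitched together by this change looks roughly as follows. This is only a sketch assembled from the calls visible in the diff below; the module paths and keyword arguments are assumptions, not a verified API:

    # Sketch only: host-driven tests now go through the common dispatcher.
    from pylib.base import test_dispatcher
    from pylib.host_driven import setup as host_driven_setup

    # setup.InstrumentationSetup builds the host-driven test objects and a
    # factory that binds a HostDrivenTestRunner to a (device, shard_index).
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        host_driven_test_root, official_build, instrumentation_options)

    # The shared dispatcher then shards or replicates the tests across devices.
    results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, False, None, shard=True,
        build_type='Debug', num_retries=0)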
@@ -33,7 +33,7 @@ LOGCAT_DIR = os.path.join(CHROME_SRC, 'out', 'logcat')
 # apk_package: package for the apk to be installed.
 # test_apk: apk to run tests on.
 # test_data: data folder in format destination:source.
-# host_driven_root: The python test root directory.
+# host_driven_root: The host-driven test root directory.
 # annotation: Annotation of the tests to include.
 # exclude_annotation: The annotation of the tests to exclude.
 I_TEST = collections.namedtuple('InstrumentationTest', [
@@ -1,35 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-"""A module that contains a queue for running sharded tests."""
-
-import multiprocessing
-
-
-class ShardedTestsQueue(object):
-  """A queue for managing pending tests across different runners.
-
-  This class should only be used when sharding.
-
-  Attributes:
-    num_devices: an integer; the number of attached Android devices.
-    tests: a list of tests to be run.
-    tests_queue: if sharding, a JoinableQueue object that holds tests from
-        |tests|. Otherwise, a list holding tests.
-    results_queue: a Queue object to hold TestRunResults objects.
-  """
-  _STOP_SENTINEL = 'STOP'  # sentinel value for iter()
-
-  def __init__(self, num_devices, tests):
-    self.num_devices = num_devices
-    self.tests_queue = multiprocessing.Queue()
-    for test in tests:
-      self.tests_queue.put(test)
-    for _ in xrange(self.num_devices):
-      self.tests_queue.put(ShardedTestsQueue._STOP_SENTINEL)
-
-  def __iter__(self):
-    """Returns an iterator with the test cases."""
-    return iter(self.tests_queue.get, ShardedTestsQueue._STOP_SENTINEL)
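The removed queue relies on Python's two-argument iter(callable, sentinel) form: each per-device worker keeps calling tests_queue.get() until it pulls its own 'STOP' sentinel. A minimal, hypothetical consumer (not part of the original file) would look like:

    # Hypothetical consumer of the removed ShardedTestsQueue, for illustration.
    queue = ShardedTestsQueue(num_devices=2,
                              tests=['testFoo', 'testBar', 'testBaz'])

    def RunShard(shard_queue):
      # Iterating calls iter(tests_queue.get, 'STOP'), so the loop ends as soon
      # as this worker pulls one of the per-device sentinels off the queue.
      return [test for test in shard_queue]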
@@ -371,15 +371,17 @@ def RunTests(tests, runner_factory, wait_for_debugger, test_device,
     shared_test_collection = _TestCollection([_Test(t) for t in tests])
     test_collection_factory = lambda: shared_test_collection
     tag_results_with_device = False
+    log_string = 'sharded across devices'
   else:
     # Generate a unique _TestCollection object for each test runner, but use
     # the same set of tests.
     test_collection_factory = lambda: _TestCollection([_Test(t) for t in tests])
     tag_results_with_device = True
+    log_string = 'replicated on each device'

   devices = _GetAttachedDevices(wait_for_debugger, test_device)

-  logging.info('Will run %d tests: %s', len(tests), str(tests))
+  logging.info('Will run %d tests (%s): %s', len(tests), log_string, str(tests))
   runners = _CreateRunners(runner_factory, devices, setup_timeout)
   try:
     return _RunAllTests(runners, test_collection_factory,
@@ -1,27 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This file is imported by python tests ran by run_python_tests.py."""
-
-import os
-
-from pylib import android_commands
-from pylib.instrumentation import test_runner
-
-
-def _GetPackageName(fname):
-  """Extracts the package name from the test file path."""
-  base_root = os.path.join('com', 'google', 'android')
-  dirname = os.path.dirname(fname)
-  package = dirname[dirname.rfind(base_root):]
-  return package.replace(os.sep, '.')
-
-
-def RunJavaTest(fname, suite, test, ports_to_forward):
-  device = android_commands.GetAttachedDevices()[0]
-  package_name = _GetPackageName(fname)
-  test = package_name + '.' + suite + '#' + test
-  java_test_runner = test_runner.TestRunner(False, device, [test], False,
-                                            False, False, 0, ports_to_forward)
-  return java_test_runner.Run()
@@ -1,152 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Base class for Android Python-driven tests.
|
||||
|
||||
This test case is intended to serve as the base class for any Python-driven
|
||||
tests. It is similar to the Python unitttest module in that the user's tests
|
||||
inherit from this case and add their tests in that case.
|
||||
|
||||
When a PythonTestBase object is instantiated, its purpose is to run only one of
|
||||
its tests. The test runner gives it the name of the test the instance will
|
||||
run. The test runner calls SetUp with the Android device ID which the test will
|
||||
run against. The runner runs the test method itself, collecting the result,
|
||||
and calls TearDown.
|
||||
|
||||
Tests can basically do whatever they want in the test methods, such as call
|
||||
Java tests using _RunJavaTests. Those methods have the advantage of massaging
|
||||
the Java test results into Python test results.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
|
||||
from pylib import android_commands
|
||||
from pylib.base import base_test_result
|
||||
from pylib.instrumentation import test_options
|
||||
from pylib.instrumentation import test_package
|
||||
from pylib.instrumentation import test_result
|
||||
from pylib.instrumentation import test_runner
|
||||
|
||||
|
||||
# aka the parent of com.google.android
|
||||
BASE_ROOT = 'src' + os.sep
|
||||
|
||||
|
||||
class PythonTestBase(object):
|
||||
"""Base class for Python-driven tests."""
|
||||
|
||||
def __init__(self, test_name):
|
||||
# test_name must match one of the test methods defined on a subclass which
|
||||
# inherits from this class.
|
||||
# It's stored so we can do the attr lookup on demand, allowing this class
|
||||
# to be pickled, a requirement for the multiprocessing module.
|
||||
self.test_name = test_name
|
||||
class_name = self.__class__.__name__
|
||||
self.qualified_name = class_name + '.' + self.test_name
|
||||
|
||||
def SetUp(self, options):
|
||||
self.options = options
|
||||
self.shard_index = self.options.shard_index
|
||||
self.device_id = self.options.device_id
|
||||
self.adb = android_commands.AndroidCommands(self.device_id)
|
||||
self.ports_to_forward = []
|
||||
|
||||
def TearDown(self):
|
||||
pass
|
||||
|
||||
def GetOutDir(self):
|
||||
return os.path.join(os.environ['CHROME_SRC'], 'out',
|
||||
self.options.build_type)
|
||||
|
||||
def Run(self):
|
||||
logging.warning('Running Python-driven test: %s', self.test_name)
|
||||
return getattr(self, self.test_name)()
|
||||
|
||||
def _RunJavaTest(self, fname, suite, test):
|
||||
"""Runs a single Java test with a Java TestRunner.
|
||||
|
||||
Args:
|
||||
fname: filename for the test (e.g. foo/bar/baz/tests/FooTest.py)
|
||||
suite: name of the Java test suite (e.g. FooTest)
|
||||
test: name of the test method to run (e.g. testFooBar)
|
||||
|
||||
Returns:
|
||||
TestRunResults object with a single test result.
|
||||
"""
|
||||
test = self._ComposeFullTestName(fname, suite, test)
|
||||
test_pkg = test_package.TestPackage(
|
||||
self.options.test_apk_path, self.options.test_apk_jar_path)
|
||||
instrumentation_options = test_options.InstrumentationOptions(
|
||||
self.options.build_type,
|
||||
self.options.tool,
|
||||
self.options.cleanup_test_files,
|
||||
self.options.push_deps,
|
||||
self.options.annotations,
|
||||
self.options.exclude_annotations,
|
||||
self.options.test_filter,
|
||||
self.options.test_data,
|
||||
self.options.save_perf_json,
|
||||
self.options.screenshot_failures,
|
||||
self.options.disable_assertions,
|
||||
self.options.wait_for_debugger,
|
||||
self.options.test_apk,
|
||||
self.options.test_apk_path,
|
||||
self.options.test_apk_jar_path)
|
||||
java_test_runner = test_runner.TestRunner(instrumentation_options,
|
||||
self.device_id,
|
||||
self.shard_index, test_pkg,
|
||||
self.ports_to_forward)
|
||||
try:
|
||||
java_test_runner.SetUp()
|
||||
return java_test_runner.RunTest(test)[0]
|
||||
finally:
|
||||
java_test_runner.TearDown()
|
||||
|
||||
def _RunJavaTests(self, fname, tests):
|
||||
"""Calls a list of tests and stops at the first test failure.
|
||||
|
||||
This method iterates until either it encounters a non-passing test or it
|
||||
exhausts the list of tests. Then it returns the appropriate Python result.
|
||||
|
||||
Args:
|
||||
fname: filename for the Python test
|
||||
tests: a list of Java test names which will be run
|
||||
|
||||
Returns:
|
||||
A TestRunResults object containing a result for this Python test.
|
||||
"""
|
||||
test_type = base_test_result.ResultType.PASS
|
||||
log = ''
|
||||
|
||||
start_ms = int(time.time()) * 1000
|
||||
for test in tests:
|
||||
# We're only running one test at a time, so this TestRunResults object
|
||||
# will hold only one result.
|
||||
suite, test_name = test.split('.')
|
||||
java_results = self._RunJavaTest(fname, suite, test_name)
|
||||
assert len(java_results.GetAll()) == 1
|
||||
if not java_results.DidRunPass():
|
||||
result = java_results.GetNotPass().pop()
|
||||
log = result.GetLog()
|
||||
test_type = result.GetType()
|
||||
break
|
||||
duration_ms = int(time.time()) * 1000 - start_ms
|
||||
|
||||
python_results = base_test_result.TestRunResults()
|
||||
python_results.AddResult(
|
||||
test_result.InstrumentationTestResult(
|
||||
self.qualified_name, test_type, start_ms, duration_ms, log=log))
|
||||
return python_results
|
||||
|
||||
def _ComposeFullTestName(self, fname, suite, test):
|
||||
package_name = self._GetPackageName(fname)
|
||||
return package_name + '.' + suite + '#' + test
|
||||
|
||||
def _GetPackageName(self, fname):
|
||||
"""Extracts the package name from the test file path."""
|
||||
dirname = os.path.dirname(fname)
|
||||
package = dirname[dirname.rfind(BASE_ROOT) + len(BASE_ROOT):]
|
||||
return package.replace(os.sep, '.')
@@ -1,115 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Helper module for calling python-based tests."""
|
||||
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from pylib.base import base_test_result
|
||||
from pylib.instrumentation import test_result
|
||||
|
||||
|
||||
class PythonExceptionTestResult(test_result.InstrumentationTestResult):
|
||||
"""Helper class for creating a test result from python exception."""
|
||||
|
||||
def __init__(self, test_name, start_date_ms, exc_info):
|
||||
"""Constructs an PythonExceptionTestResult object.
|
||||
|
||||
Args:
|
||||
test_name: name of the test which raised an exception.
|
||||
start_date_ms: the starting time for the test.
|
||||
exc_info: exception info, ostensibly from sys.exc_info().
|
||||
"""
|
||||
exc_type, exc_value, exc_traceback = exc_info
|
||||
trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
|
||||
exc_traceback))
|
||||
log_msg = 'Exception:\n' + trace_info
|
||||
duration_ms = (int(time.time()) * 1000) - start_date_ms
|
||||
|
||||
super(PythonExceptionTestResult, self).__init__(
|
||||
'PythonWrapper#' + test_name,
|
||||
base_test_result.ResultType.FAIL,
|
||||
start_date_ms,
|
||||
duration_ms,
|
||||
log=str(exc_type) + ' ' + log_msg)
|
||||
|
||||
|
||||
def CallPythonTest(test, options):
|
||||
"""Invokes a test function and translates Python exceptions into test results.
|
||||
|
||||
This method invokes SetUp()/TearDown() on the test. It is intended to be
|
||||
resilient to exceptions in SetUp(), the test itself, and TearDown(). Any
|
||||
Python exception means the test is marked as failed, and the test result will
|
||||
contain information about the exception.
|
||||
|
||||
If SetUp() raises an exception, the test is not run.
|
||||
|
||||
If TearDown() raises an exception, the test is treated as a failure. However,
|
||||
if the test itself raised an exception beforehand, that stack trace will take
|
||||
precedence whether or not TearDown() also raised an exception.
|
||||
|
||||
shard_index is not applicable in single-device scenarios, when test execution
|
||||
is serial rather than parallel. Tests can use this to bring up servers with
|
||||
unique port numbers, for example. See also python_test_sharder.
|
||||
|
||||
Args:
|
||||
test: an object which is ostensibly a subclass of PythonTestBase.
|
||||
options: Options to use for setting up tests.
|
||||
|
||||
Returns:
|
||||
A TestRunResults object which contains any results produced by the test or,
|
||||
in the case of a Python exception, the Python exception info.
|
||||
"""
|
||||
|
||||
start_date_ms = int(time.time()) * 1000
|
||||
failed = False
|
||||
|
||||
try:
|
||||
test.SetUp(options)
|
||||
except Exception:
|
||||
failed = True
|
||||
logging.exception(
|
||||
'Caught exception while trying to run SetUp() for test: ' +
|
||||
test.qualified_name)
|
||||
# Tests whose SetUp() method has failed are likely to fail, or at least
|
||||
# yield invalid results.
|
||||
exc_info = sys.exc_info()
|
||||
results = base_test_result.TestRunResults()
|
||||
results.AddResult(PythonExceptionTestResult(
|
||||
test.qualified_name, start_date_ms, exc_info))
|
||||
return results
|
||||
|
||||
try:
|
||||
results = test.Run()
|
||||
except Exception:
|
||||
# Setting this lets TearDown() avoid stomping on our stack trace from Run()
|
||||
# should TearDown() also raise an exception.
|
||||
failed = True
|
||||
logging.exception('Caught exception while trying to run test: ' +
|
||||
test.qualified_name)
|
||||
exc_info = sys.exc_info()
|
||||
results = base_test_result.TestRunResults()
|
||||
results.AddResult(PythonExceptionTestResult(
|
||||
test.qualified_name, start_date_ms, exc_info))
|
||||
|
||||
try:
|
||||
test.TearDown()
|
||||
except Exception:
|
||||
logging.exception(
|
||||
'Caught exception while trying run TearDown() for test: ' +
|
||||
test.qualified_name)
|
||||
if not failed:
|
||||
# Don't stomp the error during the test if TearDown blows up. This is a
|
||||
# trade-off: if the test fails, this will mask any problem with TearDown
|
||||
# until the test is fixed.
|
||||
exc_info = sys.exc_info()
|
||||
results = base_test_result.TestRunResults()
|
||||
results.AddResult(PythonExceptionTestResult(
|
||||
test.qualified_name, start_date_ms, exc_info))
|
||||
|
||||
return results
@@ -1,203 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Takes care of sharding the python-drive tests in multiple devices."""
|
||||
|
||||
import copy
|
||||
import logging
|
||||
import multiprocessing
|
||||
|
||||
from pylib.base import base_test_result
|
||||
from pylib.base import sharded_tests_queue
|
||||
from pylib.forwarder import Forwarder
|
||||
|
||||
from python_test_caller import CallPythonTest
|
||||
|
||||
|
||||
def SetTestsContainer(tests_container):
|
||||
"""Sets PythonTestSharder as a top-level field.
|
||||
|
||||
PythonTestSharder uses multiprocessing.Pool, which creates a pool of
|
||||
processes. This is used to initialize each worker in the pool, ensuring that
|
||||
each worker has access to this shared pool of tests.
|
||||
|
||||
The multiprocessing module requires that this be a top-level method.
|
||||
|
||||
Args:
|
||||
tests_container: the container for all the tests.
|
||||
"""
|
||||
PythonTestSharder.tests_container = tests_container
|
||||
|
||||
|
||||
def _DefaultRunnable(test_runner):
|
||||
"""A default runnable for a PythonTestRunner.
|
||||
|
||||
Args:
|
||||
test_runner: A PythonTestRunner which will run tests.
|
||||
|
||||
Returns:
|
||||
The test results.
|
||||
"""
|
||||
return test_runner.RunTests()
|
||||
|
||||
|
||||
class PythonTestRunner(object):
|
||||
"""Thin wrapper around a list of PythonTestBase instances.
|
||||
|
||||
This is meant to be a long-lived object which can run multiple Python tests
|
||||
within its lifetime. Tests will receive the device_id and shard_index.
|
||||
|
||||
The shard index affords the ability to create unique port numbers (e.g.
|
||||
DEFAULT_PORT + shard_index) if the test so wishes.
|
||||
"""
|
||||
|
||||
def __init__(self, options):
|
||||
"""Constructor.
|
||||
|
||||
Args:
|
||||
options: Options to use for setting up tests.
|
||||
"""
|
||||
self.options = options
|
||||
|
||||
def RunTests(self):
|
||||
"""Runs tests from the shared pool of tests, aggregating results.
|
||||
|
||||
Returns:
|
||||
A list of test results for all of the tests which this runner executed.
|
||||
"""
|
||||
tests = PythonTestSharder.tests_container
|
||||
|
||||
results = base_test_result.TestRunResults()
|
||||
for t in tests:
|
||||
results.AddTestRunResults(CallPythonTest(t, self.options))
|
||||
return results
|
||||
|
||||
|
||||
class PythonTestSharder(object):
|
||||
"""Runs Python tests in parallel on multiple devices.
|
||||
|
||||
This is lifted more or less wholesale from BaseTestRunner.
|
||||
|
||||
Under the covers, it creates a pool of long-lived PythonTestRunners, which
|
||||
execute tests from the pool of tests.
|
||||
|
||||
Args:
|
||||
attached_devices: a list of device IDs attached to the host.
|
||||
available_tests: a list of tests to run which subclass PythonTestBase.
|
||||
options: Options to use for setting up tests.
|
||||
|
||||
Returns:
|
||||
An aggregated list of test results.
|
||||
"""
|
||||
tests_container = None
|
||||
|
||||
def __init__(self, attached_devices, available_tests, options):
|
||||
self.options = options
|
||||
self.attached_devices = attached_devices
|
||||
self.retries = options.num_retries
|
||||
self.tests = available_tests
|
||||
|
||||
def _SetupSharding(self, tests):
|
||||
"""Creates the shared pool of tests and makes it available to test runners.
|
||||
|
||||
Args:
|
||||
tests: the list of tests which will be consumed by workers.
|
||||
"""
|
||||
SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
|
||||
len(self.attached_devices), tests))
|
||||
|
||||
def RunShardedTests(self):
|
||||
"""Runs tests in parallel using a pool of workers.
|
||||
|
||||
Returns:
|
||||
A list of test results aggregated from all test runs.
|
||||
"""
|
||||
logging.warning('*' * 80)
|
||||
logging.warning('Sharding in ' + str(len(self.attached_devices)) +
|
||||
' devices.')
|
||||
logging.warning('Note that the output is not synchronized.')
|
||||
logging.warning('Look for the "Final result" banner in the end.')
|
||||
logging.warning('*' * 80)
|
||||
final_results = base_test_result.TestRunResults()
|
||||
tests_to_run = self.tests
|
||||
|
||||
Forwarder.UseMultiprocessing()
|
||||
|
||||
for retry in xrange(self.retries):
|
||||
logging.warning('Try %d of %d', retry + 1, self.retries)
|
||||
self._SetupSharding(self.tests)
|
||||
test_runners = self._MakeTestRunners(self.attached_devices)
|
||||
logging.warning('Starting...')
|
||||
pool = multiprocessing.Pool(len(self.attached_devices),
|
||||
SetTestsContainer,
|
||||
[PythonTestSharder.tests_container])
|
||||
|
||||
# List of TestRunResults objects from each test execution.
|
||||
try:
|
||||
results_lists = pool.map(_DefaultRunnable, test_runners)
|
||||
except Exception:
|
||||
logging.exception('Unable to run tests. Something with the '
|
||||
'PythonTestRunners has gone wrong.')
|
||||
raise Exception('PythonTestRunners were unable to run tests.')
|
||||
|
||||
test_results = base_test_result.TestRunResults()
|
||||
for t in results_lists:
|
||||
test_results.AddTestRunResults(t)
|
||||
# Accumulate passing results.
|
||||
final_results.AddResults(test_results.GetPass())
|
||||
# If we have failed tests, map them to tests to retry.
|
||||
failed_tests = [t.GetName() for t in test_results.GetNotPass()]
|
||||
tests_to_run = self._GetTestsToRetry(self.tests, failed_tests)
|
||||
|
||||
# Bail out early if we have no more tests. This can happen if all tests
|
||||
# pass before we're out of retries, for example.
|
||||
if not tests_to_run:
|
||||
break
|
||||
|
||||
# all_passed has accumulated all passing test results.
|
||||
# test_results will have the results from the most recent run, which could
|
||||
# include a variety of failure modes (unknown, crashed, failed, etc).
|
||||
test_results.AddResults(final_results.GetPass())
|
||||
final_results = test_results
|
||||
|
||||
return final_results
|
||||
|
||||
def _MakeTestRunners(self, attached_devices):
|
||||
"""Initialize and return a list of PythonTestRunners.
|
||||
|
||||
Args:
|
||||
attached_devices: list of device IDs attached to host.
|
||||
|
||||
Returns:
|
||||
A list of PythonTestRunners, one for each device.
|
||||
"""
|
||||
test_runners = []
|
||||
for index, device in enumerate(attached_devices):
|
||||
logging.warning('*' * 80)
|
||||
logging.warning('Creating shard %d for %s', index, device)
|
||||
logging.warning('*' * 80)
|
||||
# Bind the PythonTestRunner to a device & shard index. Give it the
|
||||
# runnable which it will use to actually execute the tests.
|
||||
test_options = copy.deepcopy(self.options)
|
||||
test_options.ensure_value('device_id', device)
|
||||
test_options.ensure_value('shard_index', index)
|
||||
test_runner = PythonTestRunner(test_options)
|
||||
test_runners.append(test_runner)
|
||||
|
||||
return test_runners
|
||||
|
||||
def _GetTestsToRetry(self, available_tests, failed_test_names):
|
||||
"""Infers a list of tests to retry from failed tests and available tests.
|
||||
|
||||
Args:
|
||||
available_tests: a list of tests which subclass PythonTestBase.
|
||||
failed_test_names: a list of failed test names.
|
||||
|
||||
Returns:
|
||||
A list of test objects which correspond to test names found in
|
||||
failed_test_names, or an empty list if there is no correspondence.
|
||||
"""
|
||||
tests_to_retry = [t for t in available_tests
|
||||
if t.qualified_name in failed_test_names]
|
||||
return tests_to_retry
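The removed sharder hands the shared test container to its worker processes through multiprocessing.Pool's initializer arguments (see the Pool(..., SetTestsContainer, [PythonTestSharder.tests_container]) call above). A stripped-down sketch of that pattern, with made-up names, assuming fork-based workers as the original code does:

    import multiprocessing

    _shared_tests = None  # module-level so each worker can see it after init

    def _InitWorker(tests_container):
      # Pool calls this once per worker process; every worker ends up holding
      # a reference to the same process-safe queue of pending tests.
      global _shared_tests
      _shared_tests = tests_container

    def _RunOne(_):
      return _shared_tests.get()  # each worker pulls work from the shared queue

    if __name__ == '__main__':
      pending = multiprocessing.Queue()
      for name in ['testFoo', 'testBar']:
        pending.put(name)
      pool = multiprocessing.Pool(2, _InitWorker, [pending])
      print pool.map(_RunOne, range(2))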
@@ -1,234 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Runs the Python tests (relies on using the Java test runner)."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import types
|
||||
|
||||
from pylib import android_commands
|
||||
from pylib.base import base_test_result
|
||||
from pylib.instrumentation import test_options
|
||||
from pylib.instrumentation import test_package
|
||||
from pylib.instrumentation import test_runner
|
||||
from pylib.utils import report_results
|
||||
|
||||
import python_test_base
|
||||
from python_test_sharder import PythonTestSharder
|
||||
from test_info_collection import TestInfoCollection
|
||||
|
||||
|
||||
def _GetPythonFiles(root, files):
|
||||
"""Returns all files from |files| that end in 'Test.py'.
|
||||
|
||||
Args:
|
||||
root: A directory name with python files.
|
||||
files: A list of file names.
|
||||
|
||||
Returns:
|
||||
A list with all Python driven test file paths.
|
||||
"""
|
||||
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
|
||||
|
||||
|
||||
def _InferImportNameFromFile(python_file):
|
||||
"""Given a file, infer the import name for that file.
|
||||
|
||||
Example: /usr/foo/bar/baz.py -> baz.
|
||||
|
||||
Args:
|
||||
python_file: path to the Python file, ostensibly to import later.
|
||||
|
||||
Returns:
|
||||
The module name for the given file.
|
||||
"""
|
||||
return os.path.splitext(os.path.basename(python_file))[0]
|
||||
|
||||
|
||||
def DispatchPythonTests(options):
|
||||
"""Dispatches the Python tests. If there are multiple devices, use sharding.
|
||||
|
||||
Args:
|
||||
options: command line options.
|
||||
|
||||
Returns:
|
||||
A tuple of (base_test_result.TestRunResults object, exit code)
|
||||
|
||||
Raises:
|
||||
Exception: If there are no attached devices.
|
||||
"""
|
||||
|
||||
attached_devices = android_commands.GetAttachedDevices()
|
||||
if not attached_devices:
|
||||
raise Exception('You have no devices attached or visible!')
|
||||
if options.test_device:
|
||||
attached_devices = [options.test_device]
|
||||
|
||||
test_collection = TestInfoCollection()
|
||||
all_tests = _GetAllTests(options.python_test_root, options.official_build)
|
||||
test_collection.AddTests(all_tests)
|
||||
test_names = [t.qualified_name for t in all_tests]
|
||||
logging.debug('All available tests: ' + str(test_names))
|
||||
|
||||
available_tests = test_collection.GetAvailableTests(
|
||||
options.annotations, options.exclude_annotations, options.test_filter)
|
||||
|
||||
if not available_tests:
|
||||
logging.warning('No Python tests to run with current args.')
|
||||
return (base_test_result.TestRunResults(), 0)
|
||||
|
||||
test_names = [t.qualified_name for t in available_tests]
|
||||
logging.debug('Final list of tests to run: ' + str(test_names))
|
||||
|
||||
# Copy files to each device before running any tests.
|
||||
for device_id in attached_devices:
|
||||
logging.debug('Pushing files to device %s', device_id)
|
||||
test_pkg = test_package.TestPackage(options.test_apk_path,
|
||||
options.test_apk_jar_path)
|
||||
instrumentation_options = test_options.InstrumentationOptions(
|
||||
options.build_type,
|
||||
options.tool,
|
||||
options.cleanup_test_files,
|
||||
options.push_deps,
|
||||
options.annotations,
|
||||
options.exclude_annotations,
|
||||
options.test_filter,
|
||||
options.test_data,
|
||||
options.save_perf_json,
|
||||
options.screenshot_failures,
|
||||
options.disable_assertions,
|
||||
options.wait_for_debugger,
|
||||
options.test_apk,
|
||||
options.test_apk_path,
|
||||
options.test_apk_jar_path)
|
||||
test_files_copier = test_runner.TestRunner(instrumentation_options,
|
||||
device_id, 0, test_pkg, [])
|
||||
test_files_copier.InstallTestPackage()
|
||||
if options.push_deps:
|
||||
logging.info('Pushing data deps to device.')
|
||||
test_files_copier.PushDataDeps()
|
||||
else:
|
||||
logging.warning('Skipping pushing data deps to device.')
|
||||
|
||||
# Actually run the tests.
|
||||
if len(attached_devices) > 1 and options.wait_for_debugger:
|
||||
logging.warning('Debugger can not be sharded, '
|
||||
'using first available device')
|
||||
attached_devices = attached_devices[:1]
|
||||
logging.debug('Running Python tests')
|
||||
sharder = PythonTestSharder(attached_devices, available_tests, options)
|
||||
test_results = sharder.RunShardedTests()
|
||||
|
||||
if not test_results.DidRunPass():
|
||||
return (test_results, 1)
|
||||
|
||||
return (test_results, 0)
|
||||
|
||||
|
||||
def _GetTestModules(python_test_root, is_official_build):
|
||||
"""Retrieve a sorted list of pythonDrivenTests.
|
||||
|
||||
Walks the location of pythonDrivenTests, imports them, and provides the list
|
||||
of imported modules to the caller.
|
||||
|
||||
Args:
|
||||
python_test_root: the path to walk, looking for pythonDrivenTests
|
||||
is_official_build: whether to run only those tests marked 'official'
|
||||
|
||||
Returns:
|
||||
A list of Python modules which may have zero or more tests.
|
||||
"""
|
||||
# By default run all python tests under pythonDrivenTests.
|
||||
python_test_file_list = []
|
||||
for root, _, files in os.walk(python_test_root):
|
||||
if (root.endswith('host_driven_tests') or
|
||||
root.endswith('pythonDrivenTests') or
|
||||
(is_official_build and root.endswith('pythonDrivenTests/official'))):
|
||||
python_test_file_list += _GetPythonFiles(root, files)
|
||||
python_test_file_list.sort()
|
||||
|
||||
test_module_list = [_GetModuleFromFile(test_file)
|
||||
for test_file in python_test_file_list]
|
||||
return test_module_list
|
||||
|
||||
|
||||
def _GetModuleFromFile(python_file):
|
||||
"""Gets the module associated with a file by importing it.
|
||||
|
||||
Args:
|
||||
python_file: file to import
|
||||
|
||||
Returns:
|
||||
The module object.
|
||||
"""
|
||||
sys.path.append(os.path.dirname(python_file))
|
||||
import_name = _InferImportNameFromFile(python_file)
|
||||
return __import__(import_name)
|
||||
|
||||
|
||||
def _GetTestsFromClass(test_class):
|
||||
"""Create a list of test objects for each test method on this class.
|
||||
|
||||
Test methods are methods on the class which begin with 'test'.
|
||||
|
||||
Args:
|
||||
test_class: class object which contains zero or more test methods.
|
||||
|
||||
Returns:
|
||||
A list of test objects, each of which is bound to one test.
|
||||
"""
|
||||
test_names = [m for m in dir(test_class)
|
||||
if _IsTestMethod(m, test_class)]
|
||||
return map(test_class, test_names)
|
||||
|
||||
|
||||
def _GetTestClassesFromModule(test_module):
|
||||
tests = []
|
||||
for name in dir(test_module):
|
||||
attr = getattr(test_module, name)
|
||||
if _IsTestClass(attr):
|
||||
tests.extend(_GetTestsFromClass(attr))
|
||||
return tests
|
||||
|
||||
|
||||
def _IsTestClass(test_class):
|
||||
return (type(test_class) is types.TypeType and
|
||||
issubclass(test_class, python_test_base.PythonTestBase) and
|
||||
test_class is not python_test_base.PythonTestBase)
|
||||
|
||||
|
||||
def _IsTestMethod(attrname, test_case_class):
|
||||
"""Checks whether this is a valid test method.
|
||||
|
||||
Args:
|
||||
attrname: the method name.
|
||||
test_case_class: the test case class.
|
||||
|
||||
Returns:
|
||||
True if test_case_class.'attrname' is callable and it starts with 'test';
|
||||
False otherwise.
|
||||
"""
|
||||
attr = getattr(test_case_class, attrname)
|
||||
return callable(attr) and attrname.startswith('test')
|
||||
|
||||
|
||||
def _GetAllTests(test_root, is_official_build):
|
||||
"""Retrieve a list of Python test modules and their respective methods.
|
||||
|
||||
Args:
|
||||
test_root: path which contains Python-driven test files
|
||||
is_official_build: whether this is an official build
|
||||
|
||||
Returns:
|
||||
List of test case objects for all available test methods.
|
||||
"""
|
||||
if not test_root:
|
||||
return []
|
||||
all_tests = []
|
||||
test_module_list = _GetTestModules(test_root, is_official_build)
|
||||
for module in test_module_list:
|
||||
all_tests.extend(_GetTestClassesFromModule(module))
|
||||
return all_tests
@@ -0,0 +1,203 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Setup for instrumentation host-driven tests."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import types
|
||||
|
||||
import test_case
|
||||
import test_info_collection
|
||||
import test_runner
|
||||
|
||||
|
||||
def _GetPythonFiles(root, files):
|
||||
"""Returns all files from |files| that end in 'Test.py'.
|
||||
|
||||
Args:
|
||||
root: A directory name with python files.
|
||||
files: A list of file names.
|
||||
|
||||
Returns:
|
||||
A list with all python files that match the testing naming scheme.
|
||||
"""
|
||||
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
|
||||
|
||||
|
||||
def _InferImportNameFromFile(python_file):
|
||||
"""Given a file, infer the import name for that file.
|
||||
|
||||
Example: /usr/foo/bar/baz.py -> baz.
|
||||
|
||||
Args:
|
||||
python_file: Path to the Python file, ostensibly to import later.
|
||||
|
||||
Returns:
|
||||
The module name for the given file.
|
||||
"""
|
||||
return os.path.splitext(os.path.basename(python_file))[0]
|
||||
|
||||
|
||||
def _GetTestModules(host_driven_test_root, is_official_build):
|
||||
"""Retrieve a list of python modules that match the testing naming scheme.
|
||||
|
||||
Walks the location of host-driven tests, imports them, and provides the list
|
||||
of imported modules to the caller.
|
||||
|
||||
Args:
|
||||
host_driven_test_root: The path to walk, looking for the
|
||||
pythonDrivenTests or host_driven_tests directory
|
||||
is_official_build: Whether to run only those tests marked 'official'
|
||||
|
||||
Returns:
|
||||
A list of python modules under |host_driven_test_root| which match the
|
||||
testing naming scheme. Each module should define one or more classes that
|
||||
derive from HostDrivenTestCase.
|
||||
"""
|
||||
# By default run all host-driven tests under pythonDrivenTests or
|
||||
# host_driven_tests.
|
||||
host_driven_test_file_list = []
|
||||
for root, _, files in os.walk(host_driven_test_root):
|
||||
if (root.endswith('host_driven_tests') or
|
||||
root.endswith('pythonDrivenTests') or
|
||||
(is_official_build and (root.endswith('pythonDrivenTests/official') or
|
||||
root.endswith('host_driven_tests/official')))):
|
||||
host_driven_test_file_list += _GetPythonFiles(root, files)
|
||||
host_driven_test_file_list.sort()
|
||||
|
||||
test_module_list = [_GetModuleFromFile(test_file)
|
||||
for test_file in host_driven_test_file_list]
|
||||
return test_module_list
|
||||
|
||||
|
||||
def _GetModuleFromFile(python_file):
|
||||
"""Gets the python module associated with a file by importing it.
|
||||
|
||||
Args:
|
||||
python_file: File to import.
|
||||
|
||||
Returns:
|
||||
The module object.
|
||||
"""
|
||||
sys.path.append(os.path.dirname(python_file))
|
||||
import_name = _InferImportNameFromFile(python_file)
|
||||
return __import__(import_name)
|
||||
|
||||
|
||||
def _GetTestsFromClass(test_case_class, **kwargs):
|
||||
"""Returns one test object for each test method in |test_case_class|.
|
||||
|
||||
Test methods are methods on the class which begin with 'test'.
|
||||
|
||||
Args:
|
||||
test_case_class: Class derived from HostDrivenTestCase which contains zero
|
||||
or more test methods.
|
||||
kwargs: Keyword args to pass into the constructor of test cases.
|
||||
|
||||
Returns:
|
||||
A list of test case objects, each initialized for a particular test method.
|
||||
"""
|
||||
test_names = [m for m in dir(test_case_class)
|
||||
if _IsTestMethod(m, test_case_class)]
|
||||
return [test_case_class(name, **kwargs) for name in test_names]
|
||||
|
||||
|
||||
def _GetTestsFromModule(test_module, **kwargs):
|
||||
"""Gets a list of test objects from |test_module|.
|
||||
|
||||
Args:
|
||||
test_module: Module from which to get the set of test methods.
|
||||
kwargs: Keyword args to pass into the constructor of test cases.
|
||||
|
||||
Returns:
|
||||
A list of test case objects each initialized for a particular test method
|
||||
defined in |test_module|.
|
||||
"""
|
||||
|
||||
tests = []
|
||||
for name in dir(test_module):
|
||||
attr = getattr(test_module, name)
|
||||
if _IsTestCaseClass(attr):
|
||||
tests.extend(_GetTestsFromClass(attr, **kwargs))
|
||||
return tests
|
||||
|
||||
|
||||
def _IsTestCaseClass(test_class):
|
||||
return (type(test_class) is types.TypeType and
|
||||
issubclass(test_class, test_case.HostDrivenTestCase) and
|
||||
test_class is not test_case.HostDrivenTestCase)
|
||||
|
||||
|
||||
def _IsTestMethod(attrname, test_case_class):
|
||||
"""Checks whether this is a valid test method.
|
||||
|
||||
Args:
|
||||
attrname: The method name.
|
||||
test_case_class: The test case class.
|
||||
|
||||
Returns:
|
||||
True if test_case_class.'attrname' is callable and it starts with 'test';
|
||||
False otherwise.
|
||||
"""
|
||||
attr = getattr(test_case_class, attrname)
|
||||
return callable(attr) and attrname.startswith('test')
|
||||
|
||||
|
||||
def _GetAllTests(test_root, is_official_build, **kwargs):
|
||||
"""Retrieve a list of host-driven tests defined under |test_root|.
|
||||
|
||||
Args:
|
||||
test_root: Path which contains host-driven test files.
|
||||
is_official_build: Whether this is an official build.
|
||||
kwargs: Keyword args to pass into the constructor of test cases.
|
||||
|
||||
Returns:
|
||||
List of test case objects, one for each available test method.
|
||||
"""
|
||||
if not test_root:
|
||||
return []
|
||||
all_tests = []
|
||||
test_module_list = _GetTestModules(test_root, is_official_build)
|
||||
for module in test_module_list:
|
||||
all_tests.extend(_GetTestsFromModule(module, **kwargs))
|
||||
return all_tests
|
||||
|
||||
|
||||
def InstrumentationSetup(host_driven_test_root, official_build,
|
||||
instrumentation_options):
|
||||
"""Creates a list of host-driven instrumentation tests and a runner factory.
|
||||
|
||||
Args:
|
||||
host_driven_test_root: Directory where the host-driven tests are.
|
||||
official_build: True if this is an official build.
|
||||
instrumentation_options: An InstrumentationOptions object.
|
||||
|
||||
Returns:
|
||||
A tuple of (TestRunnerFactory, tests).
|
||||
"""
|
||||
|
||||
test_collection = test_info_collection.TestInfoCollection()
|
||||
all_tests = _GetAllTests(
|
||||
host_driven_test_root, official_build,
|
||||
instrumentation_options=instrumentation_options)
|
||||
test_collection.AddTests(all_tests)
|
||||
|
||||
available_tests = test_collection.GetAvailableTests(
|
||||
instrumentation_options.annotations,
|
||||
instrumentation_options.exclude_annotations,
|
||||
instrumentation_options.test_filter)
|
||||
logging.debug('All available tests: ' + str(
|
||||
[t.tagged_name for t in available_tests]))
|
||||
|
||||
def TestRunnerFactory(device, shard_index):
|
||||
return test_runner.HostDrivenTestRunner(
|
||||
device, shard_index,
|
||||
instrumentation_options.tool,
|
||||
instrumentation_options.build_type,
|
||||
instrumentation_options.push_deps,
|
||||
instrumentation_options.cleanup_test_files)
|
||||
|
||||
return (TestRunnerFactory, available_tests)
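The tuple returned here is what the common dispatcher consumes; a minimal sketch of how a caller would use it (the test root and device serial below are made up for illustration):

    # Illustrative only; argument names mirror this diff.
    runner_factory, tests = InstrumentationSetup(
        'chrome/android/host_driven_tests',    # hypothetical test root
        False,                                 # official_build
        instrumentation_options)

    # The dispatcher invokes the factory once per (device, shard index) and
    # feeds each resulting runner tests from the shared collection.
    runner = runner_factory('0123456789abcdef', 0)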
@@ -0,0 +1,151 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Base class for host-driven test cases.
|
||||
|
||||
This test case is intended to serve as the base class for any host-driven
|
||||
test cases. It is similar to the Python unittest module in that test cases
|
||||
inherit from this class and add methods which will be run as tests.
|
||||
|
||||
When a HostDrivenTestCase object is instantiated, its purpose is to run only one
|
||||
test method in the derived class. The test runner gives it the name of the test
|
||||
method the instance will run. The test runner calls SetUp with the device ID
|
||||
which the test method will run against. The test runner runs the test method
|
||||
itself, collecting the result, and calls TearDown.
|
||||
|
||||
Tests can perform arbitrary Python commands and asserts in test methods. Tests
|
||||
that run instrumentation tests can make use of the _RunJavaTests helper function
|
||||
to trigger Java tests and convert results into a single host-driven test result.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
|
||||
from pylib import android_commands
|
||||
from pylib.base import base_test_result
|
||||
from pylib.instrumentation import test_package
|
||||
from pylib.instrumentation import test_result
|
||||
from pylib.instrumentation import test_runner
|
||||
|
||||
# aka the parent of com.google.android
|
||||
BASE_ROOT = 'src' + os.sep
|
||||
|
||||
|
||||
class HostDrivenTestCase(object):
|
||||
"""Base class for host-driven test cases."""
|
||||
|
||||
_HOST_DRIVEN_TAG = 'HostDriven'
|
||||
|
||||
def __init__(self, test_name, instrumentation_options=None):
|
||||
"""Create a test case initialized to run |test_name|.
|
||||
|
||||
Args:
|
||||
test_name: The name of the method to run as the test.
|
||||
instrumentation_options: An InstrumentationOptions object.
|
||||
"""
|
||||
self.test_name = test_name
|
||||
class_name = self.__class__.__name__
|
||||
self.qualified_name = '%s.%s' % (class_name, self.test_name)
|
||||
# Use tagged_name when creating results, so that we can identify host-driven
|
||||
# tests in the overall results.
|
||||
self.tagged_name = '%s_%s' % (self._HOST_DRIVEN_TAG, self.qualified_name)
|
||||
|
||||
self.instrumentation_options = instrumentation_options
|
||||
self.ports_to_forward = []
|
||||
|
||||
def SetUp(self, device, shard_index, build_type, push_deps,
|
||||
cleanup_test_files):
|
||||
self.device_id = device
|
||||
self.shard_index = shard_index
|
||||
self.build_type = build_type
|
||||
self.adb = android_commands.AndroidCommands(self.device_id)
|
||||
self.push_deps = push_deps
|
||||
self.cleanup_test_files = cleanup_test_files
|
||||
|
||||
def TearDown(self):
|
||||
pass
|
||||
|
||||
def GetOutDir(self):
|
||||
return os.path.join(os.environ['CHROME_SRC'], 'out',
|
||||
self.build_type)
|
||||
|
||||
def Run(self):
|
||||
logging.info('Running host-driven test: %s', self.tagged_name)
|
||||
# Get the test method on the derived class and execute it
|
||||
return getattr(self, self.test_name)()
|
||||
|
||||
def __RunJavaTest(self, package_name, test_case, test_method):
|
||||
"""Runs a single Java test method with a Java TestRunner.
|
||||
|
||||
Args:
|
||||
package_name: Package name in which the java tests live
|
||||
(e.g. foo.bar.baz.tests)
|
||||
test_case: Name of the Java test case (e.g. FooTest)
|
||||
test_method: Name of the test method to run (e.g. testFooBar)
|
||||
|
||||
Returns:
|
||||
TestRunResults object with a single test result.
|
||||
"""
|
||||
test = '%s.%s#%s' % (package_name, test_case, test_method)
|
||||
test_pkg = test_package.TestPackage(
|
||||
self.instrumentation_options.test_apk_path,
|
||||
self.instrumentation_options.test_apk_jar_path)
|
||||
java_test_runner = test_runner.TestRunner(self.instrumentation_options,
|
||||
self.device_id,
|
||||
self.shard_index, test_pkg,
|
||||
self.ports_to_forward)
|
||||
try:
|
||||
java_test_runner.SetUp()
|
||||
return java_test_runner.RunTest(test)[0]
|
||||
finally:
|
||||
java_test_runner.TearDown()
|
||||
|
||||
def _RunJavaTests(self, package_name, tests):
|
||||
"""Calls a list of tests and stops at the first test failure.
|
||||
|
||||
This method iterates until either it encounters a non-passing test or it
|
||||
exhausts the list of tests. Then it returns the appropriate overall result.
|
||||
|
||||
Test cases may make use of this method internally to assist in running
|
||||
instrumentation tests. This function relies on instrumentation_options
|
||||
being defined.
|
||||
|
||||
Args:
|
||||
package_name: Package name in which the java tests live
|
||||
(e.g. foo.bar.baz.tests)
|
||||
tests: A list of Java test names which will be run
|
||||
|
||||
Returns:
|
||||
A TestRunResults object containing an overall result for this set of Java
|
||||
tests. If any Java tests do not pass, this is a fail overall.
|
||||
"""
|
||||
test_type = base_test_result.ResultType.PASS
|
||||
log = ''
|
||||
|
||||
start_ms = int(time.time()) * 1000
|
||||
for test in tests:
|
||||
# We're only running one test at a time, so this TestRunResults object
|
||||
# will hold only one result.
|
||||
suite, test_name = test.split('.')
|
||||
java_result = self.__RunJavaTest(package_name, suite, test_name)
|
||||
assert len(java_result.GetAll()) == 1
|
||||
if not java_result.DidRunPass():
|
||||
result = java_result.GetNotPass().pop()
|
||||
log = result.GetLog()
|
||||
test_type = result.GetType()
|
||||
break
|
||||
duration_ms = int(time.time()) * 1000 - start_ms
|
||||
|
||||
overall_result = base_test_result.TestRunResults()
|
||||
overall_result.AddResult(
|
||||
test_result.InstrumentationTestResult(
|
||||
self.tagged_name, test_type, start_ms, duration_ms, log=log))
|
||||
return overall_result
|
||||
|
||||
def __str__(self):
|
||||
return self.tagged_name
|
||||
|
||||
def __repr__(self):
|
||||
return self.tagged_name
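A test written against this base class defines methods whose names start with 'test' and, when it wraps instrumentation tests, returns the result of _RunJavaTests. A hypothetical example (the package and test names are made up):

    class ExampleSmokeTest(HostDrivenTestCase):

      def testJavaSmoke(self):
        # Delegates to two instrumentation tests and folds their results into
        # a single host-driven result via the helper above.
        return self._RunJavaTests(
            'org.chromium.example.tests',
            ['ExampleTest.testFoo', 'ExampleTest.testBar'])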
@@ -2,7 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-"""Module containing information about the python-driven tests."""
+"""Module containing information about the host-driven tests."""

 import logging
 import os
@@ -70,7 +70,7 @@ class TestInfoCollection(object):
     Args:
       annotations: List of annotations. Each test in the returned list is
         annotated with atleast one of these annotations.
-      exlcude_annotations: List of annotations. The tests in the returned
+      exclude_annotations: List of annotations. The tests in the returned
         list are not annotated with any of these annotations.
       name_filter: name filter which tests must match, if any

@@ -0,0 +1,135 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
"""Runs host-driven tests on a particular device."""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from pylib.base import base_test_result
|
||||
from pylib.base import base_test_runner
|
||||
from pylib.instrumentation import test_result
|
||||
|
||||
import test_case
|
||||
|
||||
|
||||
class HostDrivenExceptionTestResult(test_result.InstrumentationTestResult):
|
||||
"""Test result corresponding to a python exception in a host-driven test."""
|
||||
|
||||
def __init__(self, test_name, start_date_ms, exc_info):
|
||||
"""Constructs a HostDrivenExceptionTestResult object.
|
||||
|
||||
Args:
|
||||
test_name: name of the test which raised an exception.
|
||||
start_date_ms: the starting time for the test.
|
||||
exc_info: exception info, ostensibly from sys.exc_info().
|
||||
"""
|
||||
exc_type, exc_value, exc_traceback = exc_info
|
||||
trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
|
||||
exc_traceback))
|
||||
log_msg = 'Exception:\n' + trace_info
|
||||
duration_ms = (int(time.time()) * 1000) - start_date_ms
|
||||
|
||||
super(HostDrivenExceptionTestResult, self).__init__(
|
||||
test_name,
|
||||
base_test_result.ResultType.FAIL,
|
||||
start_date_ms,
|
||||
duration_ms,
|
||||
log=str(exc_type) + ' ' + log_msg)
|
||||
|
||||
|
||||
class HostDrivenTestRunner(base_test_runner.BaseTestRunner):
|
||||
"""Orchestrates running a set of host-driven tests.
|
||||
|
||||
Any Python exceptions in the tests are caught and translated into a failed
|
||||
result, rather than being re-raised on the main thread.
|
||||
"""
|
||||
|
||||
#override
|
||||
def __init__(self, device, shard_index, tool, build_type, push_deps,
|
||||
cleanup_test_files):
|
||||
"""Creates a new HostDrivenTestRunner.
|
||||
|
||||
Args:
|
||||
device: Attached android device.
|
||||
shard_index: Shard index.
|
||||
tool: Name of the Valgrind tool.
|
||||
build_type: 'Release' or 'Debug'.
|
||||
push_deps: If True, push all dependencies to the device.
|
||||
cleanup_test_files: Whether or not to cleanup test files on device.
|
||||
"""
|
||||
|
||||
super(HostDrivenTestRunner, self).__init__(device, tool, build_type,
|
||||
push_deps, cleanup_test_files)
|
||||
|
||||
# The shard index affords the ability to create unique port numbers (e.g.
|
||||
# DEFAULT_PORT + shard_index) if the test so wishes.
|
||||
self.shard_index = shard_index
|
||||
|
||||
#override
|
||||
def RunTest(self, test):
|
||||
"""Sets up and runs a test case.
|
||||
|
||||
Args:
|
||||
test: An object which is ostensibly a subclass of HostDrivenTestCase.
|
||||
|
||||
Returns:
|
||||
A TestRunResults object which contains the result produced by the test
|
||||
and, in the case of a failure, the test that should be retried.
|
||||
"""
|
||||
|
||||
assert isinstance(test, test_case.HostDrivenTestCase)
|
||||
|
||||
start_date_ms = int(time.time()) * 1000
|
||||
exception_raised = False
|
||||
|
||||
try:
|
||||
test.SetUp(self.device, self.shard_index, self.build_type,
|
||||
self._push_deps, self._cleanup_test_files)
|
||||
except Exception:
|
||||
logging.exception(
|
||||
'Caught exception while trying to run SetUp() for test: ' +
|
||||
test.tagged_name)
|
||||
# Tests whose SetUp() method has failed are likely to fail, or at least
|
||||
# yield invalid results.
|
||||
exc_info = sys.exc_info()
|
||||
results = base_test_result.TestRunResults()
|
||||
results.AddResult(HostDrivenExceptionTestResult(
|
||||
test.tagged_name, start_date_ms, exc_info))
|
||||
return results, test
|
||||
|
||||
try:
|
||||
results = test.Run()
|
||||
except Exception:
|
||||
# Setting this lets TearDown() avoid stomping on our stack trace from
|
||||
# Run() should TearDown() also raise an exception.
|
||||
exception_raised = True
|
||||
logging.exception('Caught exception while trying to run test: ' +
|
||||
test.tagged_name)
|
||||
exc_info = sys.exc_info()
|
||||
results = base_test_result.TestRunResults()
|
||||
results.AddResult(HostDrivenExceptionTestResult(
|
||||
test.tagged_name, start_date_ms, exc_info))
|
||||
|
||||
try:
|
||||
test.TearDown()
|
||||
except Exception:
|
||||
logging.exception(
|
||||
'Caught exception while trying run TearDown() for test: ' +
|
||||
test.tagged_name)
|
||||
if not exception_raised:
|
||||
# Don't stomp the error during the test if TearDown blows up. This is a
|
||||
# trade-off: if the test fails, this will mask any problem with TearDown
|
||||
# until the test is fixed.
|
||||
exc_info = sys.exc_info()
|
||||
results = base_test_result.TestRunResults()
|
||||
results.AddResult(HostDrivenExceptionTestResult(
|
||||
test.tagged_name, start_date_ms, exc_info))
|
||||
|
||||
if not results.DidRunPass():
|
||||
return results, test
|
||||
else:
|
||||
return results, None
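The (results, test) pair returned by RunTest is what lets the common dispatcher requeue failures: the second element is the test to retry, or None on success. A rough sketch of a caller honoring that contract (pending_tests and all_results are hypothetical placeholders, not dispatcher code):

    runner = HostDrivenTestRunner('0123456789abcdef', 0, '', 'Debug',
                                  False, False)
    results, retry = runner.RunTest(test)
    all_results.AddTestRunResults(results)
    if retry is not None:
      pending_tests.append(retry)  # a failed test goes back onto the queue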
@@ -2,7 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-"""Annotations for python-driven tests."""
+"""Annotations for host-driven tests."""

 import os

@@ -13,7 +13,7 @@ class AnnotatedFunctions(object):

   @staticmethod
   def _AddFunction(annotation, function):
-    """Adds an annotated to function to our container.
+    """Adds an annotated function to our container.

     Args:
       annotation: the annotation string.
@@ -56,7 +56,7 @@ class AnnotatedFunctions(object):
           if qualified_function_name in tests]


-# The following functions are annotations used for the python driven tests.
+# The following functions are annotations used for the host-driven tests.
 def Smoke(function):
   return AnnotatedFunctions._AddFunction('Smoke', function)

@@ -169,7 +169,7 @@ class TestJar(object):
     for test_method in self.GetTestMethods():
       annotations_ = frozenset(self.GetTestAnnotations(test_method))
       if (annotations_.isdisjoint(self._ANNOTATIONS) and
-          not self.IsPythonDrivenTest(test_method)):
+          not self.IsHostDrivenTest(test_method)):
         tests_missing_annotations.append(test_method)
     return sorted(tests_missing_annotations)

@@ -202,7 +202,7 @@ class TestJar(object):
       available_tests = list(set(available_tests) - set(excluded_tests))
     else:
       available_tests = [m for m in self.GetTestMethods()
-                         if not self.IsPythonDrivenTest(m)]
+                         if not self.IsHostDrivenTest(m)]

     tests = []
     if test_filter:
@@ -216,5 +216,5 @@ class TestJar(object):
     return tests

   @staticmethod
-  def IsPythonDrivenTest(test):
+  def IsHostDrivenTest(test):
     return 'pythonDrivenTests' in test
@@ -2,10 +2,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-"""Helper functions common to native, java and python test runners."""
+"""Helper functions common to native, java and host-driven test runners."""

 import logging
 import os
 import sys
 import time

@@ -9,32 +9,58 @@ import optparse
import random
|
||||
import sys
|
||||
|
||||
from pylib import android_commands
|
||||
from pylib.base import base_test_result
|
||||
from pylib.host_driven import python_test_base
|
||||
from pylib.host_driven import python_test_sharder
|
||||
from pylib.base import test_dispatcher
|
||||
from pylib.host_driven import test_case
|
||||
from pylib.host_driven import test_runner
|
||||
from pylib.utils import report_results
|
||||
from pylib.utils import test_options_parser
|
||||
|
||||
|
||||
class MonkeyTest(python_test_base.PythonTestBase):
|
||||
class MonkeyTest(test_case.HostDrivenTestCase):
|
||||
def __init__(self, test_name, package_name, activity_name, category, seed,
|
||||
throttle, event_count, verbosity, extra_args):
|
||||
"""Create a MonkeyTest object.
|
||||
|
||||
Args:
|
||||
test_name: Name of the method to run for this test object.
|
||||
package_name: Allowed package.
|
||||
activity_name: Name of the activity to start.
|
||||
category: A list of allowed categories.
|
||||
seed: Seed value for pseudo-random generator. Same seed value
|
||||
generates the same sequence of events. Seed is randomized by default.
|
||||
throttle: Delay between events (ms).
|
||||
event_count: Number of events to generate.
|
||||
verbosity: Verbosity level [0-3].
|
||||
extra_args: A string of other args to pass to the command verbatim.
|
||||
"""
|
||||
super(MonkeyTest, self).__init__(test_name)
|
||||
self.package_name = package_name
|
||||
self.activity_name = activity_name
|
||||
self.category = category
|
||||
self.seed = seed or random.randint(1, 100)
|
||||
self.throttle = throttle
|
||||
self.event_count = event_count
|
||||
self.verbosity = verbosity
|
||||
self.extra_args = extra_args
|
||||
|
||||
def testMonkey(self):
|
||||
# Launch and wait for Chrome to launch.
|
||||
self.adb.StartActivity(self.options.package_name,
|
||||
self.options.activity_name,
|
||||
self.adb.StartActivity(self.package_name,
|
||||
self.activity_name,
|
||||
wait_for_completion=True,
|
||||
action='android.intent.action.MAIN',
|
||||
force_stop=True)
|
||||
|
||||
# Chrome crashes are not always caught by Monkey test runner.
|
||||
# Verify Chrome has the same PID before and after the test.
|
||||
before_pids = self.adb.ExtractPid(self.options.package_name)
|
||||
before_pids = self.adb.ExtractPid(self.package_name)
|
||||
|
||||
# Run the test.
|
||||
output = ''
|
||||
if before_pids:
|
||||
output = '\n'.join(self._LaunchMonkeyTest())
|
||||
after_pids = self.adb.ExtractPid(self.options.package_name)
|
||||
after_pids = self.adb.ExtractPid(self.package_name)
|
||||
|
||||
crashed = (not before_pids or not after_pids
|
||||
or after_pids[0] != before_pids[0])
@@ -42,82 +68,63 @@ class MonkeyTest(python_test_base.PythonTestBase):
results = base_test_result.TestRunResults()
|
||||
if 'Monkey finished' in output and not crashed:
|
||||
result = base_test_result.BaseTestResult(
|
||||
self.qualified_name, base_test_result.ResultType.PASS, log=output)
|
||||
self.tagged_name, base_test_result.ResultType.PASS, log=output)
|
||||
else:
|
||||
result = base_test_result.BaseTestResult(
|
||||
self.qualified_name, base_test_result.ResultType.FAIL, log=output)
|
||||
self.tagged_name, base_test_result.ResultType.FAIL, log=output)
|
||||
results.AddResult(result)
|
||||
return results
|
||||
|
||||
def _LaunchMonkeyTest(self):
|
||||
"""Runs monkey test for a given package.
|
||||
|
||||
Looks at the following parameters in the options object provided
|
||||
in class initializer:
|
||||
package_name: Allowed package.
|
||||
category: A list of allowed categories.
|
||||
throttle: Delay between events (ms).
|
||||
seed: Seed value for pseduo-random generator. Same seed value
|
||||
generates the same sequence of events. Seed is randomized by
|
||||
default.
|
||||
event_count: Number of events to generate.
|
||||
verbosity: Verbosity level [0-3].
|
||||
extra_args: A string of other args to pass to the command verbatim.
|
||||
Returns:
|
||||
Output from the monkey command on the device.
|
||||
"""
|
||||
|
||||
category = self.options.category or []
|
||||
seed = self.options.seed or random.randint(1, 100)
|
||||
throttle = self.options.throttle or 100
|
||||
event_count = self.options.event_count or 10000
|
||||
verbosity = self.options.verbosity or 1
|
||||
extra_args = self.options.extra_args or ''
|
||||
|
||||
timeout_ms = event_count * throttle * 1.5
|
||||
timeout_ms = self.event_count * self.throttle * 1.5
|
||||
|
||||
cmd = ['monkey',
|
||||
'-p %s' % self.options.package_name,
|
||||
' '.join(['-c %s' % c for c in category]),
|
||||
'--throttle %d' % throttle,
|
||||
'-s %d' % seed,
|
||||
'-v ' * verbosity,
|
||||
'-p %s' % self.package_name,
|
||||
' '.join(['-c %s' % c for c in self.category]),
|
||||
'--throttle %d' % self.throttle,
|
||||
'-s %d' % self.seed,
|
||||
'-v ' * self.verbosity,
|
||||
'--monitor-native-crashes',
|
||||
'--kill-process-after-error',
|
||||
extra_args,
|
||||
'%d' % event_count]
|
||||
self.extra_args,
|
||||
'%d' % self.event_count]
|
||||
return self.adb.RunShellCommand(' '.join(cmd), timeout_time=timeout_ms)
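For orientation, a rough, self-contained sketch of the shell command the cmd list above assembles. The values are placeholders chosen to match the option defaults declared in main() further down (throttle 100, 10000 events, verbosity 1); the package name and seed are made up for illustration and are not taken from this change.

# Sketch only: placeholder values, not a real run.
package_name = 'com.example.app'   # placeholder package
category = []                      # no -c filters
throttle = 100
seed = 42
verbosity = 1
event_count = 10000
extra_args = ''

cmd = ['monkey',
       '-p %s' % package_name,
       ' '.join(['-c %s' % c for c in category]),
       '--throttle %d' % throttle,
       '-s %d' % seed,
       '-v ' * verbosity,
       '--monitor-native-crashes',
       '--kill-process-after-error',
       extra_args,
       '%d' % event_count]
print(' '.join(cmd))
# -> monkey -p com.example.app  --throttle 100 -s 42 -v  --monitor-native-crashes --kill-process-after-error  10000
# (the doubled spaces come from the empty category and extra_args slots)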


def DispatchPythonTests(options):
  """Dispatches the Monkey tests, sharding it if there multiple devices."""
def RunMonkeyTests(options):
  """Runs the Monkey tests, replicating it if there are multiple devices."""
  logger = logging.getLogger()
  logger.setLevel(logging.DEBUG)
  attached_devices = android_commands.GetAttachedDevices()
  if not attached_devices:
    raise Exception('You have no devices attached or visible!')

  # Actually run the tests.
  logging.debug('Running monkey tests.')
  # TODO(frankf): This is a stop-gap solution. Come up with a
  # general way for running tests on every devices.
  available_tests = []
  for k in range(len(attached_devices)):
    new_method = 'testMonkey%d' % k
    setattr(MonkeyTest, new_method, MonkeyTest.testMonkey)
    available_tests.append(MonkeyTest(new_method))
  options.ensure_value('shard_retries', 1)
  sharder = python_test_sharder.PythonTestSharder(
      attached_devices, available_tests, options)
  results = sharder.RunShardedTests()
  available_tests = [
      MonkeyTest('testMonkey', options.package_name, options.activity_name,
                 category=options.category, seed=options.seed,
                 throttle=options.throttle, event_count=options.event_count,
                 verbosity=options.verbosity, extra_args=options.extra_args)]

  def TestRunnerFactory(device, shard_index):
    return test_runner.HostDrivenTestRunner(
        device, shard_index, '', options.build_type, False, False)

  results, exit_code = test_dispatcher.RunTests(
      available_tests, TestRunnerFactory, False, None, shard=False,
      build_type=options.build_type, num_retries=0)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey',
      build_type=options.build_type)
  # TODO(gkanwar): After the host-driven tests have been refactored, they should
  # use the common exit code system (part of pylib/base/shard.py)
  if not results.DidRunPass():
    return 1
  return 0

  return exit_code


def main():

@ -128,12 +135,12 @@ def main():
  parser.add_option('--activity-name',
                    default='com.google.android.apps.chrome.Main',
                    help='Name of the activity to start [default: %default].')
  parser.add_option('--category',
                    help='A list of allowed categories [default: ""].')
  parser.add_option('--category', default='',
                    help='A list of allowed categories [default: %default].')
  parser.add_option('--throttle', default=100, type='int',
                    help='Delay between events (ms) [default: %default]. ')
  parser.add_option('--seed', type='int',
                    help=('Seed value for pseduo-random generator. Same seed '
                    help=('Seed value for pseudo-random generator. Same seed '
                          'value generates the same sequence of events. Seed '
                          'is randomized by default.'))
  parser.add_option('--event-count', default=10000, type='int',

@ -156,10 +163,7 @@ def main():
  if options.category:
    options.category = options.category.split(',')

  # TODO(gkanwar): This should go away when the host-driven tests are refactored
  options.num_retries = 1

  DispatchPythonTests(options)
  RunMonkeyTests(options)


if __name__ == '__main__':
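The conversion above is the general pattern for hooking any host-driven suite into the common dispatcher. A minimal sketch follows; the import paths and the my_tests list are assumptions based on the identifiers used above, not part of this change.

# Sketch only -- mirrors the wiring in RunMonkeyTests above.
# Import paths and my_tests are assumed, not taken from this change.
from pylib.base import test_dispatcher
from pylib.host_driven import test_runner   # path assumed

def RunHostDrivenSuite(my_tests, build_type):
  def TestRunnerFactory(device, shard_index):
    # Same constructor arguments as used by RunMonkeyTests above.
    return test_runner.HostDrivenTestRunner(
        device, shard_index, '', build_type, False, False)

  # shard=False runs the same tests on every attached device, matching the
  # "replicating" behaviour described in the RunMonkeyTests docstring.
  _, exit_code = test_dispatcher.RunTests(
      my_tests, TestRunnerFactory, False, None, shard=False,
      build_type=build_type, num_retries=0)
  return exit_code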

@ -23,7 +23,7 @@ from pylib.base import test_dispatcher
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.host_driven import run_python_tests as python_dispatch
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.uiautomator import setup as uiautomator_setup
@ -159,19 +159,13 @@ def AddJavaTestOptions(option_parser):
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_parser.add_option('-j', '--java_only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python_only', action='store_true',
                           default=False,
                           help='Run only the host-driven tests.')
  option_parser.add_option('--screenshot', dest='screenshot_failures',
                           action='store_true',
                           help='Capture screenshots of test failures')
  option_parser.add_option('--save-perf-json', action='store_true',
                           help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option('--official-build', help='Run official build tests.')
  option_parser.add_option('--python_test_root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('--official-build', action='store_true',
                           help='Run official build tests.')
  option_parser.add_option('--keep_test_server_ports',
                           action='store_true',
                           help=('Indicates the test server ports must be '
@ -195,19 +189,6 @@ def AddJavaTestOptions(option_parser):
def ProcessJavaTestOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults."""

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  if not options.python_test_root:
    options.run_python_tests = False

  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
@ -237,6 +218,13 @@ def AddInstrumentationTestOptions(option_parser):
  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  option_parser.add_option('-j', '--java_only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python_only', action='store_true',
                           default=False,
                           help='Run only the host-driven tests.')
  option_parser.add_option('--python_test_root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           action='store_true',
                           help='Wait for debugger.')
@ -264,6 +252,19 @@ def ProcessInstrumentationOptions(options, error_func):

  ProcessJavaTestOptions(options, error_func)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  if not options.python_test_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')
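The flag handling added to ProcessInstrumentationOptions above reduces to a small decision table. A standalone sketch, using plain arguments instead of the real optparse options object and omitting the mutually-exclusive -j/-p error path:

# Sketch only: mirrors the option resolution added above.
def ResolveTestSelection(java_only, python_only, python_test_root):
  run_java_tests, run_python_tests = True, True
  if java_only:
    run_python_tests = False
  elif python_only:
    run_java_tests = False
  # Host-driven tests cannot run without a test root.
  if not python_test_root:
    run_python_tests = False
  return run_java_tests, run_python_tests

# -p with a root: only host-driven tests; -p without a root: nothing runs.
assert ResolveTestSelection(False, True, 'some/test/root') == (False, True)
assert ResolveTestSelection(False, True, None) == (False, False)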

@ -431,8 +432,17 @@ def _RunInstrumentationTests(options, error_func):
    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    test_results, test_exit_code = (
        python_dispatch.DispatchPythonTests(options))
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.python_test_root, options.official_build,
        instrumentation_options)

    test_results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, False,
        options.test_device,
        shard=True,
        build_type=options.build_type,
        test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)
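Both the Java and host-driven branches above now hand back TestRunResults objects that are merged before reporting. A minimal sketch of that merge-and-report step; the import paths and the test_type/test_package values are placeholders, not taken from this change:

# Sketch only: the aggregation pattern used in _RunInstrumentationTests above.
from pylib.base import base_test_result    # import path assumed
from pylib.utils import report_results     # import path assumed

def MergeAndReport(per_run_results, build_type):
  results = base_test_result.TestRunResults()
  for test_results in per_run_results:
    results.AddTestRunResults(test_results)
  report_results.LogFull(results=results,
                         test_type='Instrumentation',  # placeholder
                         test_package='Placeholder',   # placeholder
                         build_type=build_type)
  return 0 if results.DidRunPass() else 1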

@ -458,27 +468,14 @@ def _RunUIAutomatorTests(options, error_func):
  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, False, options.test_device,
        shard=True,
        build_type=options.build_type,
        test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    test_results, test_exit_code = (
        python_dispatch.DispatchPythonTests(options))

    results.AddTestRunResults(test_results)

    # Only allow exit code escalation
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code
  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, False, options.test_device,
      shard=True,
      build_type=options.build_type,
      test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
@ -525,8 +522,6 @@ def RunTestsCommand(command, options, args, option_parser):
  else:
    raise Exception('Unknown test type.')

  return exit_code


def HelpCommand(command, options, args, option_parser):
  """Display help for a certain command, or overall help.