Android: adds instrumentation test runners.

Part of upstreaming build/android, this adds the instrumentation
test runners to allow us to run Java-based tests.

BUG=136688
TEST=


Review URL: https://chromiumcodereview.appspot.com/10703165

git-svn-id: http://src.chromium.org/svn/trunk/src/build@146335 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Author: bulach@chromium.org 2012-07-12 13:59:53 +00:00
Parent: ea5a5e8cf3
Commit: eb91f32bdc
15 changed files: 2055 additions, 14 deletions


@@ -856,3 +856,38 @@ class AndroidCommands(object):
usage_dict[key] += value
return usage_dict, smaps
def ProcessesUsingDevicePort(self, device_port):
"""Lists the processes using the specified device port on loopback
interface.
Args:
device_port: Port on device we want to check.
Returns:
A list of (pid, process_name) tuples using the specified port.
"""
tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
tcp_address = "0100007F:%04X" % device_port
pids = []
for single_connect in tcp_results:
connect_results = single_connect.split()
# Column 1 is the TCP port, and Column 9 is the inode of the socket
if connect_results[1] == tcp_address:
socket_inode = connect_results[9]
socket_name = 'socket:[%s]' % socket_inode
lsof_results = self.RunShellCommand('lsof', log_result=False)
for single_process in lsof_results:
process_results = single_process.split()
# Ignore the line if it has less than nine columns in it, which may
# be the case when a process stops while lsof is executing.
if len(process_results) <= 8:
continue
# Column 0 is the executable name
# Column 1 is the pid
# Column 8 is the Inode in use
if process_results[8] == socket_name:
pids.append( (int(process_results[1]), process_results[0]) )
break
logging.info('PidsUsingDevicePort: %s', pids)
return pids
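For illustration, a rough usage sketch of this new helper; the device serial and port below are hypothetical:

import android_commands

# Hypothetical serial; AndroidCommands is constructed the same way the test
# runners in this change do it.
adb = android_commands.AndroidCommands('0123456789ABCDEF')
for pid, process_name in adb.ProcessesUsingDevicePort(8000):
    print 'pid %d (%s) has device port 8000 open' % (pid, process_name)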

android/pylib/apk_info.py (new file, +142)

@@ -0,0 +1,142 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Gathers information about APKs."""
import collections
import os
import re
import cmd_helper
class ApkInfo(object):
"""Helper class for inspecting APKs."""
_PROGUARD_PATH = os.path.join(os.environ['ANDROID_SDK_ROOT'],
'tools/proguard/bin/proguard.sh')
if not os.path.exists(_PROGUARD_PATH):
_PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'],
'external/proguard/bin/proguard.sh')
_PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
_PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
_PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$')
_PROGUARD_ANNOTATION_CONST_RE = re.compile(r'\s*?- Constant element value.*$')
_PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$')
_AAPT_PACKAGE_NAME_RE = re.compile(r'package: .*name=\'(\S*)\'')
def __init__(self, apk_path, jar_path):
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
self._apk_path = apk_path
if not os.path.exists(jar_path):
raise Exception('%s not found, please build it' % jar_path)
self._jar_path = jar_path
self._annotation_map = collections.defaultdict(list)
self._test_methods = []
self._Initialize()
def _Initialize(self):
proguard_output = cmd_helper.GetCmdOutput([self._PROGUARD_PATH,
'-injars', self._jar_path,
'-dontshrink',
'-dontoptimize',
'-dontobfuscate',
'-dontpreverify',
'-dump',
]).split('\n')
clazz = None
method = None
annotation = None
has_value = False
qualified_method = None
for line in proguard_output:
m = self._PROGUARD_CLASS_RE.match(line)
if m:
clazz = m.group(1).replace('/', '.') # Change package delim.
annotation = None
continue
m = self._PROGUARD_METHOD_RE.match(line)
if m:
method = m.group(1)
annotation = None
qualified_method = clazz + '#' + method
if method.startswith('test') and clazz.endswith('Test'):
self._test_methods += [qualified_method]
continue
m = self._PROGUARD_ANNOTATION_RE.match(line)
if m:
assert qualified_method
annotation = m.group(1).split('/')[-1] # Ignore the annotation package.
self._annotation_map[qualified_method].append(annotation)
has_value = False
continue
if annotation:
assert qualified_method
if not has_value:
m = self._PROGUARD_ANNOTATION_CONST_RE.match(line)
if m:
has_value = True
else:
m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line)
if m:
value = m.group(1)
self._annotation_map[qualified_method].append(
annotation + ':' + value)
has_value = False
def _GetAnnotationMap(self):
return self._annotation_map
def _IsTestMethod(self, test):
class_name, method = test.split('#')
return class_name.endswith('Test') and method.startswith('test')
def GetApkPath(self):
return self._apk_path
def GetPackageName(self):
"""Returns the package name of this APK."""
aapt_output = cmd_helper.GetCmdOutput(
['aapt', 'dump', 'badging', self._apk_path]).split('\n')
for line in aapt_output:
m = self._AAPT_PACKAGE_NAME_RE.match(line)
if m:
return m.group(1)
raise Exception('Failed to determine package name of %s' % self._apk_path)
def GetTestAnnotations(self, test):
"""Returns a list of all annotations for the given |test|. May be empty."""
if not self._IsTestMethod(test):
return []
return self._GetAnnotationMap()[test]
def _AnnotationsMatchFilters(self, annotation_filter_list, annotations):
"""Checks if annotations match any of the filters."""
if not annotation_filter_list:
return True
for annotation_filter in annotation_filter_list:
filters = annotation_filter.split('=')
if len(filters) == 2:
key = filters[0]
value_list = filters[1].split(',')
for value in value_list:
if key + ':' + value in annotations:
return True
elif annotation_filter in annotations:
return True
return False
def GetAnnotatedTests(self, annotation_filter_list):
"""Returns a list of all tests that match the given annotation filters."""
return [test for test, annotations in self._GetAnnotationMap().iteritems()
if self._IsTestMethod(test) and self._AnnotationsMatchFilters(
annotation_filter_list, annotations)]
def GetTestMethods(self):
"""Returns a list of all test methods in this apk as Class#testMethod."""
return self._test_methods
@staticmethod
def IsPythonDrivenTest(test):
return 'pythonDrivenTests' in test
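A rough usage sketch for ApkInfo; the APK/JAR paths below are hypothetical and assume the environment variables checked above are set:

import apk_info

# Hypothetical paths to a test APK and its corresponding jar.
apk = apk_info.ApkInfo('out/Release/ContentShellTest.apk',
                       'out/Release/ContentShellTest.jar')
print apk.GetPackageName()
# All tests carrying the SmallTest annotation, as Class#testMethod names.
for test in apk.GetAnnotatedTests(['SmallTest']):
    print test, apk.GetTestAnnotations(test)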


@@ -0,0 +1,27 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This file is imported by python tests ran by run_python_tests.py."""
import os
import android_commands
from run_java_tests import TestRunner
def _GetPackageName(fname):
"""Extracts the package name from the test file path."""
base_root = os.path.join('com', 'google', 'android')
dirname = os.path.dirname(fname)
package = dirname[dirname.rfind(base_root):]
return package.replace(os.sep, '.')
def RunJavaTest(fname, suite, test, ports_to_forward):
device = android_commands.GetAttachedDevices()[0]
package_name = _GetPackageName(fname)
test = package_name + '.' + suite + '#' + test
java_test_runner = TestRunner(False, device, [test], False, False, False,
False, 0, ports_to_forward)
return java_test_runner.Run()
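As a sketch, a python-driven test could delegate to a single Java test through this helper; the suite and test names are made up, and the import of RunJavaTest is elided because this helper module's file name is not shown in the diff:

# From inside a python-driven test method:
results = RunJavaTest(__file__, 'FooTest', 'testBar', [])
# An empty GetAllBroken() list means the Java test passed.
print results.GetAllBroken()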


@@ -0,0 +1,160 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A helper module for parsing JSON objects from perf tests results."""
import json
def GetAverageRunInfo(json_data, name):
"""Summarizes TraceEvent JSON data for performance metrics.
Example JSON Inputs (More tags can be added but these are required):
Measuring Duration:
[
{ "cat": "Java",
"ts": 10000000000,
"ph": "B",
"name": "TestTrace"
},
{ "cat": "Java",
"ts": 10000004000,
"ph": "E",
"name": "TestTrace"
},
...
]
Measuring Call Frequency (FPS):
[
{ "cat": "Java",
"ts": 10000000000,
"ph": "I",
"name": "TestTraceFPS"
},
{ "cat": "Java",
"ts": 10000004000,
"ph": "I",
"name": "TestTraceFPS"
},
...
]
Args:
json_data: A list of dictionaries, each representing a JSON object.
name: The 'name' tag to filter on in the JSON file.
Returns:
A dictionary of result data with the following tags:
min: The minimum value tracked.
max: The maximum value tracked.
average: The average of all the values tracked.
count: The number of times the category/name pair was tracked.
type: The type of tracking ('Instant' for instant tags and 'Span' for
begin/end tags).
category: The passed in category filter.
name: The passed in name filter.
data_points: A list of all of the times used to generate this data.
units: The units for the values being reported.
Raises:
Exception: if entry contains invalid data.
"""
def EntryFilter(entry):
return entry['cat'] == 'Java' and entry['name'] == name
filtered_entries = filter(EntryFilter, json_data)
result = {}
result['min'] = -1
result['max'] = -1
result['average'] = 0
result['count'] = 0
result['type'] = 'Unknown'
result['category'] = 'Java'
result['name'] = name
result['data_points'] = []
result['units'] = ''
total_sum = 0
last_val = 0
val_type = None
for entry in filtered_entries:
if not val_type:
if 'mem' in entry:
val_type = 'mem'
def GetVal(entry):
return entry['mem']
result['units'] = 'kb'
elif 'ts' in entry:
val_type = 'ts'
def GetVal(entry):
return float(entry['ts']) / 1000.0
result['units'] = 'ms'
else:
raise Exception('Entry did not contain valid value info: %s' % entry)
if not val_type in entry:
raise Exception('Entry did not contain expected value type "%s" '
'information: %s' % (val_type, entry))
val = GetVal(entry)
if (entry['ph'] == 'B' and
(result['type'] == 'Unknown' or result['type'] == 'Span')):
result['type'] = 'Span'
last_val = val
elif ((entry['ph'] == 'E' and result['type'] == 'Span') or
(entry['ph'] == 'I' and (result['type'] == 'Unknown' or
result['type'] == 'Instant'))):
if last_val > 0:
delta = val - last_val
if result['min'] == -1 or result['min'] > delta:
result['min'] = delta
if result['max'] == -1 or result['max'] < delta:
result['max'] = delta
total_sum += delta
result['count'] += 1
result['data_points'].append(delta)
if entry['ph'] == 'I':
result['type'] = 'Instant'
last_val = val
if result['count'] > 0: result['average'] = total_sum / result['count']
return result
def GetAverageRunInfoFromJSONString(json_string, name):
"""Returns the results from GetAverageRunInfo using a JSON string.
Args:
json_string: The string containing JSON.
name: The 'name' tag to filter on in the JSON file.
Returns:
See GetAverageRunInfo Returns section.
"""
return GetAverageRunInfo(json.loads(json_string), name)
def GetAverageRunInfoFromFile(json_file, name):
"""Returns the results from GetAverageRunInfo using a JSON file.
Args:
json_file: The path to a JSON file.
name: The 'name' tag to filter on in the JSON file.
Returns:
See GetAverageRunInfo Returns section.
"""
with open(json_file, 'r') as f:
data = f.read()
perf = json.loads(data)
return GetAverageRunInfo(perf, name)
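A small worked example using the begin/end ('B'/'E') form from the docstring above; since 'ts' is divided by 1000, the single span below comes out as 4.0 ms:

import json_perf_parser

trace = [
    {'cat': 'Java', 'ts': 10000000000, 'ph': 'B', 'name': 'TestTrace'},
    {'cat': 'Java', 'ts': 10000004000, 'ph': 'E', 'name': 'TestTrace'},
]
result = json_perf_parser.GetAverageRunInfo(trace, 'TestTrace')
print result['type'], result['count'], result['average'], result['units']
# Prints: Span 1 4.0 ms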


@@ -0,0 +1,177 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for Android Python-driven tests.
This test case is intended to serve as the base class for any Python-driven
tests. It is similar to the Python unittest module in that the user's tests
inherit from this class and add their test methods to it.
When a PythonTestBase object is instantiated, its purpose is to run only one of
its tests. The test runner gives it the name of the test the instance will
run. The test runner calls SetUp with the Android device ID which the test will
run against. The runner runs the test method itself, collecting the result,
and calls TearDown.
Tests may do whatever they want in their test methods, such as calling Java
tests via _RunJavaTests. Those methods have the advantage of massaging the
Java test results into Python test results.
"""
import logging
import os
import time
import android_commands
import apk_info
from run_java_tests import TestRunner
import test_options_parser
from test_result import SingleTestResult, TestResults, PYTHON
# aka the parent of com.google.android
BASE_ROOT = 'src' + os.sep
class PythonTestBase(object):
"""Base class for Python-driven tests."""
def __init__(self, test_name):
# test_name must match one of the test methods defined on a subclass which
# inherits from this class.
# It's stored so we can do the attr lookup on demand, allowing this class
# to be pickled, a requirement for the multiprocessing module.
self.test_name = test_name
class_name = self.__class__.__name__
self.qualified_name = class_name + '.' + self.test_name
self.ports_to_forward = []
def SetUp(self, device_id, shard_index):
self.shard_index = shard_index
self.device_id = device_id
self.adb = android_commands.AndroidCommands(self.device_id)
def TearDown(self):
pass
def Run(self):
logging.warning('Running Python-driven test: %s', self.test_name)
return getattr(self, self.test_name)()
def _RunJavaTest(self, fname, suite, test):
"""Runs a single Java test with a Java TestRunner.
Args:
fname: filename for the test (e.g. foo/bar/baz/tests/FooTest.py)
suite: name of the Java test suite (e.g. FooTest)
test: name of the test method to run (e.g. testFooBar)
Returns:
TestResults object with a single test result.
"""
test = self._ComposeFullTestName(fname, suite, test)
# Get a set of default options
options = test_options_parser.ParseInstrumentationArgs([''])
apks = [apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)]
java_test_runner = TestRunner(options, self.device_id, [test], False,
self.shard_index,
apks,
self.ports_to_forward)
return java_test_runner.Run()
def _RunJavaTests(self, fname, tests):
"""Calls a list of tests and stops at the first test failure.
This method iterates until either it encounters a non-passing test or it
exhausts the list of tests. Then it returns the appropriate Python result.
Args:
fname: filename for the Python test
tests: a list of Java test names which will be run
Returns:
A TestResults object containing a result for this Python test.
"""
start_ms = int(time.time()) * 1000
result = None
for test in tests:
# We're only running one test at a time, so this TestResults object will
# hold only one result.
suite, test_name = test.split('.')
result = self._RunJavaTest(fname, suite, test_name)
# A non-empty list means the test did not pass.
if result.GetAllBroken():
break
duration_ms = int(time.time()) * 1000 - start_ms
# Do something with result.
return self._ProcessResults(result, start_ms, duration_ms)
def _ProcessResults(self, result, start_ms, duration_ms):
"""Translates a Java test result into a Python result for this test.
The TestRunner class that we use under the covers will return a test result
for that specific Java test. However, to make reporting clearer, we have
this method to abstract that detail and instead report that as a failure of
this particular test case while still including the Java stack trace.
Args:
result: TestResults with a single Java test result
start_ms: the time the test started
duration_ms: the length of the test
Returns:
A TestResults object containing a result for this Python test.
"""
test_results = TestResults()
# If our test is in broken, then it crashed/failed.
broken = result.GetAllBroken()
if broken:
# Since we have run only one test, take the first and only item.
single_result = broken[0]
log = single_result.log
if not log:
log = 'No logging information.'
short_error_msg = single_result.log.split('\n')[0]
# err_info is ostensibly for Sponge to consume; it's a short error
# message and a longer one.
err_info = (short_error_msg, log)
python_result = SingleTestResult(self.qualified_name, start_ms,
duration_ms,
PYTHON,
log,
err_info)
# Figure out where the test belonged. There's probably a cleaner way of
# doing this.
if single_result in result.crashed:
test_results.crashed = [python_result]
elif single_result in result.failed:
test_results.failed = [python_result]
elif single_result in result.unknown:
test_results.unknown = [python_result]
else:
python_result = SingleTestResult(self.qualified_name, start_ms,
duration_ms,
PYTHON)
test_results.ok = [python_result]
return test_results
def _ComposeFullTestName(self, fname, suite, test):
package_name = self._GetPackageName(fname)
return package_name + '.' + suite + '#' + test
def _GetPackageName(self, fname):
"""Extracts the package name from the test file path."""
dirname = os.path.dirname(fname)
package = dirname[dirname.rfind(BASE_ROOT) + len(BASE_ROOT):]
return package.replace(os.sep, '.')
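To illustrate the intended shape of a python-driven test, a hypothetical subclass might look like this (class, file path, and Java test names are made up):

import python_test_base

class FooTest(python_test_base.PythonTestBase):
    def testDelegatesToJava(self):
        # Runs the listed Java tests in order, stopping at the first failure,
        # and reports the outcome as the result of this Python test.
        return self._RunJavaTests(
            'src/com/hypothetical/foo/pythonDrivenTests/FooTest.py',
            ['FooJavaTest.testOne', 'FooJavaTest.testTwo'])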


@@ -0,0 +1,85 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper module for calling python-based tests."""
import logging
import sys
import time
from test_result import TestResults
def CallPythonTest(test, device_id, shard_index):
"""Invokes a test function and translates Python exceptions into test results.
This method invokes SetUp()/TearDown() on the test. It is intended to be
resilient to exceptions in SetUp(), the test itself, and TearDown(). Any
Python exception means the test is marked as failed, and the test result will
contain information about the exception.
If SetUp() raises an exception, the test is not run.
If TearDown() raises an exception, the test is treated as a failure. However,
if the test itself raised an exception beforehand, that stack trace will take
precedence whether or not TearDown() also raised an exception.
shard_index is not applicable in single-device scenarios, when test execution
is serial rather than parallel. Tests can use this to bring up servers with
unique port numbers, for example. See also python_test_sharder.
Args:
test: an object which is ostensibly a subclass of PythonTestBase.
device_id: device ID against which the test will run.
shard_index: index # of the shard on which this test is running
Returns:
A TestResults object which contains any results produced by the test or, in
the case of a Python exception, the Python exception info.
"""
start_date_ms = int(time.time()) * 1000
failed = False
try:
test.SetUp(device_id, shard_index)
except Exception:
failed = True
logging.exception(
'Caught exception while trying to run SetUp() for test: ' +
test.qualified_name)
# Tests whose SetUp() method has failed are likely to fail, or at least
# yield invalid results.
exc_info = sys.exc_info()
return TestResults.FromPythonException(test.qualified_name, start_date_ms,
exc_info)
try:
result = test.Run()
except Exception:
# Setting this lets TearDown() avoid stomping on our stack trace from Run()
# should TearDown() also raise an exception.
failed = True
logging.exception('Caught exception while trying to run test: ' +
test.qualified_name)
exc_info = sys.exc_info()
result = TestResults.FromPythonException(test.qualified_name, start_date_ms,
exc_info)
try:
test.TearDown()
except Exception:
logging.exception(
'Caught exception while trying to run TearDown() for test: ' +
test.qualified_name)
if not failed:
# Don't stomp the error during the test if TearDown blows up. This is a
# trade-off: if the test fails, this will mask any problem with TearDown
# until the test is fixed.
exc_info = sys.exc_info()
result = TestResults.FromPythonException(test.qualified_name,
start_date_ms, exc_info)
return result
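A minimal sketch of how this helper is driven; the device serial is hypothetical and FooTest stands in for any PythonTestBase subclass:

from python_test_caller import CallPythonTest

test = FooTest('testDelegatesToJava')                    # any PythonTestBase subclass
results = CallPythonTest(test, '0123456789ABCDEF', 0)    # device_id, shard_index
print results.GetAllBroken()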


@@ -0,0 +1,200 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Takes care of sharding the python-drive tests in multiple devices."""
import logging
import multiprocessing
from python_test_caller import CallPythonTest
from run_java_tests import FatalTestException
import sharded_tests_queue
from test_result import TestResults
def SetTestsContainer(tests_container):
"""Sets PythonTestSharder as a top-level field.
PythonTestSharder uses multiprocessing.Pool, which creates a pool of
processes. This is used to initialize each worker in the pool, ensuring that
each worker has access to this shared pool of tests.
The multiprocessing module requires that this be a top-level method.
Args:
tests_container: the container for all the tests.
"""
PythonTestSharder.tests_container = tests_container
def _DefaultRunnable(test_runner):
"""A default runnable for a PythonTestRunner.
Args:
test_runner: A PythonTestRunner which will run tests.
Returns:
The test results.
"""
return test_runner.RunTests()
class PythonTestRunner(object):
"""Thin wrapper around a list of PythonTestBase instances.
This is meant to be a long-lived object which can run multiple Python tests
within its lifetime. Tests will receive the device_id and shard_index.
The shard index affords the ability to create unique port numbers (e.g.
DEFAULT_PORT + shard_index) if the test so wishes.
"""
def __init__(self, device_id, shard_index):
"""Constructor.
Args:
device_id: ID of the device which this test will talk to.
shard_index: shard index, used to create things such as unique port numbers.
"""
self.device_id = device_id
self.shard_index = shard_index
def RunTests(self):
"""Runs tests from the shared pool of tests, aggregating results.
Returns:
A list of test results for all of the tests which this runner executed.
"""
tests = PythonTestSharder.tests_container
results = []
for t in tests:
res = CallPythonTest(t, self.device_id, self.shard_index)
results.append(res)
return TestResults.FromTestResults(results)
class PythonTestSharder(object):
"""Runs Python tests in parallel on multiple devices.
This is lifted more or less wholesale from BaseTestRunner.
Under the covers, it creates a pool of long-lived PythonTestRunners, which
execute tests from the pool of tests.
Args:
attached_devices: a list of device IDs attached to the host.
shard_retries: number of retries for any given test.
available_tests: a list of tests to run which subclass PythonTestBase.
Returns:
An aggregated list of test results.
"""
tests_container = None
def __init__(self, attached_devices, shard_retries, available_tests):
self.attached_devices = attached_devices
self.retries = shard_retries
self.tests = available_tests
def _SetupSharding(self, tests):
"""Creates the shared pool of tests and makes it available to test runners.
Args:
tests: the list of tests which will be consumed by workers.
"""
SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
len(self.attached_devices), tests))
def RunShardedTests(self):
"""Runs tests in parallel using a pool of workers.
Returns:
A list of test results aggregated from all test runs.
"""
logging.warning('*' * 80)
logging.warning('Sharding in ' + str(len(self.attached_devices)) +
' devices.')
logging.warning('Note that the output is not synchronized.')
logging.warning('Look for the "Final result" banner in the end.')
logging.warning('*' * 80)
all_passed = []
test_results = TestResults()
tests_to_run = self.tests
for retry in xrange(self.retries):
logging.warning('Try %d of %d', retry + 1, self.retries)
self._SetupSharding(self.tests)
test_runners = self._MakeTestRunners(self.attached_devices)
logging.warning('Starting...')
pool = multiprocessing.Pool(len(self.attached_devices),
SetTestsContainer,
[PythonTestSharder.tests_container])
# List of TestResults objects from each test execution.
try:
results_lists = pool.map(_DefaultRunnable, test_runners)
except Exception:
logging.exception('Unable to run tests. Something with the '
'PythonTestRunners has gone wrong.')
raise FatalTestException('PythonTestRunners were unable to run tests.')
test_results = TestResults.FromTestResults(results_lists)
# Accumulate passing results.
all_passed += test_results.ok
# If we have failed tests, map them to tests to retry.
failed_tests = test_results.GetAllBroken()
tests_to_run = self._GetTestsToRetry(self.tests,
failed_tests)
# Bail out early if we have no more tests. This can happen if all tests
# pass before we're out of retries, for example.
if not tests_to_run:
break
final_results = TestResults()
# all_passed has accumulated all passing test results.
# test_results will have the results from the most recent run, which could
# include a variety of failure modes (unknown, crashed, failed, etc).
final_results = test_results
final_results.ok = all_passed
return final_results
def _MakeTestRunners(self, attached_devices):
"""Initialize and return a list of PythonTestRunners.
Args:
attached_devices: list of device IDs attached to host.
Returns:
A list of PythonTestRunners, one for each device.
"""
test_runners = []
for index, device in enumerate(attached_devices):
logging.warning('*' * 80)
logging.warning('Creating shard %d for %s', index, device)
logging.warning('*' * 80)
# Bind the PythonTestRunner to a device & shard index. Give it the
# runnable which it will use to actually execute the tests.
test_runner = PythonTestRunner(device, index)
test_runners.append(test_runner)
return test_runners
def _GetTestsToRetry(self, available_tests, failed_tests):
"""Infers a list of tests to retry from failed tests and available tests.
Args:
available_tests: a list of tests which subclass PythonTestBase.
failed_tests: a list of SingleTestResults representing failed tests.
Returns:
A list of test objects which correspond to test names found in
failed_tests, or an empty list if there is no correspondence.
"""
failed_test_names = map(lambda t: t.test_name, failed_tests)
tests_to_retry = [t for t in available_tests
if t.qualified_name in failed_test_names]
return tests_to_retry
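A sketch of how the sharder is meant to be driven, mirroring DispatchPythonTests in run_python_tests.py below; available_tests is assumed to be a list of PythonTestBase instances built elsewhere:

import android_commands
from python_test_sharder import PythonTestSharder

attached_devices = android_commands.GetAttachedDevices()
sharder = PythonTestSharder(attached_devices, shard_retries=1,
                            available_tests=available_tests)
results = sharder.RunShardedTests()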

android/pylib/run_java_tests.py (new executable file, +590)

@@ -0,0 +1,590 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs the Java tests. See more information on run_instrumentation_tests.py."""
import fnmatch
import logging
import os
import re
import shutil
import sys
import time
import android_commands
import apk_info
from base_test_runner import BaseTestRunner
from base_test_sharder import BaseTestSharder, SetTestsContainer
import cmd_helper
import constants
import errors
from forwarder import Forwarder
from json_perf_parser import GetAverageRunInfoFromJSONString
from perf_tests_helper import PrintPerfResult
import sharded_tests_queue
from test_result import JAVA, SingleTestResult, TestResults
_PERF_TEST_ANNOTATION = 'PerfTest'
class FatalTestException(Exception):
"""A fatal test exception."""
pass
def _TestNameToExpectation(test_name):
# A test name is a Package.Path.Class#testName; convert to what we use in
# the expectation file.
return '.'.join(test_name.replace('#', '.').split('.')[-2:])
def FilterTests(test_names, pattern_list, inclusive):
"""Filters |test_names| using a list of patterns.
Args:
test_names: A list of test names.
pattern_list: A list of patterns.
inclusive: If True, returns the tests that match any pattern. If False,
returns the tests that do not match any pattern.
Returns:
A list of test names.
"""
ret = []
for t in test_names:
has_match = False
for pattern in pattern_list:
has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t),
pattern)
if has_match == inclusive:
ret += [t]
return ret
class TestRunner(BaseTestRunner):
"""Responsible for running a series of tests connected to a single device."""
_DEVICE_DATA_DIR = '/data/local/tmp/chrome/test/data'
_EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
'external/emma/lib/emma.jar')
_COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
_COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
_COVERAGE_FILENAME = 'coverage.ec'
_COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
_COVERAGE_FILENAME)
_COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
''),
'out/target/common/obj/APPS',
'Chrome_intermediates/coverage.em')
_HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
_DEVICE_PERF_OUTPUT_DIR = '/sdcard/Download/'
_DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (_DEVICE_PERF_OUTPUT_DIR +
'chrome-profile*')
_DEVICE_HAS_TEST_FILES = {}
def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
ports_to_forward):
"""Create a new TestRunner.
Args:
options: An options object with the following required attributes:
- install_apk: Re-installs the apk if opted.
- save_perf_json: Whether or not to save the JSON file from UI perf
tests.
- screenshot_failures: Take a screenshot for a test failure
- tool: Name of the Valgrind tool.
- wait_for_debugger: blocks until the debugger is connected.
device: Attached android device.
tests_iter: A list of tests to be run.
coverage: Collects coverage information if opted.
shard_index: shard # for this TestRunner, used to create unique port
numbers.
apks: A list of ApkInfo objects to be installed. The first element
should be the test APK; the rest may be APKs used by the test.
The default is ChromeTest.apk.
ports_to_forward: A list of port numbers for which to set up forwarders.
Can be optionally requested by a test case.
Raises:
FatalTestException: if coverage metadata is not available.
"""
BaseTestRunner.__init__(self, device, options.tool, shard_index)
if not apks:
apks = [apk_info.ApkInfo(options.test_apk_path,
options.test_apk_jar_path)]
self.install_apk = options.install_apk
self.save_perf_json = options.save_perf_json
self.screenshot_failures = options.screenshot_failures
self.wait_for_debugger = options.wait_for_debugger
self.tests_iter = tests_iter
self.coverage = coverage
self.apks = apks
self.test_apk = apks[0]
self.instrumentation_class_path = self.test_apk.GetPackageName()
self.ports_to_forward = ports_to_forward
self.test_results = TestResults()
# List of forwarders created by this instance of TestRunner.
self.forwarders = []
if self.coverage:
if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
' : Coverage meta info [' +
TestRunner._COVERAGE_META_INFO_PATH +
'] does not exist.')
if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
' : Path specified in $EMMA_WEB_ROOTDIR [' +
TestRunner._COVERAGE_WEB_ROOT_DIR +
'] does not exist.')
def _GetTestsIter(self):
if not self.tests_iter:
# multiprocessing.Queue can't be pickled across processes if we have it as
# a member set during constructor. Grab one here instead.
self.tests_iter = (BaseTestSharder.tests_container)
assert self.tests_iter
return self.tests_iter
def CopyTestFilesOnce(self):
"""Pushes the test data files to the device. Installs the apk if opted."""
if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
logging.warning('Already copied test files to device %s, skipping.',
self.device)
return
host_test_files_path = (constants.CHROME_DIR +
'/chrome/test/data/android/device_files')
if os.path.exists(host_test_files_path):
self.adb.PushIfNeeded(host_test_files_path,
TestRunner._DEVICE_DATA_DIR)
if self.install_apk:
# Install -r is not reliable, so uninstall it first.
for apk in self.apks:
self.adb.Adb().SendCommand('uninstall ' + apk.GetPackageName())
self.adb.Adb().SendCommand('install ' + apk.GetApkPath())
self.tool.CopyFiles()
TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True
def SaveCoverageData(self, test):
"""Saves the Emma coverage data before it's overwritten by the next test.
Args:
test: the test whose coverage data is collected.
"""
if not self.coverage:
return
if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
constants.CHROME_DIR):
logging.error('ERROR: Unable to find file ' +
TestRunner._COVERAGE_RESULT_PATH +
' on the device for test ' + test)
pulled_coverage_file = os.path.join(constants.CHROME_DIR,
TestRunner._COVERAGE_FILENAME)
if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
'-in', pulled_coverage_file,
'-in', TestRunner._COVERAGE_MERGED_FILENAME,
'-out', TestRunner._COVERAGE_MERGED_FILENAME]
cmd_helper.RunCmd(cmd)
else:
shutil.copy(pulled_coverage_file,
TestRunner._COVERAGE_MERGED_FILENAME)
os.remove(pulled_coverage_file)
def GenerateCoverageReportIfNeeded(self):
"""Uses the Emma to generate a coverage report and a html page."""
if not self.coverage:
return
cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
'emma', 'report', '-r', 'html',
'-in', TestRunner._COVERAGE_MERGED_FILENAME,
'-in', TestRunner._COVERAGE_META_INFO_PATH]
cmd_helper.RunCmd(cmd)
new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
shutil.copytree('coverage', new_dir)
latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
'Latest_Coverage_Run')
if os.path.exists(latest_dir):
shutil.rmtree(latest_dir)
os.mkdir(latest_dir)
webserver_new_index = os.path.join(new_dir, 'index.html')
webserver_new_files = os.path.join(new_dir, '_files')
webserver_latest_index = os.path.join(latest_dir, 'index.html')
webserver_latest_files = os.path.join(latest_dir, '_files')
# Setup new softlinks to last result.
os.symlink(webserver_new_index, webserver_latest_index)
os.symlink(webserver_new_files, webserver_latest_files)
cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])
def _GetInstrumentationArgs(self):
ret = {}
if self.coverage:
ret['coverage'] = 'true'
if self.wait_for_debugger:
ret['debug'] = 'true'
return ret
def _TakeScreenshot(self, test):
"""Takes a screenshot from the device."""
screenshot_tool = os.path.join(os.getenv('ANDROID_HOST_OUT'), 'bin',
'screenshot2')
screenshot_path = os.path.join(constants.CHROME_DIR,
'out_screenshots')
if not os.path.exists(screenshot_path):
os.mkdir(screenshot_path)
screenshot_name = os.path.join(screenshot_path, test + '.png')
logging.info('Taking screenshot named %s', screenshot_name)
cmd_helper.RunCmd([screenshot_tool, '-s', self.device, screenshot_name])
def SetUp(self):
"""Sets up the test harness and device before all tests are run."""
super(TestRunner, self).SetUp()
if self.adb.SetJavaAssertsEnabled(enable=True):
self.adb.Reboot(full_reboot=False)
# We use a different default port to launch the HTTP server based on the
# shard index, because there may be a race condition when multiple processes
# try to launch lighttpd on the same port at the same time.
# This line *must* come before the forwarding below, as it nukes all
# the other forwarders. A more comprehensive fix might be to pull the
# forwarder-killing line up to here, but that might violate assumptions
# implicit in other places.
self.LaunchTestHttpServer(os.path.join(constants.CHROME_DIR),
(constants.LIGHTTPD_RANDOM_PORT_FIRST +
self.shard_index))
if self.ports_to_forward:
for port in self.ports_to_forward:
self.forwarders.append(
Forwarder(self.adb, [(port, port)], self.tool, '127.0.0.1'))
self.CopyTestFilesOnce()
self.flags.AddFlags(['--enable-test-intents'])
def TearDown(self):
"""Cleans up the test harness and saves outstanding data from test run."""
if self.forwarders:
for forwarder in self.forwarders:
forwarder.Close()
self.GenerateCoverageReportIfNeeded()
super(TestRunner, self).TearDown()
def TestSetup(self, test):
"""Sets up the test harness for running a particular test.
Args:
test: The name of the test that will be run.
"""
self.SetupPerfMonitoringIfNeeded(test)
self._SetupIndividualTestTimeoutScale(test)
self.tool.SetupEnvironment()
# Make sure the forwarder is still running.
self.RestartHttpServerForwarderIfNecessary()
def _IsPerfTest(self, test):
"""Determines whether a test is a performance test.
Args:
test: The name of the test to be checked.
Returns:
Whether the test is annotated as a performance test.
"""
return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)
def SetupPerfMonitoringIfNeeded(self, test):
"""Sets up performance monitoring if the specified test requires it.
Args:
test: The name of the test to be run.
"""
if not self._IsPerfTest(test):
return
self.adb.Adb().SendCommand('shell rm ' +
TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
self.adb.StartMonitoringLogcat()
def TestTeardown(self, test, test_result):
"""Cleans up the test harness after running a particular test.
Depending on the options of this TestRunner this might handle coverage
tracking or performance tracking. This method will only be called if the
test passed.
Args:
test: The name of the test that was just run.
test_result: result for this test.
"""
self.tool.CleanUpEnvironment()
# The logic below relies on the test passing.
if not test_result or test_result.GetStatusCode():
return
self.TearDownPerfMonitoring(test)
self.SaveCoverageData(test)
def TearDownPerfMonitoring(self, test):
"""Cleans up performance monitoring if the specified test required it.
Args:
test: The name of the test that was just run.
Raises:
FatalTestException: if there's anything wrong with the perf data.
"""
if not self._IsPerfTest(test):
return
raw_test_name = test.split('#')[1]
# Wait and grab annotation data so we can figure out which traces to parse
regex = self.adb.WaitForLogMatch(re.compile('\*\*PERFANNOTATION\(' +
raw_test_name +
'\)\:(.*)'), None)
# If the test is set to run on a specific device type only (IE: only
# tablet or phone) and it is being run on the wrong device, the test
# just quits and does not do anything. The java test harness will still
# print the appropriate annotation for us, but will add --NORUN-- for
# us so we know to ignore the results.
# The --NORUN-- tag is managed by MainActivityTestBase.java
if regex.group(1) != '--NORUN--':
# Obtain the relevant perf data. The data is dumped to a
# JSON formatted file.
json_string = self.adb.GetFileContents(
'/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')
if json_string:
json_string = '\n'.join(json_string)
else:
raise FatalTestException('Perf file does not exist or is empty')
if self.save_perf_json:
json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
with open(json_local_file, 'w') as f:
f.write(json_string)
logging.info('Saving Perf UI JSON from test ' +
test + ' to ' + json_local_file)
raw_perf_data = regex.group(1).split(';')
for raw_perf_set in raw_perf_data:
if raw_perf_set:
perf_set = raw_perf_set.split(',')
if len(perf_set) != 3:
raise FatalTestException('Unexpected number of tokens in '
'perf annotation string: ' + raw_perf_set)
# Process the performance data
result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])
PrintPerfResult(perf_set[1], perf_set[2],
[result['average']], result['units'])
def _SetupIndividualTestTimeoutScale(self, test):
timeout_scale = self._GetIndividualTestTimeoutScale(test)
if timeout_scale == 1:
value = '""'
else:
value = '%f' % timeout_scale
self.adb.RunShellCommand('setprop chrome.timeout_scale %s' % value)
def _GetIndividualTestTimeoutScale(self, test):
"""Returns the timeout scale for the given |test|."""
annotations = self.apks[0].GetTestAnnotations(test)
timeout_scale = 1
if 'TimeoutScale' in annotations:
for annotation in annotations:
scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
if scale_match:
timeout_scale = int(scale_match.group(1))
if self.wait_for_debugger:
timeout_scale *= 100
return timeout_scale
def _GetIndividualTestTimeoutSecs(self, test):
"""Returns the timeout in seconds for the given |test|."""
annotations = self.apks[0].GetTestAnnotations(test)
if 'Manual' in annotations:
return 600 * 60
if 'External' in annotations:
return 10 * 60
if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
return 5 * 60
if 'MediumTest' in annotations:
return 3 * 60
return 1 * 60
def RunTests(self):
"""Runs the tests, generating the coverage if needed.
Returns:
A TestResults object.
"""
instrumentation_path = (self.instrumentation_class_path +
'/android.test.InstrumentationTestRunner')
instrumentation_args = self._GetInstrumentationArgs()
for test in self._GetTestsIter():
test_result = None
start_date_ms = None
try:
self.TestSetup(test)
start_date_ms = int(time.time()) * 1000
args_with_filter = dict(instrumentation_args)
args_with_filter['class'] = test
# |test_results| is a list that should contain
# a single TestResult object.
logging.warn(args_with_filter)
(test_results, _) = self.adb.Adb().StartInstrumentation(
instrumentation_path=instrumentation_path,
instrumentation_args=args_with_filter,
timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
self._GetIndividualTestTimeoutScale(test) *
self.tool.GetTimeoutScale()))
duration_ms = int(time.time()) * 1000 - start_date_ms
assert len(test_results) == 1
test_result = test_results[0]
status_code = test_result.GetStatusCode()
if status_code:
log = test_result.GetFailureReason()
if not log:
log = 'No information.'
if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
self._TakeScreenshot(test)
result = (log.split('\n')[0], log)
self.test_results.failed += [SingleTestResult(test, start_date_ms,
duration_ms, JAVA, log,
result)]
else:
result = [SingleTestResult(test, start_date_ms, duration_ms, JAVA)]
self.test_results.ok += result
# Catch exceptions thrown by StartInstrumentation().
# See ../../third_party/android/testrunner/adb_interface.py
except (errors.WaitForResponseTimedOutError,
errors.DeviceUnresponsiveError,
errors.InstrumentationError), e:
if start_date_ms:
duration_ms = int(time.time()) * 1000 - start_date_ms
else:
start_date_ms = int(time.time()) * 1000
duration_ms = 0
message = str(e)
if not message:
message = 'No information.'
self.test_results.crashed += [SingleTestResult(test, start_date_ms,
duration_ms,
JAVA, message,
(message, message))]
test_result = None
self.TestTeardown(test, test_result)
return self.test_results
class TestSharder(BaseTestSharder):
"""Responsible for sharding the tests on the connected devices."""
def __init__(self, attached_devices, options, tests, apks):
BaseTestSharder.__init__(self, attached_devices)
self.options = options
self.tests = tests
self.apks = apks
def SetupSharding(self, tests):
"""Called before starting the shards."""
SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
len(self.attached_devices), tests))
def CreateShardedTestRunner(self, device, index):
"""Creates a sharded test runner.
Args:
device: Device serial where this shard will run.
index: Index of this device in the pool.
Returns:
A TestRunner object.
"""
return TestRunner(self.options, device, None, False, index, self.apks, [])
def DispatchJavaTests(options, apks):
"""Dispatches Java tests onto connected device(s).
If possible, this method will attempt to shard the tests to
all connected devices. Otherwise, dispatch and run tests on one device.
Args:
options: Command line options.
apks: list of APKs to use.
Returns:
A TestResults object holding the results of the Java tests.
Raises:
FatalTestException: when there are no attached devices.
"""
test_apk = apks[0]
if options.annotation:
available_tests = test_apk.GetAnnotatedTests(options.annotation)
if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest':
tests_without_annotation = [
m for m in
test_apk.GetTestMethods()
if not test_apk.GetTestAnnotations(m) and
not apk_info.ApkInfo.IsPythonDrivenTest(m)]
if tests_without_annotation:
tests_without_annotation.sort()
logging.warning('The following tests do not contain any annotation. '
'Assuming "SmallTest":\n%s',
'\n'.join(tests_without_annotation))
available_tests += tests_without_annotation
else:
available_tests = [m for m in test_apk.GetTestMethods()
if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'
tests = []
if options.test_filter:
# |available_tests| are in adb instrument format: package.path.class#test.
filter_without_hash = options.test_filter.replace('#', '.')
tests = [t for t in available_tests
if filter_without_hash in t.replace('#', '.')]
else:
tests = available_tests
if not tests:
logging.warning('No Java tests to run with current args.')
return TestResults()
tests *= options.number_of_runs
attached_devices = android_commands.GetAttachedDevices()
test_results = TestResults()
if not attached_devices:
raise FatalTestException('You have no devices attached or visible!')
if options.device:
attached_devices = [options.device]
logging.info('Will run: %s', str(tests))
if (len(attached_devices) > 1 and
not coverage and
not options.wait_for_debugger):
sharder = TestSharder(attached_devices, options, tests, apks)
test_results = sharder.RunShardedTests()
else:
runner = TestRunner(options, attached_devices[0], tests, coverage, 0, apks,
[])
test_results = runner.Run()
return test_results
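Tying this file together, the expected entry-point usage is roughly the following; the APK name is hypothetical:

import apk_info
import run_java_tests
import test_options_parser

options = test_options_parser.ParseInstrumentationArgs(
    ['', '--test-apk', 'ContentShellTest'])
apks = [apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)]
results = run_java_tests.DispatchJavaTests(options, apks)
print 'Broken tests:', results.GetAllBroken()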

android/pylib/run_python_tests.py (new executable file, +225)

@@ -0,0 +1,225 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs the Python tests (relies on using the Java test runner)."""
import logging
import os
import sys
import types
import android_commands
import apk_info
import constants
import python_test_base
from python_test_caller import CallPythonTest
from python_test_sharder import PythonTestSharder
import run_java_tests
from run_java_tests import FatalTestException
from test_info_collection import TestInfoCollection
from test_result import TestResults
def _GetPythonFiles(root, files):
"""Returns all files from |files| that end in 'Test.py'.
Args:
root: A directory name with python files.
files: A list of file names.
Returns:
A list with all Python driven test file paths.
"""
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
def _InferImportNameFromFile(python_file):
"""Given a file, infer the import name for that file.
Example: /usr/foo/bar/baz.py -> baz.
Args:
python_file: path to the Python file, ostensibly to import later.
Returns:
The module name for the given file.
"""
return os.path.splitext(os.path.basename(python_file))[0]
def DispatchPythonTests(options):
"""Dispatches the Python tests. If there are multiple devices, use sharding.
Args:
options: command line options.
Returns:
A list of test results.
"""
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
raise FatalTestException('You have no devices attached or visible!')
if options.device:
attached_devices = [options.device]
test_collection = TestInfoCollection()
all_tests = _GetAllTests(options.python_test_root, options.official_build)
test_collection.AddTests(all_tests)
test_names = [t.qualified_name for t in all_tests]
logging.debug('All available tests: ' + str(test_names))
available_tests = test_collection.GetAvailableTests(
options.annotation, options.test_filter)
if not available_tests:
logging.warning('No Python tests to run with current args.')
return TestResults()
available_tests *= options.number_of_runs
test_names = [t.qualified_name for t in available_tests]
logging.debug('Final list of tests to run: ' + str(test_names))
# Copy files to each device before running any tests.
for device_id in attached_devices:
logging.debug('Pushing files to device %s', device_id)
apks = [apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)]
test_files_copier = run_java_tests.TestRunner(options, device_id,
None, False, 0, apks, [])
test_files_copier.CopyTestFilesOnce()
# Actually run the tests.
if (len(attached_devices) > 1 and
not options.wait_for_debugger):
logging.debug('Sharding Python tests.')
sharder = PythonTestSharder(attached_devices, options.shard_retries,
available_tests)
test_results = sharder.RunShardedTests()
else:
logging.debug('Running Python tests serially.')
test_results = _RunPythonTests(available_tests, attached_devices[0])
return test_results
def _RunPythonTests(tests_to_run, device_id):
"""Runs a list of Python tests serially on one device and returns results.
Args:
tests_to_run: a list of objects inheriting from PythonTestBase.
device_id: ID of the device to run tests on.
Returns:
A list of test results, aggregated across all the tests run.
"""
# This is a list of TestResults objects.
results = [CallPythonTest(t, device_id, 0) for t in tests_to_run]
# Merge the list of TestResults into one TestResults.
return TestResults.FromTestResults(results)
def _GetTestModules(python_test_root, is_official_build):
"""Retrieve a sorted list of pythonDrivenTests.
Walks the location of pythonDrivenTests, imports them, and provides the list
of imported modules to the caller.
Args:
python_test_root: the path to walk, looking for pythonDrivenTests
is_official_build: whether to run only those tests marked 'official'
Returns:
A list of Python modules which may have zero or more tests.
"""
# By default run all python tests under pythonDrivenTests.
python_test_file_list = []
for root, _, files in os.walk(python_test_root):
if (root.endswith('pythonDrivenTests')
or (is_official_build
and root.endswith('pythonDrivenTests/official'))):
python_test_file_list += _GetPythonFiles(root, files)
python_test_file_list.sort()
test_module_list = [_GetModuleFromFile(test_file)
for test_file in python_test_file_list]
return test_module_list
def _GetModuleFromFile(python_file):
"""Gets the module associated with a file by importing it.
Args:
python_file: file to import
Returns:
The module object.
"""
sys.path.append(os.path.dirname(python_file))
import_name = _InferImportNameFromFile(python_file)
return __import__(import_name)
def _GetTestsFromClass(test_class):
"""Create a list of test objects for each test method on this class.
Test methods are methods on the class which begin with 'test'.
Args:
test_class: class object which contains zero or more test methods.
Returns:
A list of test objects, each of which is bound to one test.
"""
test_names = [m for m in dir(test_class)
if _IsTestMethod(m, test_class)]
return map(test_class, test_names)
def _GetTestClassesFromModule(test_module):
tests = []
for name in dir(test_module):
attr = getattr(test_module, name)
if _IsTestClass(attr):
tests.extend(_GetTestsFromClass(attr))
return tests
def _IsTestClass(test_class):
return (type(test_class) is types.TypeType and
issubclass(test_class, python_test_base.PythonTestBase) and
test_class is not python_test_base.PythonTestBase)
def _IsTestMethod(attrname, test_case_class):
"""Checks whether this is a valid test method.
Args:
attrname: the method name.
test_case_class: the test case class.
Returns:
True if test_case_class.'attrname' is callable and it starts with 'test';
False otherwise.
"""
attr = getattr(test_case_class, attrname)
return callable(attr) and attrname.startswith('test')
def _GetAllTests(test_root, is_official_build):
"""Retrieve a list of Python test modules and their respective methods.
Args:
test_root: path which contains Python-driven test files
is_official_build: whether this is an official build
Returns:
List of test case objects for all available test methods.
"""
if not test_root:
return []
all_tests = []
test_module_list = _GetTestModules(test_root, is_official_build)
for module in test_module_list:
all_tests.extend(_GetTestClassesFromModule(module))
return all_tests
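For example, with a hypothetical --python_test_root laid out as below, _GetAllTests imports FooTest.py and creates one test object per test method on every PythonTestBase subclass it defines:

<python_test_root>/
    pythonDrivenTests/
        FooTest.py           # class FooTest(PythonTestBase) with testBar, testBaz
        official/
            BarTest.py       # only collected when is_official_build is True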


@@ -4,19 +4,8 @@
"""Helper functions common to native, java and python test runners."""
import contextlib
import fcntl
import httplib
import logging
import optparse
import os
import re
import socket
import subprocess
import sys
import traceback
import cmd_helper
def GetExpectations(file_name):


@@ -0,0 +1,36 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module that contains a queue for running sharded tests."""
import multiprocessing
class ShardedTestsQueue(object):
"""A queue for managing pending tests across different runners.
This class should only be used when sharding.
Attributes:
num_devices: an integer; the number of attached Android devices.
tests: a list of tests to be run.
tests_queue: if sharding, a JoinableQueue object that holds tests from
|tests|. Otherwise, a list holding tests.
results_queue: a Queue object to hold TestResults objects.
"""
_STOP_SENTINEL = 'STOP' # sentinel value for iter()
def __init__(self, num_devices, tests):
assert num_devices > 1, 'At least two devices must be attached.'
self.num_devices = num_devices
self.tests_queue = multiprocessing.Queue()
for test in tests:
self.tests_queue.put(test)
for _ in xrange(self.num_devices):
self.tests_queue.put(ShardedTestsQueue._STOP_SENTINEL)
def __iter__(self):
"""Returns an iterator with the test cases."""
return iter(self.tests_queue.get, ShardedTestsQueue._STOP_SENTINEL)
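A minimal sketch of the queue's behavior; with a single consumer, iteration drains every pending test and stops at the first STOP sentinel:

import sharded_tests_queue

queue = sharded_tests_queue.ShardedTestsQueue(2, ['testA', 'testB', 'testC'])
print [test for test in queue]   # ['testA', 'testB', 'testC']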


@@ -0,0 +1,137 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing information about the python-driven tests."""
import logging
import os
import tests_annotations
class TestInfo(object):
"""An object containing and representing a test function, plus metadata."""
def __init__(self, runnable, set_up=None, tear_down=None):
# The actual test function/method.
self.runnable = runnable
# Qualified name of test function/method (e.g. FooModule.testBar).
self.qualified_name = self._GetQualifiedName(runnable)
# setUp and tearDown functions, if any.
self.set_up = set_up
self.tear_down = tear_down
def _GetQualifiedName(self, runnable):
"""Helper method to infer a runnable's name and module name.
Many filters and lists presuppose a format of module_name.testMethodName.
To make this easy on everyone, we use some reflection magic to infer this
name automatically.
Args:
runnable: the test method to get the qualified name for
Returns:
qualified name for this runnable, incl. module name and method name.
"""
runnable_name = runnable.__name__
# See also tests_annotations.
module_name = os.path.splitext(
os.path.basename(runnable.__globals__['__file__']))[0]
return '.'.join([module_name, runnable_name])
def __str__(self):
return self.qualified_name
class TestInfoCollection(object):
"""A collection of TestInfo objects which facilitates filtering."""
def __init__(self):
"""Initialize a new TestInfoCollection."""
# Master list of all valid tests.
self.all_tests = []
def AddTests(self, test_infos):
"""Adds a set of tests to this collection.
The user may then retrieve them, optionally according to criteria, via
GetAvailableTests().
Args:
test_infos: a list of TestInfos representing test functions/methods.
"""
self.all_tests = test_infos
def GetAvailableTests(self, annotation, name_filter):
"""Get a collection of TestInfos which match the supplied criteria.
Args:
annotation: annotation which tests must match, if any
name_filter: name filter which tests must match, if any
Returns:
List of available tests.
"""
available_tests = self.all_tests
# Filter out tests which match neither the requested annotation, nor the
# requested name filter, if any.
available_tests = [t for t in available_tests if
self._AnnotationIncludesTest(t, annotation)]
if annotation and len(annotation) == 1 and annotation[0] == 'SmallTest':
tests_without_annotation = [
t for t in self.all_tests if
not tests_annotations.AnnotatedFunctions.GetTestAnnotations(
t.qualified_name)]
test_names = [t.qualified_name for t in tests_without_annotation]
logging.warning('The following tests do not contain any annotation. '
'Assuming "SmallTest":\n%s',
'\n'.join(test_names))
available_tests += tests_without_annotation
available_tests = [t for t in available_tests if
self._NameFilterIncludesTest(t, name_filter)]
return available_tests
def _AnnotationIncludesTest(self, test_info, annotation_filter_list):
"""Checks whether a given test represented by test_info matches annotation.
Args:
test_info: TestInfo object representing the test
annotation_filter_list: list of annotation filters to match (e.g. Smoke)
Returns:
True if no annotation was supplied or the test matches; false otherwise.
"""
if not annotation_filter_list:
return True
for annotation_filter in annotation_filter_list:
filters = annotation_filter.split('=')
if len(filters) == 2:
key = filters[0]
value_list = filters[1].split(',')
for value in value_list:
if tests_annotations.AnnotatedFunctions.IsAnnotated(
key + ':' + value, test_info.qualified_name):
return True
elif tests_annotations.AnnotatedFunctions.IsAnnotated(
annotation_filter, test_info.qualified_name):
return True
return False
def _NameFilterIncludesTest(self, test_info, name_filter):
"""Checks whether a name filter matches a given test_info's method name.
This is a case-sensitive, substring comparison: 'Foo' will match methods
Foo.testBar and Bar.testFoo. 'foo' would not match either.
Args:
test_info: TestInfo object representing the test
name_filter: substring to check for in the qualified name of the test
Returns:
True if no name filter supplied or it matches; False otherwise.
"""
return not name_filter or name_filter in test_info.qualified_name
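A sketch of the filtering flow; all_tests is assumed to be a list of TestInfo objects built from the discovered test methods:

from test_info_collection import TestInfoCollection

collection = TestInfoCollection()
collection.AddTests(all_tests)
# Keep tests annotated Smoke whose qualified name contains the substring 'Foo'.
matching = collection.GetAvailableTests(['Smoke'], 'Foo')
print [str(t) for t in matching]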


@@ -4,8 +4,10 @@
"""Parses options for the instrumentation tests."""
import os
import constants
import optparse
import os
def CreateTestRunnerOptionParser(usage=None, default_timeout=60):
@@ -17,8 +19,7 @@ def CreateTestRunnerOptionParser(usage=None, default_timeout=60):
default=default_timeout)
option_parser.add_option('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true',
default=False)
action='store_true')
option_parser.add_option('-v',
'--verbose',
dest='verbose_count',
@@ -37,3 +38,76 @@ def CreateTestRunnerOptionParser(usage=None, default_timeout=60):
help='Run the test under a tool '
'(use --tool help to list them)')
return option_parser
def ParseInstrumentationArgs(args):
"""Parse arguments and return options with defaults."""
option_parser = CreateTestRunnerOptionParser()
option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true', help='Wait for debugger.')
option_parser.add_option('-I', dest='install_apk', help='Install APK.',
action='store_true')
option_parser.add_option('-f', '--test_filter',
help='Test filter (if not fully qualified, '
'will run all matches).')
option_parser.add_option('-A', '--annotation', dest='annotation_str',
help=('Run only tests with any of the given '
'annotations. '
'An annotation can be either a key or a '
'key-values pair. '
'A test that has no annotation is '
'considered "SmallTest".'))
option_parser.add_option('-j', '--java_only', action='store_true',
help='Run only the Java tests.')
option_parser.add_option('-p', '--python_only', action='store_true',
help='Run only the Python tests.')
option_parser.add_option('-n', '--run_count', type='int',
dest='number_of_runs', default=1,
help=('How many times to run each test, regardless '
'of the result. (Default is 1)'))
option_parser.add_option('--test-apk', dest='test_apk',
help=('The name of the apk containing the tests '
'(without the .apk extension).'))
option_parser.add_option('--screenshot', dest='screenshot_failures',
action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option('--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
option_parser.add_option('--shard_retries', type=int, default=1,
help=('Number of times to retry each failure when '
'sharding.'))
  option_parser.add_option('--official-build', action='store_true',
                           help='Run official build tests.')
option_parser.add_option('--device',
help='Serial number of device we should use.')
option_parser.add_option('--python_test_root',
help='Root of the python-driven tests.')
options, args = option_parser.parse_args(args)
if len(args) > 1:
    option_parser.error('Unknown arguments: %s' % args[1:])
if options.java_only and options.python_only:
option_parser.error('Options java_only (-j) and python_only (-p) '
'are mutually exclusive')
options.run_java_tests = True
options.run_python_tests = True
if options.java_only:
options.run_python_tests = False
elif options.python_only:
options.run_java_tests = False
options.test_apk_path = os.path.join(constants.CHROME_DIR,
'out', 'Release',
'%s.apk' % options.test_apk)
options.test_apk_jar_path = os.path.join(constants.CHROME_DIR,
'out', 'Release',
'%s.jar' % options.test_apk)
if options.annotation_str:
options.annotation = options.annotation_str.split()
elif options.test_filter:
options.annotation = []
else:
options.annotation = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest']
return options
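# Illustrative sketch only (hypothetical helper, not called by the scripts):
# parses an example command line. 'ContentShellTest' stands in for any test
# APK built under out/Release; args[0] is the program name, as in sys.argv.
def _ExampleParsedOptions():
  options = ParseInstrumentationArgs(
      ['run_tests', '--test-apk', 'ContentShellTest',
       '-A', 'SmallTest', '-vv'])
  # -A restricts the run to the given annotations; without -A or -f the
  # default annotation list above applies.
  assert options.annotation == ['SmallTest']
  assert options.run_java_tests and options.run_python_tests
  return options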

View file

@@ -0,0 +1,89 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Annotations for python-driven tests."""
import os
class AnnotatedFunctions(object):
"""A container for annotated methods."""
_ANNOTATED = {}
@staticmethod
def _AddFunction(annotation, function):
"""Adds an annotated to function to our container.
Args:
annotation: the annotation string.
function: the function.
Returns:
The function passed in.
"""
module_name = os.path.splitext(os.path.basename(
function.__globals__['__file__']))[0]
qualified_function_name = '.'.join([module_name, function.func_name])
function_list = AnnotatedFunctions._ANNOTATED.get(annotation, [])
function_list.append(qualified_function_name)
AnnotatedFunctions._ANNOTATED[annotation] = function_list
return function
@staticmethod
def IsAnnotated(annotation, qualified_function_name):
"""True if function name (module.function) contains the annotation.
Args:
annotation: the annotation string.
qualified_function_name: the qualified function name.
Returns:
True if module.function contains the annotation.
"""
return qualified_function_name in AnnotatedFunctions._ANNOTATED.get(
annotation, [])
@staticmethod
def GetTestAnnotations(qualified_function_name):
"""Returns a list containing all annotations for the given function.
Args:
qualified_function_name: the qualified function name.
Returns:
List of all annotations for this function.
"""
return [annotation
for annotation, tests in AnnotatedFunctions._ANNOTATED.iteritems()
if qualified_function_name in tests]
# The following functions are annotations used for the python driven tests.
def Smoke(function):
return AnnotatedFunctions._AddFunction('Smoke', function)
def SmallTest(function):
return AnnotatedFunctions._AddFunction('SmallTest', function)
def MediumTest(function):
return AnnotatedFunctions._AddFunction('MediumTest', function)
def LargeTest(function):
return AnnotatedFunctions._AddFunction('LargeTest', function)
def FlakyTest(function):
return AnnotatedFunctions._AddFunction('FlakyTest', function)
def DisabledTest(function):
return AnnotatedFunctions._AddFunction('DisabledTest', function)
def Feature(feature_list):
def _AddFeatures(function):
for feature in feature_list:
      AnnotatedFunctions._AddFunction('Feature:' + feature, function)
return AnnotatedFunctions._AddFunction('Feature', function)
return _AddFeatures
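# Illustrative sketch only (hypothetical function, not used by the runner):
# annotates a throwaway function defined in this module and then queries the
# registry the same way the test dispatcher does.
def _ExampleAnnotationUsage():
  @SmallTest
  def testExample():
    pass
  name = 'tests_annotations.testExample'
  assert AnnotatedFunctions.IsAnnotated('SmallTest', name)
  assert AnnotatedFunctions.GetTestAnnotations(name) == ['SmallTest']
  return testExample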

View file

@@ -0,0 +1,75 @@
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs both the Python and Java tests."""
import sys
import time
from pylib import apk_info
from pylib import run_java_tests
from pylib import run_python_tests
from pylib import run_tests_helper
from pylib import test_options_parser
from pylib.test_result import TestResults
def SummarizeResults(java_results, python_results, annotation):
"""Summarize the results from the various test types.
Args:
java_results: a TestResults object with java test case results.
python_results: a TestResults object with python test case results.
annotation: the annotation used for these results.
Returns:
A tuple (all_results, summary_string, num_failing)
"""
all_results = TestResults.FromTestResults([java_results, python_results])
summary_string = all_results.LogFull('Instrumentation', annotation)
num_failing = (len(all_results.failed) + len(all_results.crashed) +
len(all_results.unknown))
return all_results, summary_string, num_failing
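# Illustrative sketch only: with two freshly constructed TestResults objects
# the combined summary should report zero failing tests (assuming a default
# TestResults carries no failed, crashed or unknown entries).
def _ExampleEmptySummary():
  _, _, num_failing = SummarizeResults(TestResults(), TestResults(),
                                       ['SmallTest'])
  return num_failing == 0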
def DispatchInstrumentationTests(options):
"""Dispatches the Java and Python instrumentation tests, sharding if possible.
  Uses the logging module to print the combined final results and summary
  of the Java and Python tests. If the java_only option is set, only the
  Java tests run; if the python_only option is set, only the Python tests
  run. If neither is set, both run.
Args:
options: command-line options for running the Java and Python tests.
Returns:
An integer representing the number of failing tests.
"""
start_date = int(time.time() * 1000)
java_results = TestResults()
python_results = TestResults()
if options.run_java_tests:
java_results = run_java_tests.DispatchJavaTests(
options,
[apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)])
if options.run_python_tests:
python_results = run_python_tests.DispatchPythonTests(options)
all_results, summary_string, num_failing = SummarizeResults(
java_results, python_results, options.annotation)
return num_failing
def main(argv):
options = test_options_parser.ParseInstrumentationArgs(argv)
run_tests_helper.SetLogLevel(options.verbose_count)
return DispatchInstrumentationTests(options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
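# Example invocation (hypothetical APK name; the script path depends on the
# checkout layout):
#   build/android/run_instrumentation_tests.py --test-apk ContentShellTest \
#       -A SmallTest -I -vv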