Converge run wrappers into angle_test_util.RunTestSuite

Bug: angleproject:7299
Change-Id: Ifa882028a6f49579b4a9c6893053cb3bee8d83fd
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/3808302
Reviewed-by: Jamie Madill <jmadill@chromium.org>
Reviewed-by: Amirali Abdolrashidi <abdolrashidi@google.com>
Commit-Queue: Roman Lavrov <romanl@google.com>
Roman Lavrov 2022-08-03 18:15:52 -04:00, committed by Angle LUCI CQ
Parent a2ad517f95
Commit 2de8bb42a9
5 changed files with 162 additions and 160 deletions
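For context, both runner scripts now go through the single entry point added in angle_test_util.py below. A minimal usage sketch of the consolidated wrapper follows; the suite name, arguments, and flag values are purely illustrative:

import os
import angle_test_util

# Initialize must run once before RunTestSuite/IsAndroid (it sets up android_helper).
angle_test_util.Initialize('angle_perftests')  # suite name is illustrative

exit_code, output = angle_test_util.RunTestSuite(
    'angle_perftests',
    ['--gtest_filter=TracePerfTest*', '--verbose'],  # illustrative cmd_args
    dict(os.environ),
    runner_args=[],          # extra args appended after cmd_args on desktop runs
    use_xvfb=False,          # route the run through xvfb.run_executable when True
    show_test_stdout=True)   # stream the suite's stdout while it runs
if exit_code != 0:
    print(output)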

View file

@@ -492,6 +492,7 @@ if (angle_standalone || build_with_chromium) {
data = invoker.data + [
"$angle_root/scripts/run_gtest_angle_test.py",
"$angle_root/src/tests/py_utils/android_helper.py",
"$angle_root/src/tests/py_utils/angle_path_util.py",
"$angle_root/src/tests/py_utils/angle_test_util.py",
"//testing/scripts/common.py",

View file

@@ -346,7 +346,7 @@ def RunSmokeTest():
logging.info('Smoke test passed')
def RunTests(test_suite, args, stdoutfile=None, output_dir=None, log_output=True):
def RunTests(test_suite, args, stdoutfile=None, log_output=True):
_EnsureTestSuite(test_suite)
args = args[:]

View file

@@ -4,8 +4,28 @@
import datetime
import importlib
import io
import logging
import subprocess
import sys
import time
import android_helper
import angle_path_util
angle_path_util.AddDepsDirToPath('testing/scripts')
import common
import test_env
import xvfb
def Initialize(suite_name):
android_helper.Initialize(suite_name)
# Requires .Initialize() to be called first
def IsAndroid():
return android_helper.IsAndroid()
class LogFormatter(logging.Formatter):
@@ -53,3 +73,54 @@ def HasGtestShardsAndIndex(env):
def PopGtestShardsAndIndex(env):
return int(env.pop('GTEST_TOTAL_SHARDS')), int(env.pop('GTEST_SHARD_INDEX'))
# From testing/test_env.py, see run_command_with_output below
def _popen(*args, **kwargs):
assert 'creationflags' not in kwargs
if sys.platform == 'win32':
# Necessary for signal handling. See crbug.com/733612#c6.
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
return subprocess.Popen(*args, **kwargs)
# Forked from testing/test_env.py to add ability to suppress logging with log=False
def run_command_with_output(argv, stdoutfile, env=None, cwd=None, log=True):
assert stdoutfile
with io.open(stdoutfile, 'wb') as writer, \
io.open(stdoutfile, 'rb') as reader:
process = _popen(argv, env=env, cwd=cwd, stdout=writer, stderr=subprocess.STDOUT)
test_env.forward_signals([process])
while process.poll() is None:
if log:
sys.stdout.write(reader.read().decode('utf-8'))
# This sleep is needed for signal propagation. See the
# wait_with_signals() docstring.
time.sleep(0.1)
if log:
sys.stdout.write(reader.read().decode('utf-8'))
return process.returncode
def RunTestSuite(test_suite,
cmd_args,
env,
runner_args=None,
show_test_stdout=True,
use_xvfb=False):
if android_helper.IsAndroid():
result, output = android_helper.RunTests(test_suite, cmd_args, log_output=show_test_stdout)
return result, output.decode()
runner_cmd = [ExecutablePathInCurrentDir(test_suite)] + cmd_args + (runner_args or [])
logging.debug(' '.join(runner_cmd))
with common.temporary_file() as tempfile_path:
if use_xvfb:
exit_code = xvfb.run_executable(runner_cmd, env, stdoutfile=tempfile_path)
else:
exit_code = run_command_with_output(
runner_cmd, env=env, stdoutfile=tempfile_path, log=show_test_stdout)
with open(tempfile_path) as f:
output = f.read()
return exit_code, output

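The run_command_with_output helper above is forked from testing/test_env.py only to add the log flag. A small sketch of the intent; the command and output path are made up:

# Capture a suite's output to a file without echoing it to the console (log=False).
exit_code = run_command_with_output(
    ['./angle_system_info_test', '--swiftshader'],  # illustrative command
    stdoutfile='sysinfo_stdout.txt',                # illustrative path
    log=False)
with open('sysinfo_stdout.txt') as f:
    captured = f.read()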
View file

@@ -36,8 +36,6 @@ from skia_gold import angle_skia_gold_session_manager
angle_path_util.AddDepsDirToPath('testing/scripts')
import common
import test_env
import xvfb
ANGLE_PERFTESTS = 'angle_perftests'
@@ -49,10 +47,6 @@ DEFAULT_BATCH_SIZE = 5
DEFAULT_LOG = 'info'
DEFAULT_GOLD_INSTANCE = 'angle'
# Filters out stuff like: " I 72.572s run_tests_on_device(96071FFAZ00096) "
ANDROID_LOGGING_PREFIX = r'I +\d+.\d+s \w+\(\w+\) '
ANDROID_BEGIN_SYSTEM_INFO = '>>ScopedMainEntryLogger'
# Test expectations
FAIL = 'FAIL'
PASS = 'PASS'
@@ -116,24 +110,13 @@ def add_skia_gold_args(parser):
'pre-authenticated. Meant for testing locally instead of on the bots.')
def run_wrapper(test_suite, cmd_args, args, env, stdoutfile):
if android_helper.IsAndroid():
return android_helper.RunTests(test_suite, cmd_args, stdoutfile)[0]
cmd = [angle_test_util.ExecutablePathInCurrentDir(test_suite)] + cmd_args
if args.xvfb:
return xvfb.run_executable(cmd, env, stdoutfile=stdoutfile)
else:
return test_env.run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)
def run_angle_system_info_test(sysinfo_args, args, env):
with temporary_dir() as temp_dir:
tempfile_path = os.path.join(temp_dir, 'stdout')
sysinfo_args += ['--render-test-output-dir=' + temp_dir]
if run_wrapper('angle_system_info_test', sysinfo_args, args, env, tempfile_path):
result, _ = angle_test_util.RunTestSuite(
'angle_system_info_test', sysinfo_args, env, use_xvfb=args.xvfb)
if result != 0:
raise Exception('Error getting system info.')
with open(os.path.join(temp_dir, 'angle_system_info.json')) as f:
@@ -169,7 +152,7 @@ def get_skia_gold_keys(args, env):
if args.swiftshader:
sysinfo_args.append('--swiftshader')
if android_helper.IsAndroid():
if angle_test_util.IsAndroid():
json_data = android_helper.AngleSystemInfo(sysinfo_args)
logging.info(json_data)
else:
@@ -302,7 +285,7 @@ def _get_gtest_filter_for_batch(args, batch):
def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
keys = get_skia_gold_keys(args, env)
if android_helper.IsAndroid() and args.test_suite == ANGLE_PERFTESTS:
if angle_test_util.IsAndroid() and args.test_suite == ANGLE_PERFTESTS:
android_helper.RunSmokeTest()
with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
@@ -328,57 +311,55 @@ def _run_tests(args, tests, extra_flags, env, screenshot_dir, results, test_results):
batches = _get_batches(traces, args.batch_size)
for batch in batches:
if android_helper.IsAndroid():
if angle_test_util.IsAndroid():
android_helper.PrepareRestrictedTraces(batch)
for iteration in range(0, args.flaky_retries + 1):
with common.temporary_file() as tempfile_path:
# This is how we signal early exit
if not batch:
logging.debug('All tests in batch completed.')
break
if iteration > 0:
logging.info('Test run failed, running retry #%d...' % iteration)
# This is how we signal early exit
if not batch:
logging.debug('All tests in batch completed.')
break
if iteration > 0:
logging.info('Test run failed, running retry #%d...' % iteration)
gtest_filter = _get_gtest_filter_for_batch(args, batch)
cmd_args = [
gtest_filter,
'--one-frame-only',
'--verbose-logging',
'--enable-all-trace-tests',
'--render-test-output-dir=%s' % screenshot_dir,
'--save-screenshots',
] + extra_flags
batch_result = PASS if run_wrapper(args.test_suite, cmd_args, args, env,
tempfile_path) == 0 else FAIL
gtest_filter = _get_gtest_filter_for_batch(args, batch)
cmd_args = [
gtest_filter,
'--one-frame-only',
'--verbose-logging',
'--enable-all-trace-tests',
'--render-test-output-dir=%s' % screenshot_dir,
'--save-screenshots',
] + extra_flags
result, test_output = angle_test_util.RunTestSuite(
args.test_suite, cmd_args, env, use_xvfb=args.xvfb)
batch_result = PASS if result == 0 else FAIL
with open(tempfile_path) as f:
test_output = f.read() + '\n'
next_batch = []
for trace in batch:
artifacts = {}
next_batch = []
for trace in batch:
artifacts = {}
if batch_result == PASS:
test_prefix = SWIFTSHADER_TEST_PREFIX if args.swiftshader else DEFAULT_TEST_PREFIX
trace_skipped_notice = '[ SKIPPED ] ' + test_prefix + trace + '\n'
if trace_skipped_notice in test_output:
result = SKIP
else:
logging.debug('upload test result: %s' % trace)
result = upload_test_result_to_skia_gold(
args, gold_session_manager, gold_session, gold_properties,
screenshot_dir, trace, artifacts)
if batch_result == PASS:
test_prefix = SWIFTSHADER_TEST_PREFIX if args.swiftshader else DEFAULT_TEST_PREFIX
trace_skipped_notice = '[ SKIPPED ] ' + test_prefix + trace + '\n'
if trace_skipped_notice in (test_output + '\n'):
result = SKIP
else:
result = batch_result
logging.debug('upload test result: %s' % trace)
result = upload_test_result_to_skia_gold(args, gold_session_manager,
gold_session, gold_properties,
screenshot_dir, trace,
artifacts)
else:
result = batch_result
expected_result = SKIP if result == SKIP else PASS
test_results[trace] = {'expected': expected_result, 'actual': result}
if len(artifacts) > 0:
test_results[trace]['artifacts'] = artifacts
if result == FAIL:
next_batch.append(trace)
batch = next_batch
expected_result = SKIP if result == SKIP else PASS
test_results[trace] = {'expected': expected_result, 'actual': result}
if len(artifacts) > 0:
test_results[trace]['artifacts'] = artifacts
if result == FAIL:
next_batch.append(trace)
batch = next_batch
# These properties are recorded after iteration to ensure they only happen once.
for _, trace_results in test_results.items():
@@ -440,7 +421,7 @@ def main():
if angle_test_util.HasGtestShardsAndIndex(env):
args.shard_count, args.shard_index = angle_test_util.PopGtestShardsAndIndex(env)
android_helper.Initialize(args.test_suite)
angle_test_util.Initialize(args.test_suite)
results = {
'tests': {},

View file

@@ -29,8 +29,6 @@ import angle_test_util
angle_path_util.AddDepsDirToPath('testing/scripts')
import common
import test_env
import xvfb
angle_path_util.AddDepsDirToPath('third_party/catapult/tracing')
from tracing.value import histogram
@@ -45,9 +43,6 @@ DEFAULT_MAX_ERRORS = 3
DEFAULT_WARMUP_LOOPS = 2
DEFAULT_CALIBRATION_TIME = 2
# Filters out stuff like: " I 72.572s run_tests_on_device(96071FFAZ00096) "
ANDROID_LOGGING_PREFIX = r'I +\d+.\d+s \w+\(\w+\) '
# Test expectations
FAIL = 'FAIL'
PASS = 'PASS'
@@ -57,53 +52,6 @@ EXIT_FAILURE = 1
EXIT_SUCCESS = 0
def _popen(*args, **kwargs):
assert 'creationflags' not in kwargs
if sys.platform == 'win32':
# Necessary for signal handling. See crbug.com/733612#c6.
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
return subprocess.Popen(*args, **kwargs)
def run_command_with_output(argv, stdoutfile, env=None, cwd=None, log=True):
assert stdoutfile
with io.open(stdoutfile, 'wb') as writer, \
io.open(stdoutfile, 'rb') as reader:
process = _popen(argv, env=env, cwd=cwd, stdout=writer, stderr=subprocess.STDOUT)
test_env.forward_signals([process])
while process.poll() is None:
if log:
sys.stdout.write(reader.read().decode('utf-8'))
# This sleep is needed for signal propagation. See the
# wait_with_signals() docstring.
time.sleep(0.1)
if log:
sys.stdout.write(reader.read().decode('utf-8'))
return process.returncode
def _run_and_get_output(args, cmd, env, runner_args):
if android_helper.IsAndroid():
result, output = android_helper.RunTests(
args.test_suite, cmd[1:], log_output=args.show_test_stdout)
return result, output.decode().split('\n')
runner_cmd = cmd + runner_args
lines = []
logging.debug(' '.join(runner_cmd))
with common.temporary_file() as tempfile_path:
if args.xvfb:
exit_code = xvfb.run_executable(runner_cmd, env, stdoutfile=tempfile_path)
else:
exit_code = run_command_with_output(
runner_cmd, env=env, stdoutfile=tempfile_path, log=args.show_test_stdout)
with open(tempfile_path) as f:
for line in f:
lines.append(line.strip())
return exit_code, lines
def _filter_tests(tests, pattern):
return [test for test in tests if fnmatch.fnmatch(test, pattern)]
@@ -113,7 +61,6 @@ def _shard_tests(tests, shard_count, shard_index):
def _get_results_from_output(output, result):
output = '\n'.join(output)
m = re.search(r'Running (\d+) tests', output)
if m and int(m.group(1)) > 1:
raise Exception('Found more than one test result in output')
@@ -130,23 +77,11 @@ def _get_results_from_output(output, result):
return [float(value) for value in m]
def _get_tests_from_output(lines):
seen_start_of_tests = False
tests = []
android_prefix = re.compile(ANDROID_LOGGING_PREFIX)
logging.debug('Read %d lines from test output.' % len(lines))
for line in lines:
line = android_prefix.sub('', line.strip())
if line == 'Tests list:':
seen_start_of_tests = True
elif line == 'End tests list.':
break
elif seen_start_of_tests:
tests.append(line)
if not seen_start_of_tests:
raise Exception('Did not find test list in test output!')
logging.debug('Found %d tests from test output.' % len(tests))
return tests
def _get_tests_from_output(output):
out_lines = output.split('\n')
start = out_lines.index('Tests list:')
end = out_lines.index('End tests list.')
return out_lines[start + 1:end]
def _truncated_list(data, n):
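The rewritten _get_tests_from_output above takes the suite's output as a single string (as returned by RunTestSuite) instead of a pre-split, prefix-scrubbed line list. An illustrative round trip; the sample output text is made up:

sample = ('Tests list:\n'
          'TracePerfTest.Run/foo_trace\n'
          'TracePerfTest.Run/bar_trace\n'
          'End tests list.\n')
print(_get_tests_from_output(sample))
# ['TracePerfTest.Run/foo_trace', 'TracePerfTest.Run/bar_trace']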
@@ -323,21 +258,26 @@ def main():
if angle_test_util.HasGtestShardsAndIndex(env):
args.shard_count, args.shard_index = angle_test_util.PopGtestShardsAndIndex(env)
android_helper.Initialize(args.test_suite)
angle_test_util.Initialize(args.test_suite)
# Get test list
if android_helper.IsAndroid():
if angle_test_util.IsAndroid():
tests = android_helper.ListTests(args.test_suite)
else:
cmd = [
angle_test_util.ExecutablePathInCurrentDir(args.test_suite),
cmd_args = [
'--list-tests',
'--verbose',
]
exit_code, lines = _run_and_get_output(args, cmd, env, [])
exit_code, output = angle_test_util.RunTestSuite(
args.test_suite,
cmd_args,
env,
runner_args=[],
use_xvfb=args.xvfb,
show_test_stdout=args.show_test_stdout)
if exit_code != EXIT_SUCCESS:
logging.fatal('Could not find test list from test output:\n%s' % '\n'.join(lines))
tests = _get_tests_from_output(lines)
logging.fatal('Could not find test list from test output:\n%s' % output)
tests = _get_tests_from_output(output)
if args.filter:
tests = _filter_tests(tests, args.filter)
@@ -350,7 +290,7 @@ def main():
logging.error('No tests to run.')
return EXIT_FAILURE
if android_helper.IsAndroid() and args.test_suite == ANGLE_PERFTESTS:
if angle_test_util.IsAndroid() and args.test_suite == ANGLE_PERFTESTS:
android_helper.RunSmokeTest()
logging.info('Running %d test%s' % (num_tests, 's' if num_tests > 1 else ' '))
@@ -365,14 +305,13 @@ def main():
for test_index in range(num_tests):
test = tests[test_index]
if android_helper.IsAndroid():
if angle_test_util.IsAndroid():
trace = android_helper.GetTraceFromTestName(test)
if trace and trace not in prepared_traces:
android_helper.PrepareRestrictedTraces([trace])
prepared_traces.add(trace)
cmd = [
angle_test_util.ExecutablePathInCurrentDir(args.test_suite),
cmd_args = [
'--gtest_filter=%s' % test,
'--verbose',
'--calibration-time',
@@ -387,16 +326,20 @@ def main():
if args.steps_per_trial:
steps_per_trial = args.steps_per_trial
else:
cmd_calibrate = cmd + [
calibrate_args = cmd_args + [
'--calibration',
'--warmup-loops',
str(args.warmup_loops),
]
exit_code, calibrate_output = _run_and_get_output(args, cmd_calibrate, env,
runner_args)
exit_code, calibrate_output = angle_test_util.RunTestSuite(
args.test_suite,
calibrate_args,
env,
runner_args=runner_args,
use_xvfb=args.xvfb,
show_test_stdout=args.show_test_stdout)
if exit_code != EXIT_SUCCESS:
logging.fatal('%s failed. Output:\n%s' %
(cmd_calibrate[0], '\n'.join(calibrate_output)))
logging.fatal('%s failed. Output:\n%s' % (args.test_suite, calibrate_output))
total_errors += 1
results.result_fail(test)
continue
@@ -417,7 +360,7 @@ def main():
logging.error('Error count exceeded max errors (%d). Aborting.' % args.max_errors)
return EXIT_FAILURE
cmd_run = cmd + [
run_args = cmd_args + [
'--steps-per-trial',
str(steps_per_trial),
'--trials',
@@ -425,18 +368,24 @@
]
if args.smoke_test_mode:
cmd_run += ['--no-warmup']
run_args += ['--no-warmup']
else:
cmd_run += ['--warmup-loops', str(args.warmup_loops)]
run_args += ['--warmup-loops', str(args.warmup_loops)]
if args.perf_counters:
cmd_run += ['--perf-counters', args.perf_counters]
run_args += ['--perf-counters', args.perf_counters]
with common.temporary_file() as histogram_file_path:
cmd_run += ['--isolated-script-test-perf-output=%s' % histogram_file_path]
exit_code, output = _run_and_get_output(args, cmd_run, env, runner_args)
run_args += ['--isolated-script-test-perf-output=%s' % histogram_file_path]
exit_code, output = angle_test_util.RunTestSuite(
args.test_suite,
run_args,
env,
runner_args=runner_args,
use_xvfb=args.xvfb,
show_test_stdout=args.show_test_stdout)
if exit_code != EXIT_SUCCESS:
logging.error('%s failed. Output:\n%s' % (cmd_run[0], '\n'.join(output)))
logging.error('%s failed. Output:\n%s' % (args.test_suite, output))
results.result_fail(test)
total_errors += 1
break