Capture/Replay: Update process for trace upgrading.

Includes changes to the retracing script.

Also includes documentation on how the process works.

Bug: angleproject:5133
Change-Id: I1acfe338f3fe0282a0461c314274c761ed04bd2f
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/3193418
Reviewed-by: Cody Northrop <cnorthrop@google.com>
Reviewed-by: Tim Van Patten <timvp@google.com>
Commit-Queue: Jamie Madill <jmadill@chromium.org>
Jamie Madill 2021-09-29 14:02:29 -04:00 committed by Angle LUCI CQ
Parent fefd7ae66a
Commit 7fc6c07cca
9 changed files with 510 additions and 110 deletions

View file

@@ -2,7 +2,7 @@
"src/tests/restricted_traces/.gitignore":
"e1e43b4e19ea9204910e8e121900cf7a",
"src/tests/restricted_traces/gen_restricted_traces.py":
"6748acf6499a7a2632ac8a0416c2ef66",
"241d7eb3d8322ff67f44b84f8a06a4cf",
"src/tests/restricted_traces/restricted_traces.json":
"1bb099a03ad7e0d2c260c3597374cb6a",
"src/tests/restricted_traces/restricted_traces_autogen.cpp":

View file

@@ -47,6 +47,14 @@ void JsonSerializer::endGroup()
}
void JsonSerializer::addBlob(const std::string &name, const uint8_t *blob, size_t length)
{
addBlobWithMax(name, blob, length, 16);
}
void JsonSerializer::addBlobWithMax(const std::string &name,
const uint8_t *blob,
size_t length,
size_t maxSerializedLength)
{
unsigned char hash[angle::base::kSHA1Length];
angle::base::SHA1HashBytes(blob, length, hash);
@@ -64,7 +72,8 @@ void JsonSerializer::addBlob(const std::string &name, const uint8_t *blob, size_
hashName << name << "-hash";
addString(hashName.str(), os.str());
std::vector<uint8_t> data((length < 16) ? length : static_cast<size_t>(16));
std::vector<uint8_t> data(
(length < maxSerializedLength) ? length : static_cast<size_t>(maxSerializedLength));
std::copy(blob, blob + data.size(), data.begin());
std::ostringstream rawName;
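For context, `addBlob` now delegates to `addBlobWithMax` with a cap of 16 bytes: the SHA-1 hash still covers the entire blob, while at most `maxSerializedLength` raw bytes land in the serialized output. A minimal Python sketch of that truncation logic (container and key names are illustrative, not the actual C++ API):

```
import hashlib

def add_blob_with_max(group, name, blob, max_serialized_length=16):
    # Hash the complete blob so a content mismatch is detectable even
    # though the serialized bytes are truncated.
    group[name + '-hash'] = hashlib.sha1(blob).hexdigest()
    # Serialize only a bounded prefix of the raw bytes.
    group[name + '-raw'] = list(blob[:max_serialized_length])
```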

View file

@@ -57,6 +57,10 @@ class JsonSerializer : public angle::NonCopyable
void addString(const std::string &name, const std::string &value);
void addBlob(const std::string &name, const uint8_t *value, size_t length);
void addBlobWithMax(const std::string &name,
const uint8_t *value,
size_t length,
size_t maxSerializedLength);
void startGroup(const std::string &name);

View file

@@ -690,7 +690,7 @@ Result SerializeBuffer(const gl::Context *context,
{
GroupScope group(json, "Buffer", buffer->id().value);
SerializeBufferState(json, buffer->getState());
if (buffer->getSize())
if (buffer->getSize() > 0)
{
MemoryBuffer *dataPtr = nullptr;
ANGLE_CHECK_GL_ALLOC(

View file

@@ -1656,7 +1656,7 @@ void TracePerfTest::validateSerializedState(const char *expectedCapturedSerializ
return;
}
printf("Serialization mismatch!\n");
GTEST_NONFATAL_FAILURE_("Serialization mismatch!");
char aFilePath[kMaxPath] = {};
if (CreateTemporaryFile(aFilePath, kMaxPath))

View file

@@ -237,22 +237,15 @@ jq ".traces = (.traces + [\"$LABEL $VERSION\"] | unique)" restricted_traces.json
## Run code auto-generation
We use two scripts to update the test harness so it will compile and run the new trace:
The [`gen_restricted_traces`](gen_restricted_traces.py) script auto-generates entries
in our checkout dependencies to sync restricted trace data on checkout. To trigger
code generation, run the following from the angle root folder:
```
python ./gen_restricted_traces.py
cd ../../..
python ./scripts/run_code_generation.py
```
After this you should be able to `git diff` and see your new trace added to the harness files:
```
$ git diff --stat
scripts/code_generation_hashes/restricted_traces.json | 12 +++++++-----
src/tests/restricted_traces/.gitignore | 2 ++
src/tests/restricted_traces/restricted_traces.json | 1 +
src/tests/restricted_traces/restricted_traces_autogen.cpp | 19 +++++++++++++++++++
src/tests/restricted_traces/restricted_traces_autogen.gni | 1 +
src/tests/restricted_traces/restricted_traces_autogen.h | 1 +
6 files changed, 31 insertions(+), 5 deletions(-)
TODO: Redo this. http://anglebug.com/5133
```
Note the absence of the traces themselves listed above. They are automatically .gitignored since
they won't be checked in directly to the repo.
@@ -264,7 +257,7 @@ be done by Googlers with write access to the trace CIPD prefix. If you need writ
someone listed in the `OWNERS` file.
```
sync_restricted_traces_to_cipd.py
./sync_restricted_traces_to_cipd.py
```
## Upload your CL
@@ -276,3 +269,161 @@ git cl upload
```
You're now ready to run your new trace on CI!
# Upgrading existing traces
With tracer updates, we sometimes want to re-run tracing to upgrade the trace file format or to
take advantage of new tracer improvements. The [`retrace_restricted_traces`](retrace_restricted_traces.py)
script lets us re-run tracing using [SwiftShader](https://swiftshader.googlesource.com/SwiftShader)
on a desktop machine. As of this writing, re-tracing must be done on a Windows machine because of
app window size limitations on Linux.
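The script is organized as four subcommands (see the argparse subparsers in the script diff
further below): `backup`, `restore`, `upgrade`, and `validate`. Roughly:

```
retrace_restricted_traces.py backup <traces>              # copy traces to a backup folder
retrace_restricted_traces.py upgrade <gn_path> <out_path> # re-trace into a WIP folder
retrace_restricted_traces.py validate <gn_path> <out_path> <traces>
retrace_restricted_traces.py restore <traces>             # copy saved traces back
```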
## Prep work: Back up existing traces
This will save the original traces in a temporary folder if you need to revert to the prior trace format:
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py backup "*"
```
*Note: on Linux, omit the `py` prefix from the Python commands.*
This will save the traces to `./retrace-backups`. At any time you can revert the trace files by running:
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py restore "*"
```
## Part 1: Sanity Check with T-Rex
First we'll retrace a single app to verify the workflow is intact. Please
ensure you replace the specified variables with paths that work on your
configuration and checkout:
### Step 1/3: Capture T-Rex with Validation
```
export TRACE_GN_PATH=out/Debug
export TRACE_NAME=trex_200
py ./src/tests/restricted_traces/retrace_restricted_traces.py upgrade $TRACE_GN_PATH retrace-wip -f $TRACE_NAME --validation --limit 3
```
The `--validation` flag will turn on additional validation checks in the
trace. The `--limit 3` flag forces a maximum of 3 frames of tracing so the
test will run more quickly. The trace will end up in the `retrace-wip`
folder.
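Under the hood, `--limit` caps the frame window the script hands to the tracer; from
`upgrade_traces` in the retrace script (shown later in this change):

```
max_steps = min(args.limit, num_frames) if args.limit else num_frames
# Tracing starts at frame 2 (--retrace-mode issues a Swap() after Setup()),
# so the capture window becomes [2, max_steps + 1]:
additional_env = {
    'ANGLE_CAPTURE_FRAME_START': '2',
    'ANGLE_CAPTURE_FRAME_END': str(max_steps + 1),
}
```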
### Step 2/3: Validate T-Rex
The command below will update your copy of the trace, rebuild, then run the
test suite with validation enabled:
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py validate $TRACE_GN_PATH retrace-wip $TRACE_NAME
```
If the trace failed validation, see the section below on diagnosing tracer
errors. Otherwise proceed with the steps below.
### Step 3/3: Restore the Canonical T-Rex Trace
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py restore $TRACE_NAME
```
## Part 2: Do a limited trace upgrade with validation enabled
### Step 1/3: Upgrade all traces with a limit of 3 frames
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py upgrade $TRACE_GN_PATH retrace-wip --validation --limit 3 --no-overwrite
```
If this process gets interrupted, re-run the upgrade command. The
`--no-overwrite` argument will ensure it will complete eventually.
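A sketch of the skip this implies (the condition here is assumed for illustration; the actual
check lives in `upgrade_traces`):

```
# Hypothetical: traces that already produced output are not re-traced.
if args.no_overwrite and os.path.isdir(os.path.join(args.out_path, trace)):
    logging.info('Skipping %s: output already present.' % trace)
    continue
```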
If any traces failed to upgrade, see the section below on diagnosing tracer
errors. Otherwise proceed with the steps below.
### Step 2/3: Validate all upgraded traces
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py validate $TRACE_GN_PATH retrace-wip "*"
```
If any traces failed validation, see the section below on diagnosing tracer
errors. Otherwise proceed with the steps below.
### Step 3/3: Restore all traces
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py restore "*"
```
## Part 3: Do the full trace upgrade
```
rm -rf retrace-wip
py ./src/tests/restricted_traces/retrace_restricted_traces.py upgrade $TRACE_GN_PATH retrace-wip --no-overwrite
```
If this process gets interrupted, re-run the upgrade command. The
`--no-overwrite` argument will ensure it will complete eventually.
If any traces failed to upgrade, see the section below on diagnosing tracer
errors. Otherwise proceed with the steps below.
## Part 4: Test the upgraded traces under an experimental prefix (slow)
Because there still may be trace errors undetected by validation, we first
upload the traces to a temporary CIPD path for testing. After a successful
run on the CQ, we will then upload them to the main ANGLE prefix.
To enable the experimental prefix, edit
[`restricted_traces.json`](restricted_traces.json) to use a version
number beginning with 'x'. For example:
```
"traces": [
"aliexpress x1",
"among_us x1",
"angry_birds_2_1500 x1",
"arena_of_valor x1",
"asphalt_8 x1",
"avakin_life x1",
... and so on ...
```
Then run:
```
py ./src/tests/restricted_traces/retrace_restricted_traces.py restore -o retrace-wip "*"
py ./src/tests/restricted_traces/sync_restricted_traces_to_cipd.py
py ./scripts/run_code_generation.py
```
The restore command will copy the new traces from the `retrace-wip` directory
into the trace folder before we call the sync script.
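Concretely, `restore -o retrace-wip` copies in this direction (from `restore_traces` in the
retrace script):

```
trace_path = src_trace_path(trace)                      # src/tests/restricted_traces/<trace>
trace_backup_path = os.path.join(args.out_path, trace)  # retrace-wip/<trace>
copy_trace_folder(trace_backup_path, trace_path)        # saved copy -> source tree
```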
After these commands complete successfully, create and upload a CL as normal.
Run CQ +1 Dry-Run. If you find a test regression, see the section below on
diagnosing tracer errors. Otherwise proceed with the steps below.
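The 'x' in the version string is what routes a trace to the experimental prefix. From
`sync_restricted_traces_to_cipd.py` in this change (the fall-through to the stable prefix is
elided in the hunk below):

```
trace, trace_version = trace_info.split(' ')
if 'x' in trace_version:
    trace_prefix = EXPERIMENTAL_CIPD_PREFIX % getpass.getuser()
```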
## Part 5: Upload the verified traces to CIPD under the stable prefix
Now that you've validated the traces on the CQ, update
[`restricted_traces.json`](restricted_traces.json) to remove the 'x' prefix
and increment the version of each trace (skipping versions if you prefer),
and then run:
```
py ./src/tests/restricted_traces/sync_restricted_traces_to_cipd.py
py ./scripts/run_code_generation.py
```
Then create and upload a CL as normal. Congratulations, you've finished the
trace upgrade!
# Diagnosing and fixing tracer errors
TODO: http://anglebug.com/5133

View file

@@ -172,6 +172,12 @@ def reject_duplicate_keys(pairs):
return found_keys
def load_json_metadata(trace):
json_file_name = '%s/%s.json' % (trace, trace)
with open(json_file_name) as f:
return json.loads(f.read())
# TODO(http://anglebug.com/5878): Revert back to non-autogen'ed file names for the angledata.gz.
def get_angledata_filename(trace):
angledata_files = glob.glob('%s/%s*angledata.gz' % (trace, trace))
@@ -187,20 +193,18 @@ def gen_gni(traces, gni_file, format_args):
context = get_context(trace)
angledata_file = get_angledata_filename(trace)
txt_file = '%s/%s_capture_context%s_files.txt' % (trace, trace, context)
json_file_name = '%s/%s.json' % (trace, trace)
if os.path.exists(txt_file):
with open(txt_file) as f:
files = f.readlines()
f.close()
source_files = ['"%s/%s"' % (trace, file.strip()) for file in files]
else:
assert os.path.exists(json_file_name), '%s does not exist' % json_file_name
with open(json_file_name) as f:
json_data = json.loads(f.read())
json_data = load_json_metadata(trace)
files = json_data["TraceFiles"]
source_files = ['"%s/%s"' % (trace, file.strip()) for file in files]
data_files = ['"%s"' % angledata_file]
json_file_name = '%s/%s.json' % (trace, trace)
if os.path.exists(json_file_name):
data_files.append('"%s"' % json_file_name)
@@ -244,6 +248,7 @@ def contains_colorspace(trace):
return contains_string(trace, 'kReplayDrawSurfaceColorSpace')
# TODO(jmadill): Remove after retrace. http://anglebug.com/5133
def json_metadata_exists(trace):
return os.path.isfile('%s/%s.json' % (trace, trace))

View file

@@ -15,6 +15,8 @@ import json
import logging
import os
import re
import shutil
import stat
import subprocess
import sys
@@ -23,15 +25,34 @@ from gen_restricted_traces import get_context as get_context
DEFAULT_TEST_SUITE = 'angle_perftests'
DEFAULT_TEST_JSON = 'restricted_traces.json'
DEFAULT_LOG_LEVEL = 'info'
DEFAULT_BACKUP_FOLDER = 'retrace-backups'
# We preserve select metadata in the trace header that can't be re-captured properly.
# Currently this is just the set of default framebuffer surface config bits.
METADATA_KEYWORDS = ['kDefaultFramebuffer']
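# Illustrative sketch (not part of this change): preservation amounts to
# carrying keyword-tagged lines from the previous trace header over into the
# re-captured one, roughly:
#
#   preserved = [line for line in old_header_lines
#                if any(kw in line for kw in METADATA_KEYWORDS)]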
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
def get_script_dir():
return os.path.dirname(sys.argv[0])
# TODO(jmadill): Remove after retrace. http://anglebug.com/5133
def json_metadata_exists(trace):
json_file_name = os.path.join(get_script_dir(), '%s/%s.json') % (trace, trace)
return os.path.isfile(json_file_name)
def load_json_metadata(trace):
json_file_name = os.path.join(get_script_dir(), '%s/%s.json') % (trace, trace)
with open(json_file_name) as f:
return json.loads(f.read())['TraceMetadata']
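# For reference, the per-trace JSON is assumed to look roughly like:
#
#   {
#     "TraceMetadata": { "FrameEnd": 10, ... },
#     "TraceFiles": [ "<trace>_capture_context1.cpp", ... ]
#   }
#
# get_num_frames() below prefers 'FrameEnd' from this metadata, and
# gen_restricted_traces.py reads 'TraceFiles' when emitting the .gni file.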
def src_trace_path(trace):
script_dir = os.path.dirname(sys.argv[0])
return os.path.join(script_dir, trace)
return os.path.join(get_script_dir(), trace)
def context_header(trace, trace_path):
@@ -47,6 +68,11 @@ def context_header(trace, trace_path):
def get_num_frames(trace):
if json_metadata_exists(trace):
json_metadata = load_json_metadata(trace)
if 'FrameEnd' in json_metadata:
return int(json_metadata['FrameEnd'])
trace_path = src_trace_path(trace)
lo = 99999999
@@ -105,51 +131,98 @@ def path_contains_header(path):
return False
def main():
parser = argparse.ArgumentParser()
parser.add_argument('gn_path', help='GN build path')
parser.add_argument('out_path', help='Output directory')
parser.add_argument('-f', '--filter', help='Trace filter. Defaults to all.', default='*')
parser.add_argument('-l', '--log', help='Logging level.', default=DEFAULT_LOG_LEVEL)
parser.add_argument(
'--no-swiftshader',
help='Trace against native Vulkan.',
action='store_true',
default=False)
parser.add_argument(
'-n',
'--no-overwrite',
help='Skip traces which already exist in the out directory.',
action='store_true')
parser.add_argument(
'--validation', help='Enable state serialization validation calls.', action='store_true')
parser.add_argument(
'--validation-expr',
help='Validation expression, used to add more validation checkpoints.')
parser.add_argument(
'--limit',
'--frame-limit',
type=int,
help='Limits the number of captured frames to produce a shorter trace than the original.')
args, extra_flags = parser.parse_known_args()
def chmod_directory(directory, perm):
assert os.path.isdir(directory)
for file in os.listdir(directory):
fn = os.path.join(directory, file)
os.chmod(fn, perm)
logging.basicConfig(level=args.log.upper())
script_dir = os.path.dirname(sys.argv[0])
def ensure_rmdir(directory):
if os.path.isdir(directory):
chmod_directory(directory, stat.S_IWRITE)
shutil.rmtree(directory)
# Load trace names
with open(os.path.join(script_dir, DEFAULT_TEST_JSON)) as f:
traces = json.loads(f.read())
traces = [trace.split(' ')[0] for trace in traces['traces']]
def copy_trace_folder(old_path, new_path):
logging.info('%s -> %s' % (old_path, new_path))
ensure_rmdir(new_path)
shutil.copytree(old_path, new_path)
binary = os.path.join(args.gn_path, DEFAULT_TEST_SUITE)
def backup_traces(args, traces):
for trace in fnmatch.filter(traces, args.traces):
trace_path = src_trace_path(trace)
trace_backup_path = os.path.join(args.out_path, trace)
copy_trace_folder(trace_path, trace_backup_path)
# TODO(jmadill): Remove this once migrated. http://anglebug.com/5133
def run_code_generation():
python_binary = 'py.exe' if os.name == 'nt' else 'python3'
angle_dir = os.path.join(get_script_dir(), '..', '..', '..')
gen_path = os.path.join(angle_dir, 'scripts', 'run_code_generation.py')
subprocess.check_call([python_binary, gen_path])
def restore_traces(args, traces):
for trace in fnmatch.filter(traces, args.traces):
trace_path = src_trace_path(trace)
trace_backup_path = os.path.join(args.out_path, trace)
if not os.path.isdir(trace_backup_path):
logging.error('Trace folder not found at %s' % trace_backup_path)
else:
copy_trace_folder(trace_backup_path, trace_path)
# TODO(jmadill): Remove this once migrated. http://anglebug.com/5133
angle_dir = os.path.join(get_script_dir(), '..', '..', '..')
json_path = os.path.join(angle_dir, 'scripts', 'code_generation_hashes',
'restricted_traces.json')
if os.path.exists(json_path):
os.unlink(json_path)
run_code_generation()
def run_autoninja(args):
autoninja_binary = 'autoninja'
if os.name == 'nt':
binary += '.exe'
autoninja_binary += '.bat'
autoninja_args = [autoninja_binary, '-C', args.gn_path, args.test_suite]
logging.debug('Calling %s' % ' '.join(autoninja_args))
subprocess.check_call(autoninja_args)
def run_test_suite(args, trace, max_steps, additional_args, additional_env):
trace_binary = os.path.join(args.gn_path, args.test_suite)
if os.name == 'nt':
trace_binary += '.exe'
renderer = 'vulkan' if args.no_swiftshader else 'vulkan_swiftshader'
trace_filter = '--gtest_filter=TracePerfTest.Run/%s_%s' % (renderer, trace)
run_args = [
trace_binary,
trace_filter,
'--max-steps-performed',
str(max_steps),
] + additional_args
if not args.no_swiftshader:
run_args += ['--enable-all-trace-tests']
env = {**os.environ.copy(), **additional_env}
env_string = ' '.join(['%s=%s' % item for item in additional_env.items()])
if env_string:
env_string += ' '
logging.info('%s%s' % (env_string, ' '.join(run_args)))
subprocess.check_call(run_args, env=env)
def upgrade_traces(args, traces):
run_autoninja(args)
failures = []
for trace in fnmatch.filter(traces, args.filter):
for trace in fnmatch.filter(traces, args.traces):
logging.debug('Tracing %s' % trace)
trace_path = os.path.abspath(os.path.join(args.out_path, trace))
@@ -164,6 +237,11 @@ def main():
logging.debug('Read metadata: %s' % str(metadata))
if json_metadata_exists(trace):
json_metadata = load_json_metadata(trace)
else:
json_metadata = {}
max_steps = min(args.limit, num_frames) if args.limit else num_frames
# We start tracing from frame 2. --retrace-mode issues a Swap() after Setup() so we can
@@ -172,35 +250,25 @@
'ANGLE_CAPTURE_LABEL': trace,
'ANGLE_CAPTURE_OUT_DIR': trace_path,
'ANGLE_CAPTURE_FRAME_START': '2',
'ANGLE_CAPTURE_FRAME_END': str(num_frames + 1),
'ANGLE_CAPTURE_FRAME_END': str(max_steps + 1),
}
if args.validation:
additional_env['ANGLE_CAPTURE_VALIDATION'] = '1'
# Also turn on shader output init to ensure we have no undefined values.
# This feature is also enabled in replay when using --validation.
additional_env['ANGLE_FEATURE_OVERRIDES_ENABLED'] = 'forceInitShaderOutputVariables'
additional_env[
'ANGLE_FEATURE_OVERRIDES_ENABLED'] = 'allocateNonZeroMemory:forceInitShaderVariables'
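# Note: allocateNonZeroMemory fills fresh allocations with nonzero values,
# so reads of uninitialized memory diverge and surface as validation
# mismatches (assumed intent of pairing it with forceInitShaderVariables).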
if args.validation_expr:
additional_env['ANGLE_CAPTURE_VALIDATION_EXPR'] = args.validation_expr
if args.trim:
additional_env['ANGLE_CAPTURE_TRIM_ENABLED'] = '1'
if args.no_trim:
additional_env['ANGLE_CAPTURE_TRIM_ENABLED'] = '0'
env = {**os.environ.copy(), **additional_env}
additional_args = ['--retrace-mode']
renderer = 'vulkan' if args.no_swiftshader else 'vulkan_swiftshader'
trace_filter = '--gtest_filter=TracePerfTest.Run/%s_%s' % (renderer, trace)
run_args = [
binary,
trace_filter,
'--retrace-mode',
'--max-steps-performed',
str(max_steps),
'--enable-all-trace-tests',
]
print('Capturing "%s" (%d frames)...' % (trace, num_frames))
logging.debug('Running "%s" with environment: %s' %
(' '.join(run_args), str(additional_env)))
try:
subprocess.check_call(run_args, env=env)
run_test_suite(args, trace, max_steps, additional_args, additional_env)
header_file = context_header(trace, trace_path)
@@ -215,11 +283,135 @@ def main():
failures += [trace]
if failures:
print('The following traces failed to re-trace:\n')
print('The following traces failed to upgrade:\n')
print('\n'.join([' ' + trace for trace in failures]))
return 1
return EXIT_FAILURE
return 0
return EXIT_SUCCESS
def validate_traces(args, traces):
restore_traces(args, traces)
run_autoninja(args)
additional_args = ['--validation']
additional_env = {
'ANGLE_FEATURE_OVERRIDES_ENABLED': 'allocateNonZeroMemory:forceInitShaderVariables'
}
failures = []
for trace in fnmatch.filter(traces, args.traces):
num_frames = get_num_frames(trace)
max_steps = min(args.limit, num_frames) if args.limit else num_frames
try:
run_test_suite(args, trace, max_steps, additional_args, additional_env)
except:
logging.error('There was a failure running "%s".' % trace)
failures += [trace]
if failures:
print('The following traces failed to validate:\n')
print('\n'.join([' ' + trace for trace in failures]))
return EXIT_FAILURE
return EXIT_SUCCESS
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--log', help='Logging level.', default=DEFAULT_LOG_LEVEL)
parser.add_argument(
'--test-suite',
help='Test Suite. Default is %s' % DEFAULT_TEST_SUITE,
default=DEFAULT_TEST_SUITE)
parser.add_argument(
'--no-swiftshader',
help='Trace against native Vulkan.',
action='store_true',
default=False)
subparsers = parser.add_subparsers(dest='command', required=True, help='Command to run.')
backup_parser = subparsers.add_parser(
'backup', help='Copies trace contents into a saved folder.')
backup_parser.add_argument(
'traces', help='Traces to back up. Supports fnmatch expressions.', default='*')
backup_parser.add_argument(
'-o',
'--out-path',
'--backup-path',
help='Destination folder. Default is "%s".' % DEFAULT_BACKUP_FOLDER,
default=DEFAULT_BACKUP_FOLDER)
restore_parser = subparsers.add_parser(
'restore', help='Copies traces from a saved folder to the trace folder.')
restore_parser.add_argument(
'-o',
'--out-path',
'--backup-path',
help='Path where the traces were saved. Default is "%s".' % DEFAULT_BACKUP_FOLDER,
default=DEFAULT_BACKUP_FOLDER)
restore_parser.add_argument(
'traces', help='Traces to restore. Supports fnmatch expressions.', default='*')
upgrade_parser = subparsers.add_parser(
'upgrade', help='Re-trace existing traces, upgrading the format.')
upgrade_parser.add_argument('gn_path', help='GN build path')
upgrade_parser.add_argument('out_path', help='Output directory')
upgrade_parser.add_argument(
'-f', '--traces', '--filter', help='Trace filter. Defaults to all.', default='*')
upgrade_parser.add_argument(
'-n',
'--no-overwrite',
help='Skip traces which already exist in the out directory.',
action='store_true')
upgrade_parser.add_argument(
'--validation', help='Enable state serialization validation calls.', action='store_true')
upgrade_parser.add_argument(
'--validation-expr',
help='Validation expression, used to add more validation checkpoints.')
upgrade_parser.add_argument(
'--limit',
'--frame-limit',
type=int,
help='Limits the number of captured frames to produce a shorter trace than the original.')
upgrade_parser.add_argument(
'--trim', action='store_true', help='Enables trace trimming. Breaks replay validation.')
upgrade_parser.add_argument(
'--no-trim', action='store_true', help='Disables trace trimming. Useful for validation.')
upgrade_parser.set_defaults(trim=True)
validate_parser = subparsers.add_parser(
'validate', help='Runs an updated test suite with validation enabled.')
validate_parser.add_argument('gn_path', help='GN build path')
validate_parser.add_argument('out_path', help='Path to the upgraded trace folder.')
validate_parser.add_argument(
'traces', help='Traces to validate. Supports fnmatch expressions.', default='*')
validate_parser.add_argument(
'--limit', '--frame-limit', type=int, help='Limits the number of tested frames.')
args, extra_flags = parser.parse_known_args()
logging.basicConfig(level=args.log.upper())
# Load trace names
with open(os.path.join(get_script_dir(), DEFAULT_TEST_JSON)) as f:
traces = json.loads(f.read())
traces = [trace.split(' ')[0] for trace in traces['traces']]
if args.command == 'backup':
return backup_traces(args, traces)
elif args.command == 'restore':
return restore_traces(args, traces)
elif args.command == 'upgrade':
return upgrade_traces(args, traces)
elif args.command == 'validate':
return validate_traces(args, traces)
else:
logging.fatal('Unknown command: %s' % args.command)
return EXIT_FAILURE
if __name__ == '__main__':

View file

@@ -12,9 +12,12 @@ import argparse
import getpass
import fnmatch
import logging
import itertools
import json
import multiprocessing
import os
import platform
import signal
import subprocess
import sys
@@ -23,27 +26,33 @@ EXPERIMENTAL_CIPD_PREFIX = 'experimental/google.com/%s/angle/traces'
LOG_LEVEL = 'info'
JSON_PATH = 'restricted_traces.json'
SCRIPT_DIR = os.path.dirname(sys.argv[0])
MAX_THREADS = 8
LONG_TIMEOUT = 100000
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
def cipd(*args):
logging.debug('running cipd with args: %s', ' '.join(args))
def cipd(logger, *args):
logger.debug('running cipd with args: %s', ' '.join(args))
exe = 'cipd.bat' if platform.system() == 'Windows' else 'cipd'
try:
completed = subprocess.run([exe] + list(args), stderr=subprocess.STDOUT)
except KeyboardInterrupt:
pass
if completed.stdout:
logging.debug('cipd stdout:\n%s' % completed.stdout)
logger.debug('cipd stdout:\n%s' % completed.stdout)
return completed.returncode
def main(args):
with open(os.path.join(SCRIPT_DIR, JSON_PATH)) as f:
traces = json.loads(f.read())
for trace_info in traces['traces']:
def sync_trace(param):
args, trace_info = param
logger = args.logger
trace, trace_version = trace_info.split(' ')
if args.filter and not fnmatch.fnmatch(trace, args.filter):
logging.debug('Skipping %s because it does not match the test filter.' % trace)
continue
logger.debug('Skipping %s because it does not match the test filter.' % trace)
return EXIT_SUCCESS
if 'x' in trace_version:
trace_prefix = EXPERIMENTAL_CIPD_PREFIX % getpass.getuser()
@@ -53,21 +62,46 @@ def main(args):
trace_name = '%s/%s' % (trace_prefix, trace)
# Determine if this version exists
if cipd('describe', trace_name, '-version', 'version:%s' % trace_version) == 0:
logging.info('%s version %s already present' % (trace, trace_version))
continue
if cipd(logger, 'describe', trace_name, '-version', 'version:%s' % trace_version) == 0:
logger.info('%s version %s already present' % (trace, trace_version))
return EXIT_SUCCESS
logging.info('%s version %s missing. calling create.' % (trace, trace_version))
logger.info('%s version %s missing. calling create.' % (trace, trace_version))
trace_folder = os.path.join(SCRIPT_DIR, trace)
if cipd('create', '-name', trace_name, '-in', trace_folder, '-tag', 'version:%s' %
if cipd(logger, 'create', '-name', trace_name, '-in', trace_folder, '-tag', 'version:%s' %
trace_version, '-log-level', args.log.lower(), '-install-mode', 'copy') != 0:
logging.error('%s version %s create failed' % (trace, trace_version))
return 1
logger.error('%s version %s create failed' % (trace, trace_version))
return EXIT_FAILURE
return EXIT_SUCCESS
return 0
def main(args):
args.logger = multiprocessing.log_to_stderr()
args.logger.setLevel(level=args.log.upper())
with open(os.path.join(SCRIPT_DIR, JSON_PATH)) as f:
traces = json.loads(f.read())
zipped_args = zip(itertools.repeat(args), traces['traces'])
if args.threads > 1:
pool = multiprocessing.Pool(args.threads)
try:
retval = pool.map_async(sync_trace, zipped_args).get(LONG_TIMEOUT)
except KeyboardInterrupt:
pool.terminate()
except Exception as e:
print('got exception: %r, terminating the pool' % (e,))
pool.terminate()
pool.join()
else:
retval = map(sync_trace, zipped_args)
return EXIT_FAILURE if EXIT_FAILURE in retval else EXIT_SUCCESS
if __name__ == '__main__':
max_threads = min(multiprocessing.cpu_count(), MAX_THREADS)
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--prefix', help='CIPD Prefix. Default: %s' % CIPD_PREFIX, default=CIPD_PREFIX)
@@ -75,6 +109,11 @@ if __name__ == '__main__':
'-l', '--log', help='Logging level. Default: %s' % LOG_LEVEL, default=LOG_LEVEL)
parser.add_argument(
'-f', '--filter', help='Only sync specified tests. Supports fnmatch expressions.')
parser.add_argument(
'-t',
'--threads',
help='Maximum parallel threads. Default: %s' % max_threads,
default=max_threads)
args, extra_flags = parser.parse_known_args()
logging.basicConfig(level=args.log.upper())