Bug 1619475 - also convert jstests from optparse to argparse r=arai

Differential Revision: https://phabricator.services.mozilla.com/D65400

--HG--
extra : moz-landing-system : lando
Steve Fink 2020-03-06 18:17:10 +00:00
Parent 4efc95ae8c
Commit 4e2511c87c
2 changed files with 128 additions and 137 deletions
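For readers less familiar with the two libraries, the sketch below (illustrative only, not code from this patch) shows the mapping the conversion relies on: OptionParser/OptionGroup/add_option become ArgumentParser/add_argument_group/add_argument, type='string' and type='choice' become type=str and a plain choices= list, %default in help strings becomes %(default)s, and parse_args()'s (options, args) pair is replaced by parse_known_args().

# Minimal before/after sketch of the optparse -> argparse pattern used in this
# patch (hypothetical standalone example, not taken from jstests.py).
from argparse import ArgumentParser

op = ArgumentParser(description='Run jstests JS shell tests')
group = op.add_argument_group("Harness Controls", "Control how tests are run.")
# optparse: harness_og.add_option('--jitflags', type='string', default='none',
#                                 help='... (default %default).')
group.add_argument('--jitflags', type=str, default='none',
                   help='IonMonkey option combinations (default %(default)s).')
# optparse: type='choice' becomes a plain choices= list in argparse.
group.add_argument('--format', choices=['automation', 'none'], default='none')

# parse_known_args() returns (namespace, leftover args) much like optparse's
# (options, args), so positional JS_SHELL/TESTS handling stays similar.
options, remaining = op.parse_known_args(['--jitflags', 'ion', 'some/test.js'])
print(options.jitflags)   # ion
print(remaining)          # ['some/test.js']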

View file

@@ -15,8 +15,8 @@ import os
 import shlex
 import sys
 import tempfile
-import textwrap
 import platform
 from os.path import abspath, dirname, isfile, realpath
 from contextlib import contextmanager
 from copy import copy
@@ -92,150 +92,140 @@ def parse_args():
     requested_paths :set<str>: Test paths specially requested on the CLI.
     excluded_paths :set<str>: Test paths specifically excluded by the CLI.
     """
-    from optparse import OptionParser, OptionGroup
-    op = OptionParser(usage=textwrap.dedent("""
-        %prog [OPTIONS] JS_SHELL [TESTS]
-
-        Shell output format: [ pass | fail | timeout | skip ] progress | time
-    """).strip())
-    op.add_option('--xul-info', dest='xul_info_src',
-                  help='config data for xulRuntime'
-                  ' (avoids search for config/autoconf.mk)')
+    from argparse import ArgumentParser
+    op = ArgumentParser(
+        description='Run jstests JS shell tests',
+        epilog='Shell output format: [ pass | fail | timeout | skip ] progress | time')
+    op.add_argument('--xul-info', dest='xul_info_src',
+                    help='config data for xulRuntime'
+                    ' (avoids search for config/autoconf.mk)')
 
-    harness_og = OptionGroup(op, "Harness Controls",
-                             "Control how tests are run.")
-    harness_og.add_option('-j', '--worker-count', type=int,
-                          default=max(1, get_cpu_count()),
-                          help='Number of tests to run in parallel'
-                          ' (default %default)')
-    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
-                          help='Set maximum time a test is allows to run'
-                          ' (in seconds).')
-    harness_og.add_option('--show-slow', action='store_true',
-                          help='Show tests taking longer than a minimum time'
-                          ' (in seconds).')
-    harness_og.add_option('--slow-test-threshold', type=float, default=5.0,
-                          help='Time in seconds a test can take until it is'
-                          'considered slow (default %default).')
-    harness_og.add_option('-a', '--args', dest='shell_args', default='',
-                          help='Extra args to pass to the JS shell.')
-    harness_og.add_option('--feature-args', dest='feature_args', default='',
-                          help='Extra args to pass to the JS shell even when feature-testing.')
-    harness_og.add_option('--jitflags', dest='jitflags', default='none',
-                          type='string',
-                          help='IonMonkey option combinations. One of all,'
-                          ' debug, ion, and none (default %default).')
-    harness_og.add_option('--tbpl', action='store_true',
-                          help='Runs each test in all configurations tbpl'
-                          ' tests.')
-    harness_og.add_option('--tbpl-debug', action='store_true',
-                          help='Runs each test in some faster configurations'
-                          ' tbpl tests.')
-    harness_og.add_option('-g', '--debug', action='store_true',
-                          help='Run a test in debugger.')
-    harness_og.add_option('--debugger', default='gdb -q --args',
-                          help='Debugger command.')
-    harness_og.add_option('-J', '--jorendb', action='store_true',
-                          help='Run under JS debugger.')
-    harness_og.add_option('--passthrough', action='store_true',
-                          help='Run tests with stdin/stdout attached to'
-                          ' caller.')
-    harness_og.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
-                          help="instead of running tests, use them to test the "
-                          "Reflect.stringify code in specified file")
-    harness_og.add_option('--valgrind', action='store_true',
-                          help='Run tests in valgrind.')
-    harness_og.add_option('--valgrind-args', default='',
-                          help='Extra args to pass to valgrind.')
-    harness_og.add_option('--rr', action='store_true',
-                          help='Run tests under RR record-and-replay debugger.')
-    harness_og.add_option('-C', '--check-output', action='store_true',
-                          help='Run tests to check output for different jit-flags')
-    op.add_option_group(harness_og)
+    harness_og = op.add_argument_group("Harness Controls", "Control how tests are run.")
+    harness_og.add_argument('-j', '--worker-count', type=int,
+                            default=max(1, get_cpu_count()),
+                            help='Number of tests to run in parallel'
+                            ' (default %(default)s)')
+    harness_og.add_argument('-t', '--timeout', type=float, default=150.0,
+                            help='Set maximum time a test is allows to run'
+                            ' (in seconds).')
+    harness_og.add_argument('--show-slow', action='store_true',
+                            help='Show tests taking longer than a minimum time'
+                            ' (in seconds).')
+    harness_og.add_argument('--slow-test-threshold', type=float, default=5.0,
+                            help='Time in seconds a test can take until it is'
+                            'considered slow (default %(default)s).')
+    harness_og.add_argument('-a', '--args', dest='shell_args', default='',
+                            help='Extra args to pass to the JS shell.')
+    harness_og.add_argument('--feature-args', dest='feature_args', default='',
+                            help='Extra args to pass to the JS shell even when feature-testing.')
+    harness_og.add_argument('--jitflags', dest='jitflags', default='none',
+                            type=str,
+                            help='IonMonkey option combinations. One of all,'
+                            ' debug, ion, and none (default %(default)s).')
+    harness_og.add_argument('--tbpl', action='store_true',
+                            help='Runs each test in all configurations tbpl'
+                            ' tests.')
+    harness_og.add_argument('--tbpl-debug', action='store_true',
+                            help='Runs each test in some faster configurations'
+                            ' tbpl tests.')
+    harness_og.add_argument('-g', '--debug', action='store_true',
+                            help='Run a test in debugger.')
+    harness_og.add_argument('--debugger', default='gdb -q --args',
+                            help='Debugger command.')
+    harness_og.add_argument('-J', '--jorendb', action='store_true',
+                            help='Run under JS debugger.')
+    harness_og.add_argument('--passthrough', action='store_true',
+                            help='Run tests with stdin/stdout attached to'
+                            ' caller.')
+    harness_og.add_argument('--test-reflect-stringify', dest="test_reflect_stringify",
+                            help="instead of running tests, use them to test the "
+                            "Reflect.stringify code in specified file")
+    harness_og.add_argument('--valgrind', action='store_true',
+                            help='Run tests in valgrind.')
+    harness_og.add_argument('--valgrind-args', default='',
+                            help='Extra args to pass to valgrind.')
+    harness_og.add_argument('--rr', action='store_true',
+                            help='Run tests under RR record-and-replay debugger.')
+    harness_og.add_argument('-C', '--check-output', action='store_true',
+                            help='Run tests to check output for different jit-flags')
 
-    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
-    input_og.add_option('-f', '--file', dest='test_file', action='append',
-                        help='Get tests from the given file.')
-    input_og.add_option('-x', '--exclude-file', action='append',
-                        help='Exclude tests from the given file.')
-    input_og.add_option('--wpt', dest='wpt',
-                        type='choice',
-                        choices=['enabled', 'disabled', 'if-running-everything'],
-                        default='if-running-everything',
-                        help="Enable or disable shell web-platform-tests "
-                        "(default: enable if no test paths are specified).")
-    input_og.add_option('--include', action='append', dest='requested_paths', default=[],
-                        help='Include the given test file or directory.')
-    input_og.add_option('--exclude', action='append', dest='excluded_paths', default=[],
-                        help='Exclude the given test file or directory.')
-    input_og.add_option('-d', '--exclude-random', dest='random',
-                        action='store_false',
-                        help='Exclude tests marked as "random."')
-    input_og.add_option('--run-skipped', action='store_true',
-                        help='Run tests marked as "skip."')
-    input_og.add_option('--run-only-skipped', action='store_true',
-                        help='Run only tests marked as "skip."')
-    input_og.add_option('--run-slow-tests', action='store_true',
-                        help='Do not skip tests marked as "slow."')
-    input_og.add_option('--no-extensions', action='store_true',
-                        help='Run only tests conforming to the ECMAScript 5'
-                        ' standard.')
-    input_og.add_option('--repeat', type=int, default=1,
-                        help='Repeat tests the given number of times.')
-    op.add_option_group(input_og)
+    input_og = op.add_argument_group("Inputs", "Change what tests are run.")
+    input_og.add_argument('-f', '--file', dest='test_file', action='append',
+                          help='Get tests from the given file.')
+    input_og.add_argument('-x', '--exclude-file', action='append',
+                          help='Exclude tests from the given file.')
+    input_og.add_argument('--wpt', dest='wpt',
+                          choices=['enabled', 'disabled', 'if-running-everything'],
+                          default='if-running-everything',
+                          help="Enable or disable shell web-platform-tests "
+                          "(default: enable if no test paths are specified).")
+    input_og.add_argument('--include', action='append', dest='requested_paths', default=[],
+                          help='Include the given test file or directory.')
+    input_og.add_argument('--exclude', action='append', dest='excluded_paths', default=[],
+                          help='Exclude the given test file or directory.')
+    input_og.add_argument('-d', '--exclude-random', dest='random',
+                          action='store_false',
+                          help='Exclude tests marked as "random."')
+    input_og.add_argument('--run-skipped', action='store_true',
+                          help='Run tests marked as "skip."')
+    input_og.add_argument('--run-only-skipped', action='store_true',
+                          help='Run only tests marked as "skip."')
+    input_og.add_argument('--run-slow-tests', action='store_true',
+                          help='Do not skip tests marked as "slow."')
+    input_og.add_argument('--no-extensions', action='store_true',
+                          help='Run only tests conforming to the ECMAScript 5'
+                          ' standard.')
+    input_og.add_argument('--repeat', type=int, default=1,
+                          help='Repeat tests the given number of times.')
 
-    output_og = OptionGroup(op, "Output",
-                            "Modify the harness and tests output.")
-    output_og.add_option('-s', '--show-cmd', action='store_true',
-                         help='Show exact commandline used to run each test.')
-    output_og.add_option('-o', '--show-output', action='store_true',
-                         help="Print each test's output to the file given by"
-                         " --output-file.")
-    output_og.add_option('-F', '--failed-only', action='store_true',
-                         help="If a --show-* option is given, only print"
-                         " output for failed tests.")
-    output_og.add_option('--no-show-failed', action='store_true',
-                         help="Don't print output for failed tests"
-                         " (no-op with --show-output).")
-    output_og.add_option('-O', '--output-file',
-                         help='Write all output to the given file'
-                         ' (default: stdout).')
-    output_og.add_option('--failure-file',
-                         help='Write all not-passed tests to the given file.')
-    output_og.add_option('--no-progress', dest='hide_progress',
-                         action='store_true',
-                         help='Do not show the progress bar.')
-    output_og.add_option('--tinderbox', dest='format', action='store_const',
-                         const='automation',
-                         help='Use automation-parseable output format.')
-    output_og.add_option('--format', dest='format', default='none',
-                         type='choice', choices=['automation', 'none'],
-                         help='Output format. Either automation or none'
-                         ' (default %default).')
-    output_og.add_option('--log-wptreport', dest='wptreport', action='store',
-                         help='Path to write a Web Platform Tests report (wptreport)')
-    op.add_option_group(output_og)
+    output_og = op.add_argument_group("Output", "Modify the harness and tests output.")
+    output_og.add_argument('-s', '--show-cmd', action='store_true',
+                           help='Show exact commandline used to run each test.')
+    output_og.add_argument('-o', '--show-output', action='store_true',
+                           help="Print each test's output to the file given by"
+                           " --output-file.")
+    output_og.add_argument('-F', '--failed-only', action='store_true',
+                           help="If a --show-* option is given, only print"
+                           " output for failed tests.")
+    output_og.add_argument('--no-show-failed', action='store_true',
+                           help="Don't print output for failed tests"
+                           " (no-op with --show-output).")
+    output_og.add_argument('-O', '--output-file',
+                           help='Write all output to the given file'
+                           ' (default: stdout).')
+    output_og.add_argument('--failure-file',
+                           help='Write all not-passed tests to the given file.')
+    output_og.add_argument('--no-progress', dest='hide_progress',
+                           action='store_true',
+                           help='Do not show the progress bar.')
+    output_og.add_argument('--tinderbox', dest='format', action='store_const',
+                           const='automation',
+                           help='Use automation-parseable output format.')
+    output_og.add_argument('--format', dest='format', default='none',
+                           choices=['automation', 'none'],
+                           help='Output format. Either automation or none'
+                           ' (default %(default)s).')
+    output_og.add_argument('--log-wptreport', dest='wptreport', action='store',
+                           help='Path to write a Web Platform Tests report (wptreport)')
 
-    special_og = OptionGroup(op, "Special",
-                             "Special modes that do not run tests.")
-    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
-                          help='Generate reftest manifest files.')
-    op.add_option_group(special_og)
-    options, args = op.parse_args()
+    special_og = op.add_argument_group("Special", "Special modes that do not run tests.")
+    special_og.add_argument('--make-manifests', metavar='BASE_TEST_PATH',
+                            help='Generate reftest manifest files.')
+
+    op.add_argument('--js-shell', metavar='JS_SHELL',
+                    help='JS shell to run tests with')
+
+    options, args = op.parse_known_args()
 
-    # Acquire the JS shell given on the command line.
-    options.js_shell = None
-    requested_paths = set(options.requested_paths)
-    if len(args) > 0:
-        options.js_shell = abspath(args[0])
-        requested_paths |= set(args[1:])
-
-    # If we do not have a shell, we must be in a special mode.
-    if options.js_shell is None and not options.make_manifests:
-        op.error('missing JS_SHELL argument')
+    # Need a shell unless in a special mode.
+    if not options.make_manifests:
+        if not args:
+            op.error('missing JS_SHELL argument')
+        options.js_shell = os.path.abspath(args.pop(0))
+
+    requested_paths = set(args)
 
     # Valgrind, gdb, and rr are mutually exclusive.
-    if sum(map(lambda e: 1 if e else 0, [options.valgrind, options.debug, options.rr])) > 1:
+    if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
         op.error("--valgrind, --debug, and --rr are mutually exclusive.")
 
     # Fill the debugger field, as needed.
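One behavioral difference worth calling out: argparse has no direct equivalent of optparse's automatic (options, args) split for positionals, so the new code calls parse_known_args() and treats the leftover arguments as JS_SHELL followed by test paths. A condensed sketch of that flow (simplified from the hunk above; the example inputs are made up):

# Condensed sketch of the new positional-argument handling (simplified from
# the parse_args() hunk above; 'dist/bin/js' and 'non262' are made-up inputs).
import os
from argparse import ArgumentParser

op = ArgumentParser(description='Run jstests JS shell tests')
op.add_argument('--make-manifests', metavar='BASE_TEST_PATH',
                help='Generate reftest manifest files.')

options, args = op.parse_known_args(['dist/bin/js', 'non262'])

# Need a shell unless in a special mode.
if not options.make_manifests:
    if not args:
        op.error('missing JS_SHELL argument')
    options.js_shell = os.path.abspath(args.pop(0))

requested_paths = set(args)   # remaining leftovers are the requested test paths
print(options.js_shell, requested_paths)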

View file

@@ -6,6 +6,7 @@ from __future__ import print_function
 import os
 import re
+import six
 import sys
 from subprocess import Popen, PIPE

@@ -258,7 +259,7 @@ def _emit_manifest_at(location, relative, test_gen, depth):
     filename = os.path.join(location, 'jstests.list')
     manifest = []
     numTestFiles = 0
-    for k, test_list in manifests.iteritems():
+    for k, test_list in six.iteritems(manifests):
         fullpath = os.path.join(location, k)
         if os.path.isdir(fullpath):
             manifest.append("include " + k + "/jstests.list")
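The second file's change is a small Python 3 compatibility fix: dict.iteritems() exists only on Python 2, so the loop now goes through six.iteritems(), which works on both interpreters. A minimal illustration (assuming the six package is installed; the sample dict is made up):

# six.iteritems(d) works under Python 2 and Python 3, unlike d.iteritems().
import six

manifests = {'non262': ['a.js'], 'test262': ['b.js']}   # made-up sample data
for key, test_list in six.iteritems(manifests):
    print(key, len(test_list))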