From af2ee3bcdc21c478e1648e8687febc67dc97b964 Mon Sep 17 00:00:00 2001
From: Dirkjan Ochtman
Date: Wed, 23 Jan 2013 18:46:55 +0100
Subject: [PATCH] Bug 638219 - Move jittests.main() function back into jit_test.py script. r=terrence

---
 js/src/jit-test/jit_test.py  | 196 ++++++++++++++++++++++++++++++++++-
 js/src/tests/lib/jittests.py | 196 +----------------------------------
 2 files changed, 196 insertions(+), 196 deletions(-)

diff --git a/js/src/jit-test/jit_test.py b/js/src/jit-test/jit_test.py
index 0214e23779ad..2d8da27b1bbc 100755
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -3,7 +3,7 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-import sys
+import os, shlex, subprocess, sys, traceback
 
 def add_libdir_to_path():
     from os.path import dirname, exists, join, realpath
@@ -16,5 +16,197 @@ add_libdir_to_path()
 
 import jittests
 
+def main(argv):
+
+    script_path = os.path.abspath(__file__)
+    script_dir = os.path.dirname(script_path)
+    test_dir = os.path.join(script_dir, 'tests')
+    lib_dir = os.path.join(script_dir, 'lib')
+
+    # If no multiprocessing is available, fallback to serial test execution
+    max_jobs_default = 1
+    if jittests.HAVE_MULTIPROCESSING:
+        try:
+            max_jobs_default = jittests.cpu_count()
+        except NotImplementedError:
+            pass
+
+    # The [TESTS] optional arguments are paths of test files relative
+    # to the jit-test/tests directory.
+
+    from optparse import OptionParser
+    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
+    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
+                  help='show js shell command run')
+    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
+                  action='store_true', help='show command lines of failed tests')
+    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
+                  help='show output from js shell')
+    op.add_option('-x', '--exclude', dest='exclude', action='append',
+                  help='exclude given test dir or path')
+    op.add_option('--no-slow', dest='run_slow', action='store_false',
+                  help='do not run tests marked as slow')
+    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
+                  help='set test timeout in seconds')
+    op.add_option('--no-progress', dest='hide_progress', action='store_true',
+                  help='hide progress bar')
+    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
+                  help='Tinderbox-parseable output format')
+    op.add_option('--args', dest='shell_args', default='',
+                  help='extra args to pass to the JS shell')
+    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
+                  help='Write a list of failed tests to [FILE]')
+    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
+                  help='Run test files listed in [FILE]')
+    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
+                  help='Retest using test list file [FILE]')
+    op.add_option('-g', '--debug', dest='debug', action='store_true',
+                  help='Run test in gdb')
+    op.add_option('--valgrind', dest='valgrind', action='store_true',
+                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
+    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
+                  help='Run all tests with valgrind, if valgrind is in $PATH.')
+    op.add_option('--jitflags', dest='jitflags', default='',
+                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
+                       'Long flags, such as "--no-jm", should be set using --args.')
+    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
+                  help='Use js-shell file indirection instead of piping stdio.')
+    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
+                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
+    op.add_option('--ion', dest='ion', action='store_true',
+                  help='Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)')
+    op.add_option('--tbpl', dest='tbpl', action='store_true',
+                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
+    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
+                  help='Number of tests to run in parallel (default %default)')
+
+    options, args = op.parse_args(argv)
+    if len(args) < 1:
+        op.error('missing JS_SHELL argument')
+    # We need to make sure we are using backslashes on Windows.
+    options.js_shell, test_args = os.path.abspath(args[0]), args[1:]
+
+    if jittests.stdio_might_be_broken():
+        # Prefer erring on the side of caution and not using stdio if
+        # it might be broken on this platform. The file-redirect
+        # fallback should work on any platform, so at worst by
+        # guessing wrong we might have slowed down the tests a bit.
+        #
+        # XXX technically we could check for broken stdio, but it
+        # really seems like overkill.
+        options.avoid_stdio = True
+
+    if options.retest:
+        options.read_tests = options.retest
+        options.write_failures = options.retest
+
+    test_list = []
+    read_all = True
+
+    if test_args:
+        read_all = False
+        for arg in test_args:
+            test_list += jittests.find_tests(test_dir, arg)
+
+    if options.read_tests:
+        read_all = False
+        try:
+            f = open(options.read_tests)
+            for line in f:
+                test_list.append(os.path.join(test_dir, line.strip('\n')))
+            f.close()
+        except IOError:
+            if options.retest:
+                read_all = True
+            else:
+                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
+                                 options.read_tests)
+                traceback.print_exc()
+                sys.stderr.write('---\n')
+
+    if read_all:
+        test_list = jittests.find_tests(test_dir)
+
+    if options.exclude:
+        exclude_list = []
+        for exclude in options.exclude:
+            exclude_list += jittests.find_tests(test_dir, exclude)
+        test_list = [ test for test in test_list if test not in set(exclude_list) ]
+
+    if not test_list:
+        print >> sys.stderr, "No tests found matching command line arguments."
+        sys.exit(0)
+
+    test_list = [jittests.Test.from_file(_, options) for _ in test_list]
+
+    if not options.run_slow:
+        test_list = [ _ for _ in test_list if not _.slow ]
+
+    # The full test list is ready. Now create copies for each JIT configuration.
+    job_list = []
+    if options.tbpl:
+        # Running all bits would take forever. Instead, we test a few interesting combinations.
+        flags = [
+            ['--no-jm'],
+            ['--ion-eager'],
+            # Below, equivalents the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
+            ['--no-ion', '--no-jm', '--no-ti'],
+            ['--no-ion', '--no-ti'],
+            ['--no-ion', '--no-ti', '-a', '-d'],
+            ['--no-ion', '--no-jm'],
+            ['--no-ion'],
+            ['--no-ion', '-a'],
+            ['--no-ion', '-a', '-d'],
+            ['--no-ion', '-d']
+        ]
+        for test in test_list:
+            for variant in flags:
+                new_test = test.copy()
+                new_test.jitflags.extend(variant)
+                job_list.append(new_test)
+    elif options.ion:
+        flags = [['--no-jm'], ['--ion-eager']]
+        for test in test_list:
+            for variant in flags:
+                new_test = test.copy()
+                new_test.jitflags.extend(variant)
+                job_list.append(new_test)
+    else:
+        jitflags_list = jittests.parse_jitflags(options)
+        for test in test_list:
+            for jitflags in jitflags_list:
+                new_test = test.copy()
+                new_test.jitflags.extend(jitflags)
+                job_list.append(new_test)
+
+    shell_args = shlex.split(options.shell_args)
+
+    if options.debug:
+        if len(job_list) > 1:
+            print 'Multiple tests match command line arguments, debugger can only run one'
+            for tc in job_list:
+                print ' %s' % tc.path
+            sys.exit(1)
+
+        tc = job_list[0]
+        cmd = [ 'gdb', '--args' ] + jittests.get_test_cmd(options.js_shell, tc.path, tc.jitflags, lib_dir, shell_args)
+        subprocess.call(cmd)
+        sys.exit()
+
+    try:
+        ok = None
+        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
+            ok = jittests.run_tests_parallel(job_list, test_dir, lib_dir, shell_args, options)
+        else:
+            ok = jittests.run_tests(job_list, test_dir, lib_dir, shell_args, options)
+        if not ok:
+            sys.exit(2)
+    except OSError:
+        if not os.path.exists(options.js_shell):
+            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % options.js_shell
+            sys.exit(1)
+        else:
+            raise
+
 if __name__ == '__main__':
-    jittests.main(sys.argv[1:])
+    main(sys.argv[1:])
diff --git a/js/src/tests/lib/jittests.py b/js/src/tests/lib/jittests.py
index 6899d3e782fb..1f49d4c69653 100644
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -7,9 +7,9 @@
 # jit_test.py -- Python harness for JavaScript trace tests.
 
 from __future__ import print_function
-import os, sys, tempfile, traceback, time, shlex
+import os, sys, tempfile, traceback, time
 import subprocess
-from subprocess import Popen, PIPE, call
+from subprocess import Popen, PIPE
 from threading import Thread
 import signal
 
@@ -545,197 +545,5 @@ def platform_might_be_android():
 def stdio_might_be_broken():
     return platform_might_be_android()
 
-def main(argv):
-
-    script_path = os.path.abspath(sys.modules['__main__'].__file__)
-    script_dir = os.path.dirname(script_path)
-    test_dir = os.path.join(script_dir, 'tests')
-    lib_dir = os.path.join(script_dir, 'lib')
-
-    # If no multiprocessing is available, fallback to serial test execution
-    max_jobs_default = 1
-    if HAVE_MULTIPROCESSING:
-        try:
-            max_jobs_default = cpu_count()
-        except NotImplementedError:
-            pass
-
-    # The [TESTS] optional arguments are paths of test files relative
-    # to the jit-test/tests directory.
-
-    from optparse import OptionParser
-    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
-    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
-                  help='show js shell command run')
-    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
-                  action='store_true', help='show command lines of failed tests')
-    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
-                  help='show output from js shell')
-    op.add_option('-x', '--exclude', dest='exclude', action='append',
-                  help='exclude given test dir or path')
-    op.add_option('--no-slow', dest='run_slow', action='store_false',
-                  help='do not run tests marked as slow')
-    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
-                  help='set test timeout in seconds')
-    op.add_option('--no-progress', dest='hide_progress', action='store_true',
-                  help='hide progress bar')
-    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
-                  help='Tinderbox-parseable output format')
-    op.add_option('--args', dest='shell_args', default='',
-                  help='extra args to pass to the JS shell')
-    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
-                  help='Write a list of failed tests to [FILE]')
-    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
-                  help='Run test files listed in [FILE]')
-    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
-                  help='Retest using test list file [FILE]')
-    op.add_option('-g', '--debug', dest='debug', action='store_true',
-                  help='Run test in gdb')
-    op.add_option('--valgrind', dest='valgrind', action='store_true',
-                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
-    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
-                  help='Run all tests with valgrind, if valgrind is in $PATH.')
-    op.add_option('--jitflags', dest='jitflags', default='',
-                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
-                       'Long flags, such as "--no-jm", should be set using --args.')
-    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
-                  help='Use js-shell file indirection instead of piping stdio.')
-    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
-                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
-    op.add_option('--ion', dest='ion', action='store_true',
-                  help='Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)')
-    op.add_option('--tbpl', dest='tbpl', action='store_true',
-                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
-    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
-                  help='Number of tests to run in parallel (default %default)')
-
-    options, args = op.parse_args(argv)
-    if len(args) < 1:
-        op.error('missing JS_SHELL argument')
-    # We need to make sure we are using backslashes on Windows.
-    options.js_shell, test_args = os.path.abspath(args[0]), args[1:]
-
-    if stdio_might_be_broken():
-        # Prefer erring on the side of caution and not using stdio if
-        # it might be broken on this platform. The file-redirect
-        # fallback should work on any platform, so at worst by
-        # guessing wrong we might have slowed down the tests a bit.
-        #
-        # XXX technically we could check for broken stdio, but it
-        # really seems like overkill.
-        options.avoid_stdio = True
-
-    if options.retest:
-        options.read_tests = options.retest
-        options.write_failures = options.retest
-
-    test_list = []
-    read_all = True
-
-    if test_args:
-        read_all = False
-        for arg in test_args:
-            test_list += find_tests(test_dir, arg)
-
-    if options.read_tests:
-        read_all = False
-        try:
-            f = open(options.read_tests)
-            for line in f:
-                test_list.append(os.path.join(test_dir, line.strip('\n')))
-            f.close()
-        except IOError:
-            if options.retest:
-                read_all = True
-            else:
-                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
-                                 options.read_tests)
-                traceback.print_exc()
-                sys.stderr.write('---\n')
-
-    if read_all:
-        test_list = find_tests(test_dir)
-
-    if options.exclude:
-        exclude_list = []
-        for exclude in options.exclude:
-            exclude_list += find_tests(test_dir, exclude)
-        test_list = [ test for test in test_list if test not in set(exclude_list) ]
-
-    if not test_list:
-        print("No tests found matching command line arguments.", file=sys.stderr)
-        sys.exit(0)
-
-    test_list = [ Test.from_file(_, options) for _ in test_list ]
-
-    if not options.run_slow:
-        test_list = [ _ for _ in test_list if not _.slow ]
-
-    # The full test list is ready. Now create copies for each JIT configuration.
-    job_list = []
-    if options.tbpl:
-        # Running all bits would take forever. Instead, we test a few interesting combinations.
-        flags = [
-            ['--no-jm'],
-            ['--ion-eager'],
-            # Below, equivalents the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
-            ['--no-ion', '--no-jm', '--no-ti'],
-            ['--no-ion', '--no-ti'],
-            ['--no-ion', '--no-ti', '-a', '-d'],
-            ['--no-ion', '--no-jm'],
-            ['--no-ion'],
-            ['--no-ion', '-a'],
-            ['--no-ion', '-a', '-d'],
-            ['--no-ion', '-d']
-        ]
-        for test in test_list:
-            for variant in flags:
-                new_test = test.copy()
-                new_test.jitflags.extend(variant)
-                job_list.append(new_test)
-    elif options.ion:
-        flags = [['--no-jm'], ['--ion-eager']]
-        for test in test_list:
-            for variant in flags:
-                new_test = test.copy()
-                new_test.jitflags.extend(variant)
-                job_list.append(new_test)
-    else:
-        jitflags_list = parse_jitflags(options)
-        for test in test_list:
-            for jitflags in jitflags_list:
-                new_test = test.copy()
-                new_test.jitflags.extend(jitflags)
-                job_list.append(new_test)
-
-    shell_args = shlex.split(options.shell_args)
-
-    if options.debug:
-        if len(job_list) > 1:
-            print('Multiple tests match command line arguments, debugger can only run one')
-            for tc in job_list:
-                print(' %s' % tc.path)
-            sys.exit(1)
-
-        tc = job_list[0]
-        cmd = ['gdb', '--args'] + get_test_cmd(options.js_shell, tc.path, tc.jitflags, lib_dir, shell_args)
-        call(cmd)
-        sys.exit()
-
-    try:
-        ok = None
-        if options.max_jobs > 1 and HAVE_MULTIPROCESSING:
-            ok = run_tests_parallel(job_list, test_dir, lib_dir, shell_args, options)
-        else:
-            ok = run_tests(job_list, test_dir, lib_dir, shell_args, options)
-        if not ok:
-            sys.exit(2)
-    except OSError:
-        if not os.path.exists(options.js_shell):
-            print >> sys.stderr, "JS shell argument: file does not exist: '%s'"%options.js_shell
-            sys.exit(1)
-        else:
-            raise
-
 if __name__ == '__main__':
     print('Use ../jit-test/jit_test.py to run these tests.')
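
Usage note (illustrative, not part of the patch): with this change applied, the harness is driven through js/src/jit-test/jit_test.py rather than through jittests.main(), and the first positional argument is the path to a built JS shell. Assuming a hypothetical shell path such as obj/dist/bin/js, a typical invocation using only options defined by the OptionParser above would be:

    python js/src/jit-test/jit_test.py obj/dist/bin/js -j 4 --tbpl

The shell path here is a placeholder; -j selects the worker count and --tbpl runs each test under the IonMonkey flag combinations listed in the patch.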