Bug 1619475 - partial Python 3 compatibility for jit-tests and jstests r=arai

This fixes all of the jit-test problems I encountered, and all but the wpt portion of jstests. It also ports jstests from optparse to argparse; this is not strictly necessary, but optparse is deprecated, I like argparse better, and the port happened to fix the minor compatibility issue I ran into (even though that issue has a trivial fix on its own).

Differential Revision: https://phabricator.services.mozilla.com/D65047

--HG--
extra : moz-landing-system : lando
Steve Fink 2020-03-06 18:07:38 +00:00
Parent 24227e63dc
Commit 4efc95ae8c
11 changed files with 179 additions and 165 deletions
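
The optparse-to-argparse port visible in the jit_test.py diff below is mostly mechanical: add_option becomes add_argument, type='choice' and type='string' become choices= and type=str, %default becomes %(default)s, SUPPRESS_HELP becomes argparse.SUPPRESS, and a declared JS_SHELL positional replaces the manual len(args) check. A minimal sketch of the pattern (illustrative code, not taken from the patch):

```python
import argparse

op = argparse.ArgumentParser(description='demo of the migration pattern')
# optparse: op.add_option('--format', type='choice', choices=[...],
#                         help='... (default %default)')
op.add_argument('--format', choices=('automation', 'none'), default='none',
                help='Output format (default %(default)s).')
# optparse left positionals in a leftover args list; argparse declares them.
op.add_argument('js_shell', metavar='JS_SHELL')
# parse_known_args() keeps the trailing [TESTS] paths as unparsed extras.
options, test_args = op.parse_known_args(['--format=none', '/bin/js', 'a.js'])
assert options.js_shell == '/bin/js'
assert test_args == ['a.js']
```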

View file

@@ -15,19 +15,23 @@ import sys
import traceback
def add_libdir_to_path():
def add_tests_dir_to_path():
from os.path import dirname, exists, join, realpath
js_src_dir = dirname(dirname(realpath(sys.argv[0])))
assert exists(join(js_src_dir, 'jsapi.h'))
sys.path.insert(0, join(js_src_dir, 'lib'))
sys.path.insert(0, join(js_src_dir, 'tests', 'lib'))
sys.path.insert(0, join(js_src_dir, 'tests'))
add_libdir_to_path()
add_tests_dir_to_path()
import jittests
from tests import get_jitflags, valid_jitflags, get_cpu_count, get_environment_overlay, \
change_env
from lib import jittests
from lib.tests import (
get_jitflags,
valid_jitflags,
get_cpu_count,
get_environment_overlay,
change_env,
)
def which(name):
@@ -66,135 +70,132 @@ def choose_item(jobs, max_items, display):
def main(argv):
# The [TESTS] optional arguments are paths of test files relative
# to the jit-test/tests directory.
from optparse import OptionParser, SUPPRESS_HELP
op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
help='show js shell command run')
op.add_option('-f', '--show-failed-cmd', dest='show_failed',
action='store_true',
help='show command lines of failed tests')
op.add_option('-o', '--show-output', dest='show_output',
action='store_true',
help='show output from js shell')
op.add_option('-F', '--failed-only', dest='failed_only',
action='store_true',
help="if --show-output is given, only print output for"
" failed tests")
op.add_option('--no-show-failed', dest='no_show_failed',
action='store_true',
help="don't print output for failed tests"
" (no-op with --show-output)")
op.add_option('-x', '--exclude', dest='exclude',
default=[], action='append',
help='exclude given test dir or path')
op.add_option('--exclude-from', dest='exclude_from', type=str,
help='exclude each test dir or path in FILE')
op.add_option('--slow', dest='run_slow', action='store_true',
help='also run tests marked as slow')
op.add_option('--no-slow', dest='run_slow', action='store_false',
help='do not run tests marked as slow (the default)')
op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
help='set test timeout in seconds')
op.add_option('--no-progress', dest='hide_progress', action='store_true',
help='hide progress bar')
op.add_option('--tinderbox', dest='format', action='store_const',
const='automation',
help='Use automation-parseable output format')
op.add_option('--format', dest='format', default='none', type='choice',
choices=['automation', 'none'],
help='Output format. Either automation or none'
' (default %default).')
op.add_option('--args', dest='shell_args', metavar='ARGS', default='',
help='extra args to pass to the JS shell')
op.add_option('--feature-args', dest='feature_args', metavar='ARGS',
default='',
help='even more args to pass to the JS shell '
'(for compatibility with jstests.py)')
op.add_option('-w', '--write-failures', dest='write_failures',
metavar='FILE',
help='Write a list of failed tests to [FILE]')
op.add_option('-C', '--check-output', action='store_true', dest='check_output',
help='Run tests to check output for different jit-flags')
op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
help='Run test files listed in [FILE]')
op.add_option('-R', '--retest', dest='retest', metavar='FILE',
help='Retest using test list file [FILE]')
op.add_option('-g', '--debug', action='store_const', const='gdb', dest='debugger',
help='Run a single test under the gdb debugger')
op.add_option('-G', '--debug-rr', action='store_const', const='rr', dest='debugger',
help='Run a single test under the rr debugger')
op.add_option('--debugger', type='string',
help='Run a single test under the specified debugger')
op.add_option('--valgrind', dest='valgrind', action='store_true',
help='Enable the |valgrind| flag, if valgrind is in $PATH.')
op.add_option('--unusable-error-status', action='store_true',
help='Ignore incorrect exit status on tests that should return nonzero.')
op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
help='Run all tests with valgrind, if valgrind is in $PATH.')
op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
help='Use js-shell file indirection instead of piping stdio.')
op.add_option('--write-failure-output', dest='write_failure_output',
action='store_true',
help='With --write-failures=FILE, additionally write the'
' output of failed tests to [FILE]')
op.add_option('--jitflags', dest='jitflags', default='none',
choices=valid_jitflags(),
help='IonMonkey option combinations. One of %s.' % ', '.join(valid_jitflags()))
op.add_option('--ion', dest='jitflags', action='store_const', const='ion',
help='Run tests once with --ion-eager and once with'
' --baseline-eager (equivalent to --jitflags=ion)')
op.add_option('--tbpl', dest='jitflags', action='store_const', const='all',
help='Run tests with all IonMonkey option combinations'
' (equivalent to --jitflags=all)')
op.add_option('-j', '--worker-count', dest='max_jobs', type=int,
default=max(1, get_cpu_count()),
help='Number of tests to run in parallel (default %default)')
op.add_option('--remote', action='store_true',
help='Run tests on a remote device')
op.add_option('--deviceIP', action='store',
type='string', dest='device_ip',
help='IP address of remote device to test')
op.add_option('--devicePort', action='store',
type=int, dest='device_port', default=20701,
help='port of remote device to test')
op.add_option('--deviceSerial', action='store',
type='string', dest='device_serial', default=None,
help='ADB device serial number of remote device to test')
op.add_option('--remoteTestRoot', dest='remote_test_root', action='store',
type='string', default='/data/local/tests',
help='The remote directory to use as test root'
' (eg. /data/local/tests)')
op.add_option('--localLib', dest='local_lib', action='store',
type='string',
help='The location of libraries to push -- preferably'
' stripped')
op.add_option('--repeat', type=int, default=1,
help='Repeat tests the given number of times.')
op.add_option('--this-chunk', type=int, default=1,
help='The test chunk to run.')
op.add_option('--total-chunks', type=int, default=1,
help='The total number of test chunks.')
op.add_option('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
help='Ignore timeouts of tests listed in [FILE]')
op.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
help="instead of running tests, use them to test the "
"Reflect.stringify code in specified file")
op.add_option('--run-binast', action='store_true',
dest="run_binast",
help="By default BinAST testcases encoded from JS "
"testcases are skipped. If specified, BinAST testcases "
"are also executed.")
import argparse
op = argparse.ArgumentParser(description='Run jit-test JS shell tests')
op.add_argument('-s', '--show-cmd', dest='show_cmd', action='store_true',
help='show js shell command run')
op.add_argument('-f', '--show-failed-cmd', dest='show_failed',
action='store_true',
help='show command lines of failed tests')
op.add_argument('-o', '--show-output', dest='show_output',
action='store_true',
help='show output from js shell')
op.add_argument('-F', '--failed-only', dest='failed_only',
action='store_true',
help="if --show-output is given, only print output for"
" failed tests")
op.add_argument('--no-show-failed', dest='no_show_failed',
action='store_true',
help="don't print output for failed tests"
" (no-op with --show-output)")
op.add_argument('-x', '--exclude', dest='exclude',
default=[], action='append',
help='exclude given test dir or path')
op.add_argument('--exclude-from', dest='exclude_from', type=str,
help='exclude each test dir or path in FILE')
op.add_argument('--slow', dest='run_slow', action='store_true',
help='also run tests marked as slow')
op.add_argument('--no-slow', dest='run_slow', action='store_false',
help='do not run tests marked as slow (the default)')
op.add_argument('-t', '--timeout', dest='timeout', type=float, default=150.0,
help='set test timeout in seconds')
op.add_argument('--no-progress', dest='hide_progress', action='store_true',
help='hide progress bar')
op.add_argument('--tinderbox', dest='format', action='store_const',
const='automation',
help='Use automation-parseable output format')
op.add_argument('--format', dest='format', default='none',
choices=('automation', 'none'),
help='Output format (default %(default)s).')
op.add_argument('--args', dest='shell_args', metavar='ARGS', default='',
help='extra args to pass to the JS shell')
op.add_argument('--feature-args', dest='feature_args', metavar='ARGS',
default='',
help='even more args to pass to the JS shell '
'(for compatibility with jstests.py)')
op.add_argument('-w', '--write-failures', dest='write_failures',
metavar='FILE',
help='Write a list of failed tests to [FILE]')
op.add_argument('-C', '--check-output', action='store_true', dest='check_output',
help='Run tests to check output for different jit-flags')
op.add_argument('-r', '--read-tests', dest='read_tests', metavar='FILE',
help='Run test files listed in [FILE]')
op.add_argument('-R', '--retest', dest='retest', metavar='FILE',
help='Retest using test list file [FILE]')
op.add_argument('-g', '--debug', action='store_const', const='gdb', dest='debugger',
help='Run a single test under the gdb debugger')
op.add_argument('-G', '--debug-rr', action='store_const', const='rr', dest='debugger',
help='Run a single test under the rr debugger')
op.add_argument('--debugger', type=str,
help='Run a single test under the specified debugger')
op.add_argument('--valgrind', dest='valgrind', action='store_true',
help='Enable the |valgrind| flag, if valgrind is in $PATH.')
op.add_argument('--unusable-error-status', action='store_true',
help='Ignore incorrect exit status on tests that should return nonzero.')
op.add_argument('--valgrind-all', dest='valgrind_all', action='store_true',
help='Run all tests with valgrind, if valgrind is in $PATH.')
op.add_argument('--avoid-stdio', dest='avoid_stdio', action='store_true',
help='Use js-shell file indirection instead of piping stdio.')
op.add_argument('--write-failure-output', dest='write_failure_output',
action='store_true',
help='With --write-failures=FILE, additionally write the'
' output of failed tests to [FILE]')
op.add_argument('--jitflags', dest='jitflags', default='none',
choices=valid_jitflags(),
help='IonMonkey option combinations (default %(default)s).')
op.add_argument('--ion', dest='jitflags', action='store_const', const='ion',
help='Run tests once with --ion-eager and once with'
' --baseline-eager (equivalent to --jitflags=ion)')
op.add_argument('--tbpl', dest='jitflags', action='store_const', const='all',
help='Run tests with all IonMonkey option combinations'
' (equivalent to --jitflags=all)')
op.add_argument('-j', '--worker-count', dest='max_jobs', type=int,
default=max(1, get_cpu_count()),
help='Number of tests to run in parallel (default %(default)s).')
op.add_argument('--remote', action='store_true',
help='Run tests on a remote device')
op.add_argument('--deviceIP', action='store',
type=str, dest='device_ip',
help='IP address of remote device to test')
op.add_argument('--devicePort', action='store',
type=int, dest='device_port', default=20701,
help='port of remote device to test')
op.add_argument('--deviceSerial', action='store',
type=str, dest='device_serial', default=None,
help='ADB device serial number of remote device to test')
op.add_argument('--remoteTestRoot', dest='remote_test_root', action='store',
type=str, default='/data/local/tests',
help='The remote directory to use as test root'
' (eg. /data/local/tests)')
op.add_argument('--localLib', dest='local_lib', action='store',
type=str,
help='The location of libraries to push -- preferably'
' stripped')
op.add_argument('--repeat', type=int, default=1,
help='Repeat tests the given number of times.')
op.add_argument('--this-chunk', type=int, default=1,
help='The test chunk to run.')
op.add_argument('--total-chunks', type=int, default=1,
help='The total number of test chunks.')
op.add_argument('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
help='Ignore timeouts of tests listed in [FILE]')
op.add_argument('--test-reflect-stringify', dest="test_reflect_stringify",
help="instead of running tests, use them to test the "
"Reflect.stringify code in specified file")
op.add_argument('--run-binast', action='store_true',
dest="run_binast",
help="By default BinAST testcases encoded from JS "
"testcases are skipped. If specified, BinAST testcases "
"are also executed.")
# --enable-webrender is ignored as it is not relevant for JIT
# tests, but is required for harness compatibility.
op.add_option('--enable-webrender', action='store_true',
dest="enable_webrender", default=False,
help=SUPPRESS_HELP)
op.add_argument('--enable-webrender', action='store_true',
dest="enable_webrender", default=False,
help=argparse.SUPPRESS)
op.add_argument('js_shell', metavar='JS_SHELL', help='JS shell to run tests with')
options, args = op.parse_args(argv)
if len(args) < 1:
op.error('missing JS_SHELL argument')
js_shell = which(args[0])
test_args = args[1:]
options, test_args = op.parse_known_args(argv)
js_shell = which(options.js_shell)
test_environment = get_environment_overlay(js_shell)
if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):

View file

@@ -1,3 +1,2 @@
assertEq((/(?!(?!(?!6)[\Wc]))/i).test(), false);
assertEq("foobar\xff5baz\u1200".search(/bar\u0178\d/i), 3);

View file

@@ -16,13 +16,13 @@ from collections import namedtuple
from datetime import datetime
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
from tasks_unix import run_all_tests
from .tasks_unix import run_all_tests
else:
from tasks_win import run_all_tests
from .tasks_win import run_all_tests
from progressbar import ProgressBar, NullProgressBar
from results import TestOutput, escape_cmdline
from structuredlog import TestLogger
from .progressbar import ProgressBar, NullProgressBar
from .results import TestOutput, escape_cmdline
from .structuredlog import TestLogger
TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
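
These relative-import changes (together with the "from lib import jittests" change in jit_test.py above) are needed because Python 3 removed implicit relative imports: once jittests.py is loaded as part of the lib package, a bare "import progressbar" no longer resolves to a sibling module. A self-contained demo of the fix, using a hypothetical throwaway package:

```python
import os
import sys
import tempfile

# Build a throwaway package mirroring the tests/lib layout.
root = tempfile.mkdtemp()
pkg = os.path.join(root, 'lib')
os.mkdir(pkg)
open(os.path.join(pkg, '__init__.py'), 'w').close()
with open(os.path.join(pkg, 'results.py'), 'w') as f:
    f.write('VALUE = 42\n')
with open(os.path.join(pkg, 'jittests.py'), 'w') as f:
    # Package-relative import, as in the patch; on Python 3 a bare
    # "import results" here would raise ModuleNotFoundError.
    f.write('from .results import VALUE\n')

sys.path.insert(0, root)
from lib import jittests
assert jittests.VALUE == 42
```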
@@ -203,7 +203,7 @@ class JitTest:
# For each list of jit flags, make a copy of the test.
return [self.copy_and_extend_jitflags(v) for v in variants]
COOKIE = '|jit-test|'
COOKIE = b'|jit-test|'
# We would use 500019 (5k19), but quit() only accepts values up to 127, due to fuzzers
SKIPPED_EXIT_STATUS = 59
@@ -212,10 +212,10 @@ class JitTest:
@classmethod
def find_directives(cls, file_name):
meta = ''
line = open(file_name).readline()
line = open(file_name, "rb").readline()
i = line.find(cls.COOKIE)
if i != -1:
meta = ';' + line[i + len(cls.COOKIE):].strip('\n')
meta = ';' + line[i + len(cls.COOKIE):].decode(errors='strict').strip('\n')
return meta
@classmethod
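
The find_directives() change just above is representative of the byte-handling fixes: a test file may not be valid UTF-8, so the first line is read with open(file_name, "rb"), the |jit-test| cookie is matched as bytes, and only the directive tail after the cookie is decoded. A standalone sketch of that logic:

```python
COOKIE = b'|jit-test|'

def find_directives_line(line):
    # line: the raw first line of a test file, as read via open(path, 'rb').
    meta = ''
    i = line.find(COOKIE)
    if i != -1:
        meta = ';' + line[i + len(COOKIE):].decode(errors='strict').strip('\n')
    return meta

assert find_directives_line(b'// |jit-test| skip-if: true\n') == '; skip-if: true'
```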

View file

@@ -9,7 +9,7 @@ import re
import sys
from subprocess import Popen, PIPE
from tests import RefTestCase
from .tests import RefTestCase
def split_path_into_dirs(path):
@@ -106,7 +106,7 @@ class XULInfoTester:
'-e', self.js_prologue,
'-e', 'print(!!({}))'.format(cond)
]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = p.communicate()
if out in ('true\n', 'true\r\n'):
ans = True
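
Adding universal_newlines=True above keeps Popen text-based on both Python versions: on Python 3, communicate() then returns str rather than bytes, so the existing 'true\n' comparisons continue to match. A quick illustration (hypothetical child command):

```python
import sys
from subprocess import Popen, PIPE

p = Popen([sys.executable, '-c', 'print("true")'],
          stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = p.communicate()
# out is str on both Python 2 and 3; without universal_newlines it is
# bytes on Python 3 and the comparison below would always be False.
assert out in ('true\n', 'true\r\n')
```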

View file

@@ -1,13 +1,14 @@
# Text progress bar library, like curl or scp.
from datetime import datetime, timedelta
import math
import sys
from datetime import datetime, timedelta
if sys.platform.startswith('win'):
from terminal_win import Terminal
from .terminal_win import Terminal
else:
from terminal_unix import Terminal
from .terminal_unix import Terminal
class NullProgressBar(object):

View file

@@ -4,8 +4,8 @@ import json
import pipes
import re
from progressbar import NullProgressBar, ProgressBar
from structuredlog import TestLogger
from .progressbar import NullProgressBar, ProgressBar
from .structuredlog import TestLogger
# subprocess.list2cmdline does not properly escape for sh-like shells

View file

@@ -7,9 +7,15 @@ import os
import select
import signal
import sys
from datetime import datetime, timedelta
from progressbar import ProgressBar
from results import NullTestOutput, TestOutput, escape_cmdline
from .progressbar import ProgressBar
from .results import (
NullTestOutput,
TestOutput,
escape_cmdline,
)
class Task(object):
@@ -186,8 +192,8 @@ def reap_zombies(tasks, timeout):
TestOutput(
ended.test,
ended.cmd,
''.join(ended.out),
''.join(ended.err),
b''.join(ended.out).decode(errors='replace'),
b''.join(ended.err).decode(errors='replace'),
returncode,
(datetime.now() - ended.start).total_seconds(),
timed_out(ended, timeout),
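
In reap_zombies() above, output read from the child's pipes accumulates as bytes chunks, so they are joined with b'' and decoded with errors='replace'; malformed test output then degrades to replacement characters instead of crashing the harness. For example:

```python
# Hypothetical chunks as read from a child process pipe.
chunks = [b'partial \xff', b'output\n']
text = b''.join(chunks).decode(errors='replace')
assert text == 'partial \ufffdoutput\n'  # the invalid byte becomes U+FFFD
```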

View file

@@ -5,12 +5,14 @@ from __future__ import print_function, unicode_literals, division
import subprocess
import sys
from datetime import datetime, timedelta
from progressbar import ProgressBar
from results import NullTestOutput, TestOutput, escape_cmdline
from threading import Thread
from Queue import Queue, Empty
from .progressbar import ProgressBar
from .results import NullTestOutput, TestOutput, escape_cmdline
class EndMarker:
pass
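
The removed "from Queue import Queue, Empty" is the Python 2 spelling; Python 3 renamed the module to lowercase queue. The replacement import is not shown in this excerpt; one version-neutral option (an assumption, not necessarily what the patch chose) is six.moves:

```python
from six.moves.queue import Queue, Empty

q = Queue()
q.put('task')
assert q.get() == 'task'
try:
    q.get(block=False)  # an empty queue raises Empty instead of blocking
except Empty:
    pass
```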

mach
View file

@@ -42,7 +42,6 @@ py2commands="
ide
import-pr
install
jit-test
jsapi-tests
jsshell-bench
jstestbrowser

View file

@@ -5,10 +5,15 @@ import os
from datetime import datetime, timedelta
import tarfile
import requests
import six
import vcs
from cStringIO import StringIO
import logging
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
HEADERS = {'User-Agent': "wpt manifest download"}
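
The try/except import above is the usual dual-version shim: cStringIO exists only on Python 2, and io.BytesIO is the byte-oriented replacement on both versions; either gives tarfile a seekable binary stream for the downloaded archive bytes. A self-contained demo (hypothetical member name):

```python
import io
import tarfile

try:
    from cStringIO import StringIO as BytesIO  # Python 2
except ImportError:
    from io import BytesIO  # Python 3

# Build an in-memory .tar.gz, then reopen it from raw bytes the way the
# manifest code reopens req.content.
buf = io.BytesIO()
with tarfile.open(mode='w:gz', fileobj=buf) as tar:
    data = b'{}'
    info = tarfile.TarInfo('MANIFEST.json')
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))

tar = tarfile.open(mode='r:gz', fileobj=BytesIO(buf.getvalue()))
assert tar.extractfile('MANIFEST.json').read() == b'{}'
```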
@@ -117,7 +122,7 @@ def taskcluster_url(logger, commits):
def download_manifest(logger, test_paths, commits_func, url_func, force=False):
manifest_paths = [item["manifest_path"] for item in test_paths.itervalues()]
manifest_paths = [item["manifest_path"] for item in six.itervalues(test_paths)]
if not force and not should_download(logger, manifest_paths):
return True
@@ -141,8 +146,8 @@ def download_manifest(logger, test_paths, commits_func, url_func, force=False):
"HTTP status %d" % req.status_code)
return False
tar = tarfile.open(mode="r:gz", fileobj=StringIO(req.content))
for paths in test_paths.itervalues():
tar = tarfile.open(mode="r:gz", fileobj=BytesIO(req.content))
for paths in six.itervalues(test_paths):
try:
member = tar.getmember(paths["manifest_rel_path"].replace(os.path.sep, "/"))
except KeyError:
@@ -151,7 +156,7 @@ def download_manifest(logger, test_paths, commits_func, url_func, force=False):
try:
logger.debug("Unpacking %s to %s" % (member.name, paths["manifest_path"]))
src = tar.extractfile(member)
with open(paths["manifest_path"], "w") as dest:
with open(paths["manifest_path"], "wb") as dest:
dest.write(src.read())
src.close()
except IOError:
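
The six.itervalues() calls here (and six.iteritems() in manifestupdate.py below) bridge a dict API difference: Python 3 dropped itervalues()/iteritems() in favor of values()/items(). A tiny illustration with a made-up test_paths dict:

```python
import six

test_paths = {'/': {'manifest_path': 'a/MANIFEST.json'},
              '/mozilla/': {'manifest_path': 'b/MANIFEST.json'}}
manifest_paths = [item['manifest_path'] for item in six.itervalues(test_paths)]
assert sorted(manifest_paths) == ['a/MANIFEST.json', 'b/MANIFEST.json']
```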

View file

@@ -6,6 +6,7 @@ import argparse
import hashlib
import imp
import os
import six
import sys
from six.moves import configparser
@@ -92,7 +93,7 @@ def run(src_root, obj_root, logger=None, **kwargs):
test_paths = wptcommandline.get_test_paths(
wptcommandline.config.read(config_path))
for paths in test_paths.itervalues():
for paths in six.itervalues(test_paths):
if "manifest_path" not in paths:
paths["manifest_path"] = os.path.join(paths["metadata_path"],
"MANIFEST.json")
@@ -125,7 +126,7 @@ def ensure_manifest_directories(logger, test_paths):
def ensure_manifest_directories(logger, test_paths):
for paths in test_paths.itervalues():
for paths in six.itervalues(test_paths):
manifest_dir = os.path.dirname(paths["manifest_path"])
if not os.path.exists(manifest_dir):
logger.info("Creating directory %s" % manifest_dir)
@@ -169,7 +170,7 @@ def generate_config(logger, repo_root, wpt_dir, dest_path, force_rewrite=False):
parser.set('paths', 'prefs', os.path.abspath(os.path.join(wpt_dir, parser.get("paths", "prefs"))))
with open(dest_config_path, 'wb') as config_file:
with open(dest_config_path, 'wt') as config_file:
parser.write(config_file)
return dest_config_path
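
The 'wb' to 'wt' change matters because configparser.write() emits str on Python 3, so a binary-mode destination raises TypeError. A minimal check of the text-mode behavior (Python 3 semantics):

```python
import io
from six.moves import configparser

parser = configparser.RawConfigParser()
parser.add_section('paths')
parser.set('paths', 'prefs', '/tmp/prefs')

out = io.StringIO()  # stands in for open(dest_config_path, 'wt')
parser.write(out)
assert '[paths]' in out.getvalue()
```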
@@ -179,7 +180,7 @@ def load_and_update(logger, wpt_dir, test_paths, rebuild=False, config_dir=None,
update=True):
rv = {}
wptdir_hash = hashlib.sha256(os.path.abspath(wpt_dir)).hexdigest()
for url_base, paths in test_paths.iteritems():
for url_base, paths in six.iteritems(test_paths):
manifest_path = paths["manifest_path"]
this_cache_root = os.path.join(cache_root, wptdir_hash, os.path.dirname(paths["manifest_rel_path"]))
m = manifest.manifest.load_and_update(paths["tests_path"],