Bug 1125512 - Part 2: Use .format() instead of modulo operator in js test libs. r=terrence

Tooru Fujisawa 2015-01-28 05:19:30 +09:00
Parent 27b4fc6b24
Commit c413af1180
10 changed files with 142 additions and 103 deletions
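The change is mechanical throughout: each %-style interpolation is rewritten with str.format(), keeping the same conversion types and field widths under a {:...} spec. A minimal sketch of the equivalence, using a made-up value rather than anything from the patched files:

# Hypothetical illustration of the conversion pattern applied in this commit.
speedup = 12.3456
old_style = 'faster: %6.2fms (%+6.2f%%)' % (speedup, speedup)
new_style = 'faster: {:6.2f}ms ({:+6.2f}%)'.format(speedup, speedup)
assert old_style == new_style  # both render as 'faster:  12.35ms (+12.35%)'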

View file

@@ -2,9 +2,10 @@
"""usage: %progname candidate_path baseline_path
"""
from __future__ import print_function
import json
import optparse
from contextlib import nested
from operator import itemgetter
@@ -18,29 +19,32 @@ def compare(current, baseline):
try:
baseline_result = baseline[key]
except KeyError:
print key, 'missing from baseline'
print(key, 'missing from baseline')
continue
val_getter = itemgetter('average_ms', 'stddev_ms')
base_avg, base_stddev = val_getter(baseline_result)
current_avg, current_stddev = val_getter(current_result)
t_best, t_worst = current_avg - current_stddev, current_avg + current_stddev
base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
fmt = '%30s: %s'
if t_worst < base_t_best: # Worst takes less time (better) than baseline's best.
t_best = current_avg - current_stddev
t_worst = current_avg + current_stddev
base_t_best = base_avg - base_stddev
base_t_worst = base_avg + base_stddev
if t_worst < base_t_best:
# Worst takes less time (better) than baseline's best.
speedup = -((t_worst - base_t_best) / base_t_best) * 100
result = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % \
(t_worst, base_t_best, speedup)
result = 'faster: {:6.2f}ms < baseline {:6.2f}ms ({:+6.2f}%)'.format(
t_worst, base_t_best, speedup)
percent_speedups.append(speedup)
elif t_best > base_t_worst: # Best takes more time (worse) than baseline's worst.
elif t_best > base_t_worst:
# Best takes more time (worse) than baseline's worst.
slowdown = -((t_best - base_t_worst) / base_t_worst) * 100
result = 'SLOWER: %6.2fms > baseline %6.2fms (%+6.2f%%) ' % \
(t_best, base_t_worst, slowdown)
result = 'SLOWER: {:6.2f}ms > baseline {:6.2f}ms ({:+6.2f}%) '.format(
t_best, base_t_worst, slowdown)
percent_speedups.append(slowdown)
else:
result = 'Meh.'
print '%30s: %s' % (key, result)
print('{:30s}: {}'.format(key, result))
if percent_speedups:
print 'Average speedup: %.2f%%' % avg(percent_speedups)
print('Average speedup: {:.2f}%'.format(avg(percent_speedups)))
def compare_immediate(current_map, baseline_path):

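For context on the hunk above: a result counts as faster only when the current run's worst plausible time (mean plus one standard deviation) still beats the baseline's best plausible time (mean minus one standard deviation), and symmetrically for SLOWER; overlapping intervals are reported as 'Meh.'. A standalone sketch of that rule with made-up timings, not taken from any benchmark output:

def classify(current_avg, current_stddev, base_avg, base_stddev):
    # Reproduces the faster / SLOWER / Meh. decision in compare() above.
    t_best, t_worst = current_avg - current_stddev, current_avg + current_stddev
    base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
    if t_worst < base_t_best:
        # Even a bad current run beats a good baseline run.
        return 'faster', -((t_worst - base_t_best) / base_t_best) * 100
    if t_best > base_t_worst:
        # Even a good current run loses to a bad baseline run.
        return 'SLOWER', -((t_best - base_t_worst) / base_t_worst) * 100
    return 'Meh.', 0.0

print(classify(90.0, 5.0, 110.0, 5.0))  # ('faster', 9.52...), hypothetical numbers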
View file

@@ -360,7 +360,8 @@ def load(location, requested_paths, excluded_paths, xul_tester, reldir=''):
fullpath = os.path.join(location, filename)
# If any tests are requested by name, skip tests that do not match.
if requested_paths and not any(req in filename for req in requested_paths):
if requested_paths \
and not any(req in filename for req in requested_paths):
continue
# Skip excluded tests.

View file

@@ -49,7 +49,8 @@ class ProgressBar(object):
sys.stdout.write('\r[')
for layout in self.counters_fmt:
Terminal.set_color(layout['color'])
sys.stdout.write(('%' + str(self.limit_digits) + 'd') % data[layout['value']])
sys.stdout.write(('{:' + str(self.limit_digits) + 'd}').format(
data[layout['value']]))
Terminal.reset_color()
if layout != self.counters_fmt[-1]:
sys.stdout.write('|')
@@ -58,7 +59,7 @@ class ProgressBar(object):
# Build the bar.
pct = int(100.0 * current / self.limit)
sys.stdout.write('%3d%% ' % pct)
sys.stdout.write('{:3d}% '.format(pct))
barlen = int(1.0 * self.barlen * current / self.limit) - 1
bar = '=' * barlen + '>' + ' ' * (self.barlen - barlen - 1)
@@ -67,7 +68,7 @@ class ProgressBar(object):
# Update the bar.
dt = datetime.now() - self.t0
dt = dt.seconds + dt.microseconds * 1e-6
sys.stdout.write('%6.1fs' % dt)
sys.stdout.write('{:6.1f}s'.format(dt))
Terminal.clear_right()
# Force redisplay, since we didn't write a \n.

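The first ProgressBar hunk builds its format string by concatenation because the field width comes from self.limit_digits at runtime. str.format() can also take the width as a nested replacement field, which avoids the concatenation; a sketch of both spellings, using stand-in values rather than the class attributes:

limit_digits = 4  # stand-in for self.limit_digits
value = 37        # stand-in for data[layout['value']]
concatenated = ('{:' + str(limit_digits) + 'd}').format(value)  # as in the patch
nested = '{:{width}d}'.format(value, width=limit_digits)        # nested-field form
assert concatenated == nested == '  37'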
View file

@@ -6,7 +6,7 @@ import pipes
# subprocess.list2cmdline does not properly escape for sh-like shells
def escape_cmdline(args):
return ' '.join([ pipes.quote(a) for a in args ])
return ' '.join([pipes.quote(a) for a in args])
class TestOutput:
"""Output from a test run."""
@@ -76,7 +76,8 @@ class TestResult:
msg = line[len(' PASSED! '):]
results.append((cls.PASS, msg))
else:
m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE ((?:-|\\d)+) ---', line)
m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE'
' ((?:-|\\d)+) ---', line)
if m:
expected_rcs.append(int(m.group(1)))
@@ -118,7 +119,9 @@ class ResultsSink:
self.counts['TIMEOUT'] += 1
if isinstance(output, NullTestOutput):
if self.options.tinderbox:
self.print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
self.print_tinderbox_result(
'TEST-KNOWN-FAIL', output.test.path, time=output.dt,
skip=True)
self.counts['SKIP'] += 1
self.n += 1
else:
@@ -130,22 +133,26 @@ class ResultsSink:
self.groups.setdefault(dev_label, []).append(result.test.path)
if dev_label == 'REGRESSIONS':
show_output = self.options.show_output or not self.options.no_show_failed
show_output = self.options.show_output \
or not self.options.no_show_failed
elif dev_label == 'TIMEOUTS':
show_output = self.options.show_output
else:
show_output = self.options.show_output and not self.options.failed_only
show_output = self.options.show_output \
and not self.options.failed_only
if dev_label in ('REGRESSIONS', 'TIMEOUTS'):
show_cmd = self.options.show_cmd
else:
show_cmd = self.options.show_cmd and not self.options.failed_only
show_cmd = self.options.show_cmd \
and not self.options.failed_only
if show_output or show_cmd:
self.pb.beginline()
if show_output:
print('## %s: rc = %d, run time = %f' % (output.test.path, output.rc, output.dt), file=self.fp)
print('## {}: rc = {:d}, run time = {:f}'.format(
output.test.path, output.rc, output.dt), file=self.fp)
if show_cmd:
print(escape_cmdline(output.cmd), file=self.fp)
@@ -166,19 +173,23 @@ class ResultsSink:
if self.options.tinderbox:
if len(result.results) > 1:
for sub_ok, msg in result.results:
label = self.LABELS[(sub_ok, result.test.expect, result.test.random)][0]
tup = (sub_ok, result.test.expect, result.test.random)
label = self.LABELS[tup][0]
if label == 'TEST-UNEXPECTED-PASS':
label = 'TEST-PASS (EXPECTED RANDOM)'
self.print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
self.print_tinderbox_result(self.LABELS[
(result.result, result.test.expect, result.test.random)][0],
result.test.path, time=output.dt)
self.print_tinderbox_result(
label, result.test.path, time=output.dt,
message=msg)
tup = (result.result, result.test.expect, result.test.random)
self.print_tinderbox_result(
self.LABELS[tup][0], result.test.path, time=output.dt)
return
if dev_label:
def singular(label):
return "FIXED" if label == "FIXES" else label[:-1]
self.pb.message("%s - %s" % (singular(dev_label), output.test.path))
self.pb.message("{} - {}".format(singular(dev_label),
output.test.path))
self.pb.update(self.n, self.counts)
@@ -214,18 +225,18 @@ class ResultsSink:
print(label)
for path in paths:
print(' %s' % path)
print(' {}'.format(path))
if self.options.failure_file:
failure_file = open(self.options.failure_file, 'w')
if not self.all_passed():
if 'REGRESSIONS' in self.groups:
for path in self.groups['REGRESSIONS']:
print(path, file=failure_file)
if 'TIMEOUTS' in self.groups:
for path in self.groups['TIMEOUTS']:
print(path, file=failure_file)
failure_file.close()
failure_file = open(self.options.failure_file, 'w')
if not self.all_passed():
if 'REGRESSIONS' in self.groups:
for path in self.groups['REGRESSIONS']:
print(path, file=failure_file)
if 'TIMEOUTS' in self.groups:
for path in self.groups['TIMEOUTS']:
print(path, file=failure_file)
failure_file.close()
suffix = '' if completed else ' (partial run -- interrupted by user)'
if self.all_passed():
@@ -236,7 +247,8 @@ class ResultsSink:
def all_passed(self):
return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
def print_tinderbox_result(self, label, path, message=None, skip=False, time=None):
def print_tinderbox_result(self, label, path, message=None, skip=False,
time=None):
result = label
result += " | " + path
result += " |" + self.options.shell_args

View file

@@ -17,7 +17,7 @@ class Task(object):
self.out = []
self.err = []
def spawn_test(test, passthrough = False):
def spawn_test(test, passthrough=False):
"""Spawn one child, return a task struct."""
if not passthrough:
(rout, wout) = os.pipe()
@@ -45,7 +45,8 @@ def total_seconds(td):
"""
Return the total number of seconds contained in the duration as a float
"""
return (float(td.microseconds) + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
return (float(td.microseconds) \
+ (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def get_max_wait(tasks, results, timeout):
"""
@@ -116,7 +117,7 @@ def remove_task(tasks, pid):
index = i
break
else:
raise KeyError("No such pid: %s" % pid)
raise KeyError("No such pid: {}".format(pid))
out = tasks[index]
tasks.pop(index)
@@ -161,13 +162,13 @@ def reap_zombies(tasks, results, timeout):
returncode = -os.WTERMSIG(status)
out = TestOutput(
ended.test,
ended.cmd,
''.join(ended.out),
''.join(ended.err),
returncode,
total_seconds(datetime.now() - ended.start),
timed_out(ended, timeout))
ended.test,
ended.cmd,
''.join(ended.out),
''.join(ended.err),
returncode,
total_seconds(datetime.now() - ended.start),
timed_out(ended, timeout))
results.push(out)
return tasks

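The total_seconds() helper in the hunks above converts a timedelta to seconds by hand; on Python 2.7 and later the standard library provides the same value directly. A small check of that equivalence, with an arbitrary sample delta:

from datetime import timedelta

def total_seconds(td):
    # Same arithmetic as the helper patched above.
    return (float(td.microseconds)
            + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6

td = timedelta(days=1, seconds=5, microseconds=250000)  # arbitrary sample value
assert total_seconds(td) == td.total_seconds() == 86405.25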
View file

@@ -7,7 +7,7 @@ from Queue import Queue, Empty
from datetime import datetime
class Source:
def __init__(self, task_list, results, timeout, verbose = False):
def __init__(self, task_list, results, timeout, verbose=False):
self.tasks = Queue()
for task in task_list:
self.tasks.put_nowait(task)
@@ -20,7 +20,9 @@ class Source:
t0 = datetime.now()
sink = Sink(self.results)
self.workers = [ Worker(_+1, self.tasks, sink, self.timeout, self.verbose) for _ in range(worker_count) ]
self.workers = [Worker(_ + 1, self.tasks, sink, self.timeout,
self.verbose)
for _ in range(worker_count)]
if self.verbose:
print('[P] Starting workers.')
for w in self.workers:
@@ -65,12 +67,13 @@ class Worker(Thread):
self.thread = None
self.stop = False
self.t0 = 0
def log(self, msg):
if self.verbose:
dd = datetime.now() - self.t0
dt = dd.seconds + 1e-6 * dd.microseconds
print('[W%d %.3f] %s' % (self.id, dt, msg))
print('[W{:d} {:.3f}] {}'.format(self.id, dt, msg))
def run(self):
try:
@@ -79,7 +82,7 @@ class Worker(Thread):
break
self.log('Get next task.')
task = self.tasks.get(False)
self.log('Start task %s.'%str(task))
self.log('Start task {}.'.format(str(task)))
result = task.run(task.js_cmd_prefix, self.timeout)
self.log('Finished task.')
self.sink.push(result)

View file

@@ -26,7 +26,8 @@ class Terminal(object):
color = color[len('bright'):]
color_code = Terminal.COLOR[color]
sys.stdout.write(cls.ESCAPE + color_code + cls.SEPARATOR + mod + cls.COLOR_CODE)
sys.stdout.write(cls.ESCAPE + color_code + cls.SEPARATOR + mod
+ cls.COLOR_CODE)
@classmethod
def reset_color(cls):

View file

@@ -15,27 +15,27 @@ SHORT = c_short
WORD = c_ushort
class COORD(Structure):
"""struct in wincon.h."""
_fields_ = [
("X", SHORT),
("Y", SHORT)]
"""struct in wincon.h."""
_fields_ = [
("X", SHORT),
("Y", SHORT)]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT)]
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD)]
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD)]
# winbase.h
STD_INPUT_HANDLE = -10
@@ -68,9 +68,9 @@ SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
def get_text_attr():
csbi = CONSOLE_SCREEN_BUFFER_INFO()
GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
return csbi.wAttributes
csbi = CONSOLE_SCREEN_BUFFER_INFO()
GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
return csbi.wAttributes
DEFAULT_COLORS = get_text_attr()

View file

@@ -9,18 +9,20 @@ from threading import Thread
from results import TestOutput
# When run on tbpl, we run each test multiple times with the following arguments.
# When run on tbpl, we run each test multiple times with the following
# arguments.
TBPL_FLAGS = [
[], # no flags, normal baseline and ion
['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
['--ion-eager', '--ion-offthread-compile=off', '--ion-check-range-analysis', '--no-sse3', '--no-threads'],
['--ion-eager', '--ion-offthread-compile=off',
'--ion-check-range-analysis', '--no-sse3', '--no-threads'],
['--baseline-eager'],
['--baseline-eager', '--no-fpu'],
['--no-baseline', '--no-ion'],
]
def do_run_cmd(cmd):
l = [ None, None ]
l = [None, None]
th_run_cmd(cmd, l)
return l[1]
@@ -55,7 +57,7 @@ def run_cmd(cmd, timeout=60.0):
if timeout is None:
return do_run_cmd(cmd)
l = [ None, None ]
l = [None, None]
timed_out = False
th = Thread(target=th_run_cmd, args=(cmd, l))
th.start()
@@ -78,19 +80,23 @@ class Test(object):
class Test(object):
"""A runnable test."""
def __init__(self, path):
self.path = path # str: path of JS file relative to tests root dir
self.path = path # str: path of JS file relative to tests root dir
self.options = [] # [str]: Extra options to pass to the shell
@staticmethod
def prefix_command(path):
"""Return the '-f shell.js' options needed to run a test with the given path."""
"""Return the '-f shell.js' options needed to run a test with the given
path."""
if path == '':
return [ '-f', 'shell.js' ]
return ['-f', 'shell.js']
head, base = os.path.split(path)
return Test.prefix_command(head) + [ '-f', os.path.join(path, 'shell.js') ]
return Test.prefix_command(head) \
+ ['-f', os.path.join(path, 'shell.js')]
def get_command(self, js_cmd_prefix):
dirname, filename = os.path.split(self.path)
cmd = js_cmd_prefix + self.options + Test.prefix_command(dirname) + [ '-f', self.path ]
cmd = js_cmd_prefix + self.options + Test.prefix_command(dirname) \
+ ['-f', self.path]
return cmd
def run(self, js_cmd_prefix, timeout=30.0):
@@ -108,7 +114,6 @@ class TestCase(Test):
self.expect = True # bool: expected result, True => pass
self.random = False # bool: True => ignore output as 'random'
self.slow = False # bool: True => test may run slowly
self.options = [] # [str]: Extra options to pass to the shell
# The terms parsed to produce the above properties.
self.terms = None

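Test.prefix_command() in the file above walks up the test's directory path and prepends a '-f .../shell.js' pair for every ancestor directory, outermost first. An illustration with a made-up relative path, using a standalone copy of the function:

import os

def prefix_command(path):
    # Mirrors Test.prefix_command from the hunk above.
    if path == '':
        return ['-f', 'shell.js']
    head, base = os.path.split(path)
    return prefix_command(head) + ['-f', os.path.join(path, 'shell.js')]

print(prefix_command(os.path.join('ecma_6', 'Array')))
# On POSIX: ['-f', 'shell.js', '-f', 'ecma_6/shell.js', '-f', 'ecma_6/Array/shell.js']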
View file

@@ -9,7 +9,7 @@ For comparison, something apparently approximating a t-test is performed:
"Faster" means that:
t_baseline_goodrun = (t_baseline_avg - t_baseline_stddev)
t_current_badrun = (t_current_avg + t_current_stddev)
t_current_badrun = (t_current_avg + t_current_stddev)
t_current_badrun < t_baseline_goodrun
Effectively, a bad run from the current data is better than a good run from the
@@ -72,7 +72,8 @@ def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
"""Return a list of milliseconds for the counted runs."""
assert '"' not in filepath
code = JS_CODE_TEMPLATE.substitute(filepath=filepath,
warmup_run_count=warmup_runs, real_run_count=counted_runs)
warmup_run_count=warmup_runs,
real_run_count=counted_runs)
proc = subp.Popen([shellpath, '-e', code], stdout=subp.PIPE)
stdout, _ = proc.communicate()
milliseconds = [float(val) for val in stdout.split(',')]
@@ -81,7 +82,8 @@ def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
if not stfu:
print('Runs:', [int(ms) for ms in milliseconds])
print('Mean:', mean)
print('Stddev: %.2f (%.2f%% of mean)' % (sigma, sigma / mean * 100))
print('Stddev: {:.2f} ({:.2f}% of mean)'.format(
sigma, sigma / mean * 100))
return mean, sigma
@@ -91,34 +93,40 @@ def parsemark(filepaths, fbench, stfu=False):
for filepath in filepaths:
filename = os.path.split(filepath)[-1]
if not stfu:
print('Parsemarking %s...' % filename)
print('Parsemarking {}...'.format(filename))
bench_map[filename] = fbench(filepath)
print('{')
for i, (filename, (avg, stddev)) in enumerate(bench_map.iteritems()):
assert '"' not in filename
fmt = ' %30s: {"average_ms": %6.2f, "stddev_ms": %6.2f}'
fmt = ' {:30s}: {{"average_ms": {:6.2f}, "stddev_ms": {:6.2f}}}'
if i != len(bench_map) - 1:
fmt += ','
filename_str = '"%s"' % filename
print(fmt % (filename_str, avg, stddev))
filename_str = '"{}"'.format(filename)
print(fmt.format(filename_str, avg, stddev))
print('}')
return dict((filename, dict(average_ms=avg, stddev_ms=stddev))
for filename, (avg, stddev) in bench_map.iteritems())
for filename, (avg, stddev) in bench_map.iteritems())
def main():
parser = optparse.OptionParser(usage=__doc__.strip())
parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
default=5, help='used to minimize test instability [%default]')
default=5,
help='used to minimize test instability [%default]')
parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
default=50, help='timed data runs that count towards the average [%default]')
parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell '
'location; when omitted, will look in likely places')
default=50,
help='timed data runs that count towards the average'
' [%default]')
parser.add_option('-s', '--shell', metavar='PATH',
help='explicit shell location; when omitted, will look'
' in likely places')
parser.add_option('-b', '--baseline', metavar='JSON_PATH',
dest='baseline_path', help='json file with baseline values to '
'compare against')
dest='baseline_path',
help='json file with baseline values to '
'compare against')
parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
default=False, help='only print JSON to stdout [%default]')
default=False,
help='only print JSON to stdout [%default]')
options, args = parser.parse_args()
try:
shellpath = args.pop(0)
@@ -142,10 +150,13 @@ def main():
print('error: baseline file does not exist', file=sys.stderr)
return -1
if not compare_bench:
print('error: JSON support is missing, cannot compare benchmarks', file=sys.stderr)
print('error: JSON support is missing, cannot compare benchmarks',
file=sys.stderr)
return -1
benchfile = lambda filepath: bench(shellpath, filepath,
options.warmup_runs, options.counted_runs, stfu=options.stfu)
options.warmup_runs,
options.counted_runs,
stfu=options.stfu)
bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
if options.baseline_path:
compare_bench.compare_immediate(bench_map, options.baseline_path)
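One detail in the parsemark() hunk above: str.format() treats braces as replacement-field delimiters, so the literal braces of the JSON-ish output line have to be doubled ('{{' and '}}'), unlike with %-formatting. A minimal check of that escaping, with a placeholder number:

# Doubled braces come out as single literal braces in the result.
assert '{{"average_ms": {:.2f}}}'.format(12.3) == '{"average_ms": 12.30}'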