Bug 558846 - Update parsemark to use js::Parser directly. r=jorendorff.

This commit is contained in:
Chris Leary 2010-04-15 10:53:27 -05:00
Parent 07a6b27a70
Commit ef6efacfec
3 changed files: 138 additions and 77 deletions

View file

@ -3730,6 +3730,30 @@ Compile(JSContext *cx, uintN argc, jsval *vp)
return JS_TRUE; return JS_TRUE;
} }
static JSBool
Parse(JSContext *cx, uintN argc, jsval *vp)
{
    /*
     * Shell builtin: parse(code). Runs the string through js::Parser without
     * compiling to bytecode, throwing on syntax errors; returns undefined.
     */
    if (argc < 1) {
        JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_MORE_ARGS_NEEDED,
                             "parse", "0", "s");
        return JS_FALSE;
    }
    jsval arg0 = JS_ARGV(cx, vp)[0];
    if (!JSVAL_IS_STRING(arg0)) {
        const char *typeName = JS_GetTypeName(cx, JS_TypeOfValue(cx, arg0));
        JS_ReportError(cx, "expected string to parse, got %s", typeName);
        return JS_FALSE;
    }
    JSString *scriptContents = JSVAL_TO_STRING(arg0);
    js::Parser parser(cx);
    /*
     * Propagate failures: the original ignored both return values, so an
     * init failure or a syntax error still reported JS_TRUE with a pending
     * exception/error state left on cx.
     */
    if (!parser.init(JS_GetStringCharsZ(cx, scriptContents), JS_GetStringLength(scriptContents),
                     NULL, "<string>", 0)) {
        return JS_FALSE;
    }
    if (!parser.parse(NULL))
        return JS_FALSE;
    JS_SET_RVAL(cx, vp, JSVAL_VOID);
    return JS_TRUE;
}
static JSBool static JSBool
Snarf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) Snarf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{ {
@ -3883,6 +3907,7 @@ static JSFunctionSpec shell_functions[] = {
#endif #endif
JS_FS("snarf", Snarf, 0,0,0), JS_FS("snarf", Snarf, 0,0,0),
JS_FN("compile", Compile, 1,0), JS_FN("compile", Compile, 1,0),
JS_FN("parse", Parse, 1,0),
JS_FN("timeout", Timeout, 1,0), JS_FN("timeout", Timeout, 1,0),
JS_FN("elapsed", Elapsed, 0,0), JS_FN("elapsed", Elapsed, 0,0),
JS_FS_END JS_FS_END
@ -3987,7 +4012,8 @@ static const char *const shell_help_messages[] = {
"scatter(fns) Call functions concurrently (ignoring errors)", "scatter(fns) Call functions concurrently (ignoring errors)",
#endif #endif
"snarf(filename) Read filename into returned string", "snarf(filename) Read filename into returned string",
"compile(code) Parses a string, potentially throwing", "compile(code) Compiles a string to bytecode, potentially throwing",
"parse(code) Parses a string, potentially throwing",
"timeout([seconds])\n" "timeout([seconds])\n"
" Get/Set the limit in seconds for the execution time for the current context.\n" " Get/Set the limit in seconds for the execution time for the current context.\n"
" A negative value (default) means that the execution time is unlimited.", " A negative value (default) means that the execution time is unlimited.",

View file

@ -0,0 +1,79 @@
#!/usr/bin/env python2.4
"""usage: %progname candidate_path baseline_path
:warning: May raise ImportError on import if JSON support is missing.
"""
import optparse
from contextlib import nested
from operator import itemgetter
try:
import json
except ImportError:
import simplejson as json
def avg(seq):
    """Return the arithmetic mean of |seq|.

    Coerces the divisor to float so integer samples do not truncate under
    Python 2's classic (floor) division.
    """
    return sum(seq) / float(len(seq))
def compare(current, baseline):
percent_speedups = []
for key, current_result in current.iteritems():
try:
baseline_result = baseline[key]
except KeyError:
print key, 'missing from baseline'
continue
val_getter = itemgetter('average_ms', 'stddev_ms')
base_avg, base_stddev = val_getter(baseline_result)
current_avg, current_stddev = val_getter(current_result)
t_best, t_worst = current_avg - current_stddev, current_avg + current_stddev
base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
fmt = '%30s: %s'
if t_worst < base_t_best: # Worst takes less time (better) than baseline's best.
speedup = -((t_worst - base_t_best) / base_t_best) * 100
result = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % \
(t_worst, base_t_best, speedup)
percent_speedups.append(speedup)
elif t_best > base_t_worst: # Best takes more time (worse) than baseline's worst.
slowdown = -((t_best - base_t_worst) / base_t_worst) * 100
result = 'SLOWER: %6.2fms > baseline %6.2fms (%+6.2f%%) ' % \
(t_best, base_t_worst, slowdown)
percent_speedups.append(slowdown)
else:
result = 'Meh.'
print '%30s: %s' % (key, result)
if percent_speedups:
print 'Average speedup: %.2f%%' % avg(percent_speedups)
def compare_immediate(current_map, baseline_path):
    """Load baseline results from a JSON file and compare |current_map| to them.

    :param current_map: mapping of test name to dict('average_ms', 'stddev_ms').
    :param baseline_path: filepath of JSON baseline data.
    """
    baseline_file = open(baseline_path)
    try:
        baseline_map = json.load(baseline_file)
    finally:
        # Don't leak the handle if the baseline JSON is malformed.
        baseline_file.close()
    compare(current_map, baseline_map)
def main(candidate_path, baseline_path):
    """Load candidate and baseline JSON result files and print a comparison.

    :param candidate_path: filepath of JSON results to evaluate.
    :param baseline_path: filepath of JSON baseline results.
    """
    # Close each file as soon as it is parsed; the original leaked both
    # handles if json.load raised.
    candidate_file = open(candidate_path)
    try:
        candidate = json.load(candidate_file)
    finally:
        candidate_file.close()
    baseline_file = open(baseline_path)
    try:
        baseline = json.load(baseline_file)
    finally:
        baseline_file.close()
    compare(candidate, baseline)
if __name__ == '__main__':
    # Command-line driver: requires a candidate JSON path and a baseline
    # JSON path, in that order. optparse's error() prints usage and exits.
    option_parser = optparse.OptionParser(usage=__doc__.strip())
    _, cli_args = option_parser.parse_args()
    if not cli_args:
        option_parser.error('A JSON filepath to compare against baseline is required')
    candidate_path = cli_args.pop(0)
    if not cli_args:
        option_parser.error('A JSON filepath for baseline is required')
    baseline_path = cli_args.pop(0)
    main(candidate_path, baseline_path)

View file

@ -1,6 +1,6 @@
#!/usr/bin/env python #!/usr/bin/env python
"""%prog [options] dirpath """%prog [options] shellpath dirpath
Pulls performance data on parsing via the js shell. Pulls performance data on parsing via the js shell.
Displays the average number of milliseconds it took to parse each file. Displays the average number of milliseconds it took to parse each file.
@ -17,6 +17,7 @@ baseline data, we're probably faster. A similar computation is used for
determining the "slower" designation. determining the "slower" designation.
Arguments: Arguments:
shellpath executable JavaScript shell
dirpath directory filled with parsilicious js files dirpath directory filled with parsilicious js files
""" """
@ -26,18 +27,23 @@ import os
import subprocess as subp import subprocess as subp
import sys import sys
from string import Template from string import Template
from operator import itemgetter
try:
import compare_bench
except ImportError:
compare_bench = None
_DIR = os.path.dirname(__file__) _DIR = os.path.dirname(__file__)
JS_CODE_TEMPLATE = Template(""" JS_CODE_TEMPLATE = Template("""
var contents = snarf("$filepath"); if (typeof snarf !== 'undefined') read = snarf
for (let i = 0; i < $warmup_run_count; i++) var contents = read("$filepath");
compile(contents); for (var i = 0; i < $warmup_run_count; i++)
parse(contents);
var results = []; var results = [];
for (let i = 0; i < $real_run_count; i++) { for (var i = 0; i < $real_run_count; i++) {
var start = new Date(); var start = new Date();
compile(contents); parse(contents);
var end = new Date(); var end = new Date();
results.push(end - start); results.push(end - start);
} }
@ -45,24 +51,6 @@ print(results);
""") """)
def find_shell(filename='js'):
    """Look around for the js shell. Prefer more obvious places to look.

    :return: Path if found, else None.
    """
    # Candidate locations relative to this script's directory, most likely
    # first; a list entry means multiple path components to join.
    candidates = ['', 'obj', os.pardir, [os.pardir, 'obj']]
    for rel in candidates:
        parts = [_DIR]
        if isinstance(rel, list):
            parts.extend(rel)
        else:
            parts.append(rel)
        parts.append(filename)
        candidate_path = os.path.join(*parts)
        if os.path.isfile(candidate_path):
            return candidate_path
def gen_filepaths(dirpath, target_ext='.js'): def gen_filepaths(dirpath, target_ext='.js'):
for filename in os.listdir(dirpath): for filename in os.listdir(dirpath):
if filename.endswith(target_ext): if filename.endswith(target_ext):
@ -97,7 +85,7 @@ def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
def parsemark(filepaths, fbench, stfu=False): def parsemark(filepaths, fbench, stfu=False):
""":param fbench: fbench(filename) -> float""" """:param fbench: fbench(filename) -> float"""
bench_map = {} bench_map = {} # {filename: (avg, stddev)}
for filepath in filepaths: for filepath in filepaths:
filename = os.path.split(filepath)[-1] filename = os.path.split(filepath)[-1]
if not stfu: if not stfu:
@ -112,58 +100,31 @@ def parsemark(filepaths, fbench, stfu=False):
filename_str = '"%s"' % filename filename_str = '"%s"' % filename
print fmt % (filename_str, avg, stddev) print fmt % (filename_str, avg, stddev)
print '}' print '}'
return bench_map return dict((filename, dict(average_ms=avg, stddev_ms=stddev))
for filename, (avg, stddev) in bench_map.iteritems())
def compare(current, baseline):
    # Print a per-test comparison of |current| timings against |baseline|.
    # |current| maps key -> (avg, stddev) tuple; |baseline| maps key -> dict
    # with 'average_ms' and 'stddev_ms' entries.
    for key, (avg, stddev) in current.iteritems():
        try:
            # baseline.get returns None for a missing key; applying itemgetter
            # to None raises TypeError, which is how absence is detected.
            base_avg, base_stddev = itemgetter('average_ms', 'stddev_ms')(baseline.get(key, None))
        except TypeError:
            print key, 'missing from baseline'
            continue
        # Best/worst-case windows: one standard deviation around each average.
        t_best, t_worst = avg - stddev, avg + stddev
        base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
        fmt = '%30s: %s'  # NOTE(review): assigned but unused; the print below repeats the literal.
        if t_worst < base_t_best: # Worst takes less time (better) than baseline's best.
            speedup = -((t_worst - base_t_best) / base_t_best) * 100
            result = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % \
                (t_worst, base_t_best, speedup)
        elif t_best > base_t_worst: # Best takes more time (worse) than baseline's worst.
            slowdown = -((t_best - base_t_worst) / base_t_worst) * 100
            result = 'SLOWER: %6.2fms > baseline %6.2fms (%+6.2f%%) ' % \
                (t_best, base_t_worst, slowdown)
        else:
            result = 'Meh.'
        print '%30s: %s' % (key, result)
def try_import_json():
    """Return a JSON module: stdlib json, else simplejson, else None."""
    for modname in ('json', 'simplejson'):
        try:
            return __import__(modname)
        except ImportError:
            pass
    return None
def main(): def main():
parser = optparse.OptionParser(usage=__doc__.strip()) parser = optparse.OptionParser(usage=__doc__.strip())
parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int, parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
default=5, help='used to minimize test instability') default=5, help='used to minimize test instability [%default]')
parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int, parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
default=20, help='timed data runs that count towards the average') default=50, help='timed data runs that count towards the average [%default]')
parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell ' parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell '
'location; when omitted, will look in likely places') 'location; when omitted, will look in likely places')
parser.add_option('-b', '--baseline', metavar='JSON_PATH', parser.add_option('-b', '--baseline', metavar='JSON_PATH',
dest='baseline_path', help='json file with baseline values to ' dest='baseline_path', help='json file with baseline values to '
'compare against') 'compare against')
parser.add_option('-q', '--quiet', dest='stfu', action='store_true', parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
default=False, help='only print JSON to stdout') default=False, help='only print JSON to stdout [%default]')
options, args = parser.parse_args() options, args = parser.parse_args()
try:
shellpath = args.pop(0)
except IndexError:
parser.print_help()
print
print >> sys.stderr, 'error: shellpath required'
return -1
try: try:
dirpath = args.pop(0) dirpath = args.pop(0)
except IndexError: except IndexError:
@ -171,26 +132,21 @@ def main():
print print
print >> sys.stderr, 'error: dirpath required' print >> sys.stderr, 'error: dirpath required'
return -1 return -1
shellpath = options.shell or find_shell() if not shellpath or not os.path.exists(shellpath):
if not shellpath: print >> sys.stderr, 'error: could not find shell:', shellpath
print >> sys.stderr, 'Could not find shell'
return -1 return -1
if options.baseline_path: if options.baseline_path:
if not os.path.isfile(options.baseline_path): if not os.path.isfile(options.baseline_path):
print >> sys.stderr, 'Baseline file does not exist' print >> sys.stderr, 'error: baseline file does not exist'
return -1 return -1
json = try_import_json() if not compare_bench:
if not json: print >> sys.stderr, 'error: JSON support is missing, cannot compare benchmarks'
print >> sys.stderr, 'You need a json lib for baseline comparison'
return -1 return -1
benchfile = lambda filepath: bench(shellpath, filepath, benchfile = lambda filepath: bench(shellpath, filepath,
options.warmup_runs, options.counted_runs, stfu=options.stfu) options.warmup_runs, options.counted_runs, stfu=options.stfu)
bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu) bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
if options.baseline_path: if options.baseline_path:
fh = open(options.baseline_path, 'r') # 2.4 compat, no 'with'. compare_bench.compare_immediate(bench_map, options.baseline_path)
baseline_map = json.load(fh)
fh.close()
compare(current=bench_map, baseline=baseline_map)
return 0 return 0