From e555bb0ae41820ccd6e3f787171c953827875bef Mon Sep 17 00:00:00 2001 From: Coroiu Cristina Date: Wed, 27 Jun 2018 13:09:31 +0300 Subject: [PATCH] Backed out 2 changesets (bug 1447591) for mochitest failures at dom/base/test/test_postMessages.html Backed out changeset 0800fdb509d2 (bug 1447591) Backed out changeset 9b5347dee1f2 (bug 1447591) --- devtools/server/actors/source.js | 4 +- .../tests/unit/test_frameactor_wasm-01.js | 5 +- js/src/builtin/TestingFunctions.cpp | 100 +- js/src/jit-test/lib/wasm.js | 27 +- js/src/jit-test/tests/debug/wasm-05.js | 115 + js/src/jit-test/tests/debug/wasm-07.js | 28 +- .../tests/debug/wasm-binary-sources.js | 6 +- .../jit-test/tests/debug/wasm-breakpoint.js | 105 +- .../tests/debug/wasm-getAllColumnOffsets.js | 8 +- js/src/jit-test/tests/wasm/atomic.js | 69 + js/src/jit-test/tests/wasm/gc/structs.js | 9 + js/src/jit-test/tests/wasm/to-text.js | 293 ++ js/src/moz.build | 3 + js/src/wasm/WasmAST.h | 28 +- js/src/wasm/WasmBinaryToAST.cpp | 2390 +++++++++++++++++ js/src/wasm/WasmBinaryToAST.h | 37 + js/src/wasm/WasmBinaryToText.cpp | 2138 +++++++++++++++ js/src/wasm/WasmBinaryToText.h | 45 + js/src/wasm/WasmDebug.cpp | 210 +- js/src/wasm/WasmDebug.h | 38 +- js/src/wasm/WasmTextToBinary.cpp | 19 +- js/src/wasm/WasmTextToBinary.h | 3 +- js/src/wasm/WasmTextUtils.cpp | 80 + js/src/wasm/WasmTextUtils.h | 105 + js/src/wasm/WasmValidate.h | 3 + 25 files changed, 5689 insertions(+), 179 deletions(-) create mode 100644 js/src/jit-test/tests/debug/wasm-05.js create mode 100644 js/src/jit-test/tests/wasm/to-text.js create mode 100644 js/src/wasm/WasmBinaryToAST.cpp create mode 100644 js/src/wasm/WasmBinaryToAST.h create mode 100644 js/src/wasm/WasmBinaryToText.cpp create mode 100644 js/src/wasm/WasmBinaryToText.h create mode 100644 js/src/wasm/WasmTextUtils.cpp create mode 100644 js/src/wasm/WasmTextUtils.h diff --git a/devtools/server/actors/source.js b/devtools/server/actors/source.js index 511701e4024d..677f017babc8 100644 --- a/devtools/server/actors/source.js +++ b/devtools/server/actors/source.js @@ -792,10 +792,8 @@ const SourceActor = ActorClassWithSpec(sourceSpec, { if (!this.isSourceMapped) { const generatedLocation = GeneratedLocation.fromOriginalLocation(originalLocation); - const isWasm = this.source && this.source.introductionType === "wasm"; if (!this._setBreakpointAtGeneratedLocation(actor, generatedLocation) && - !noSliding && - !isWasm) { + !noSliding) { const query = { line: originalLine }; // For most cases, we have a real source to query for. The // only time we don't is for HTML pages. 
In that case we want diff --git a/devtools/server/tests/unit/test_frameactor_wasm-01.js b/devtools/server/tests/unit/test_frameactor_wasm-01.js index 4f0f43566a25..883170eb26ae 100644 --- a/devtools/server/tests/unit/test_frameactor_wasm-01.js +++ b/devtools/server/tests/unit/test_frameactor_wasm-01.js @@ -26,10 +26,7 @@ function run_test() { gClient, "test-stack", function(response, tabClient, threadClient) { gThreadClient = threadClient; - gThreadClient.reconfigure({ - observeAsmJS: true, - wasmBinarySource: true - }, function(response) { + gThreadClient.reconfigure({ observeAsmJS: true }, function(response) { Assert.equal(!!response.error, false); test_pause_frame(); }); diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp index fff29feaacc9..ffb3179696c9 100644 --- a/js/src/builtin/TestingFunctions.cpp +++ b/js/src/builtin/TestingFunctions.cpp @@ -63,6 +63,7 @@ #include "vm/StringType.h" #include "vm/TraceLogging.h" #include "wasm/AsmJS.h" +#include "wasm/WasmBinaryToText.h" #include "wasm/WasmJS.h" #include "wasm/WasmModule.h" #include "wasm/WasmSignalHandlers.h" @@ -658,61 +659,90 @@ WasmTextToBinary(JSContext* cx, unsigned argc, Value* vp) if (!twoByteChars.initTwoByte(cx, args[0].toString())) return false; - bool withOffsets = false; if (args.hasDefined(1)) { - if (!args[1].isBoolean()) { - ReportUsageErrorASCII(cx, callee, "Second argument, if present, must be a boolean"); + if (!args[1].isString()) { + ReportUsageErrorASCII(cx, callee, "Second argument, if present, must be a String"); return false; } - withOffsets = ToBoolean(args[1]); } uintptr_t stackLimit = GetNativeStackLimit(cx); wasm::Bytes bytes; UniqueChars error; - wasm::Uint32Vector offsets; - if (!wasm::TextToBinary(twoByteChars.twoByteChars(), stackLimit, &bytes, &offsets, &error)) { + if (!wasm::TextToBinary(twoByteChars.twoByteChars(), stackLimit, &bytes, &error)) { JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_TEXT_FAIL, error.get() ? 
error.get() : "out of memory"); return false; } - RootedObject binary(cx, JS_NewUint8Array(cx, bytes.length())); - if (!binary) - return false; - - memcpy(binary->as().viewDataUnshared(), bytes.begin(), bytes.length()); - - if (!withOffsets) { - args.rval().setObject(*binary); - return true; - } - - RootedObject obj(cx, JS_NewPlainObject(cx)); + RootedObject obj(cx, JS_NewUint8Array(cx, bytes.length())); if (!obj) return false; - constexpr unsigned propAttrs = JSPROP_ENUMERATE; - if (!JS_DefineProperty(cx, obj, "binary", binary, propAttrs)) - return false; - - RootedObject jsOffsets(cx, JS_NewArrayObject(cx, offsets.length())); - if (!jsOffsets) - return false; - for (size_t i = 0; i < offsets.length(); i++) { - uint32_t offset = offsets[i]; - RootedValue offsetVal(cx, NumberValue(offset)); - if (!JS_SetElement(cx, jsOffsets, i, offsetVal)) - return false; - } - if (!JS_DefineProperty(cx, obj, "offsets", jsOffsets, propAttrs)) - return false; + memcpy(obj->as().viewDataUnshared(), bytes.begin(), bytes.length()); args.rval().setObject(*obj); return true; } +static bool +WasmBinaryToText(JSContext* cx, unsigned argc, Value* vp) +{ + if (!cx->options().wasm()) { + JS_ReportErrorASCII(cx, "wasm support unavailable"); + return false; + } + + CallArgs args = CallArgsFromVp(argc, vp); + + if (!args.get(0).isObject() || !args.get(0).toObject().is()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_BUF_ARG); + return false; + } + + Rooted code(cx, &args[0].toObject().as()); + + if (!TypedArrayObject::ensureHasBuffer(cx, code)) + return false; + + if (code->isSharedMemory()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_BUF_ARG); + return false; + } + + const uint8_t* bufferStart = code->bufferUnshared()->dataPointer(); + const uint8_t* bytes = bufferStart + code->byteOffset(); + uint32_t length = code->byteLength(); + + Vector copy(cx); + if (code->bufferUnshared()->hasInlineData()) { + if (!copy.append(bytes, length)) + return false; + bytes = copy.begin(); + } + + if (args.length() > 1) { + JS_ReportErrorASCII(cx, "wasm text format selection is not supported"); + return false; + } + + StringBuffer buffer(cx); + bool ok = wasm::BinaryToText(cx, bytes, length, buffer); + if (!ok) { + if (!cx->isExceptionPending()) + JS_ReportErrorASCII(cx, "wasm binary to text print error"); + return false; + } + + JSString* result = buffer.finishString(); + if (!result) + return false; + + args.rval().setString(result); + return true; +} + static bool WasmExtractCode(JSContext* cx, unsigned argc, Value* vp) { @@ -5648,6 +5678,10 @@ gc::ZealModeHelpText), "wasmTextToBinary(str)", " Translates the given text wasm module into its binary encoding."), + JS_FN_HELP("wasmBinaryToText", WasmBinaryToText, 1, 0, +"wasmBinaryToText(bin)", +" Translates binary encoding to text format"), + JS_FN_HELP("wasmExtractCode", WasmExtractCode, 1, 0, "wasmExtractCode(module[, tier])", " Extracts generated machine code from WebAssembly.Module. 
The tier is a string,\n" diff --git a/js/src/jit-test/lib/wasm.js b/js/src/jit-test/lib/wasm.js index 92ca6f18bea8..537bf5a2b245 100644 --- a/js/src/jit-test/lib/wasm.js +++ b/js/src/jit-test/lib/wasm.js @@ -116,6 +116,14 @@ function wasmFullPass(text, expected, maybeImports, ...args) { let instance = new WebAssembly.Instance(module, maybeImports); assertEq(typeof instance.exports.run, 'function', "A 'run' function must be exported."); assertEq(instance.exports.run(...args), expected, "Initial module must return the expected result."); + + let retext = wasmBinaryToText(binary); + let rebinary = wasmTextToBinary(retext); + + assertEq(WebAssembly.validate(rebinary), true, "Recreated binary must validate."); + let remodule = new WebAssembly.Module(rebinary); + let reinstance = new WebAssembly.Instance(remodule, maybeImports); + assertEq(reinstance.exports.run(...args), expected, "Reformed module must return the expected result"); } // Ditto, but expects a function named '$run' instead of exported with this name. @@ -126,15 +134,15 @@ function wasmFullPassI64(text, expected, maybeImports, ...args) { let augmentedSrc = _augmentSrc(text, [ { type: 'i64', func: '$run', args, expected } ]); let augmentedBinary = wasmTextToBinary(augmentedSrc); new WebAssembly.Instance(new WebAssembly.Module(augmentedBinary), maybeImports).exports.assert_0(); + + let retext = wasmBinaryToText(augmentedBinary); + new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary(retext)), maybeImports).exports.assert_0(); } function wasmRunWithDebugger(wast, lib, init, done) { let g = newGlobal(''); let dbg = new Debugger(g); - // Enable binary source mode. - dbg.allowWasmBinarySource = true; - g.eval(` var wasm = wasmTextToBinary('${wast}'); var lib = ${lib || 'undefined'}; @@ -152,6 +160,19 @@ var m = new WebAssembly.Instance(new WebAssembly.Module(wasm), lib);`); done({dbg, result, error, wasmScript, g,}); } +function wasmGetScriptBreakpoints(wasmScript) { + var result = []; + var sourceText = wasmScript.source.text; + sourceText.split('\n').forEach(function (line, i) { + var lineOffsets = wasmScript.getLineOffsets(i + 1); + if (lineOffsets.length === 0) + return; + assertEq(lineOffsets.length, 1); + result.push({str: line.trim(), line: i + 1, offset: lineOffsets[0]}); + }); + return result; +} + const WasmHelpers = {}; (function() { diff --git a/js/src/jit-test/tests/debug/wasm-05.js b/js/src/jit-test/tests/debug/wasm-05.js new file mode 100644 index 000000000000..9332baa275e3 --- /dev/null +++ b/js/src/jit-test/tests/debug/wasm-05.js @@ -0,0 +1,115 @@ +// |jit-test| test-also-no-wasm-baseline +// Tests that wasm module scripts have text line to bytecode offset information +// when source text is generated. + +load(libdir + "asserts.js"); + +if (!wasmDebuggingIsSupported()) + quit(); + +// Checking if experimental format generates internal source map to binary file +// by querying debugger scripts getLineOffsets. 
+// (Notice that the source map will not be produced by wasmBinaryToText) +function getAllOffsets(wast) { + var sandbox = newGlobal(''); + var dbg = new Debugger(); + dbg.addDebuggee(sandbox); + sandbox.eval(` + var wasm = wasmTextToBinary('${wast}'); + var m = new WebAssembly.Instance(new WebAssembly.Module(wasm)); + `); + var wasmScript = dbg.findScripts().filter(s => s.format == 'wasm')[0]; + var lines = wasmScript.source.text.split('\n'); + return lines.map((l, n) => { return { str: l, offsets: wasmScript.getLineOffsets(n + 1) }; }); +} + +var result1 = getAllOffsets('(module \ + (func (nop)) \ + (func (drop (f32.sqrt (f32.add (f32.const 1.0) (f32.const 2.0))))) \ +)'); + +var nopLine = result1.filter(i => i.str.indexOf('nop') >= 0); +assertEq(nopLine.length, 1); +// The nopLine shall have single offset. +assertEq(nopLine[0].offsets.length, 1); +assertEq(nopLine[0].offsets[0] > 0, true); + +var singleOffsetLines = result1.filter(i => i.offsets.length === 1); +// There shall be total 8 lines with single offset. +assertEq(singleOffsetLines.length, 8); + +// Checking if all reported offsets can be resolved back to the corresponding +// line number. +function checkOffsetLineNumberMapping(wast, offsetsExpected) { + var sandbox = newGlobal(''); + var dbg = new Debugger(); + dbg.addDebuggee(sandbox); + sandbox.eval(` +var wasm = wasmTextToBinary('${wast}'); +var module = new WebAssembly.Module(wasm); +imports = {} +for (let descriptor of WebAssembly.Module.imports(module)) { + imports[descriptor.module] = {} + switch(descriptor.kind) { + case "function": imports[descriptor.module][descriptor.name] = new Function(''); break; + } +} +var instance = new WebAssembly.Instance(module, imports); +`); + var wasmScript = dbg.findScripts().filter(s => s.format == 'wasm')[0]; + assertEq(wasmScript.startLine, 1); + assertEq(wasmScript.lineCount >= 0, true); + var lines = wasmScript.source.text.split('\n'); + var offsetsFound = 0; + lines.forEach(function (l, n) { + var offsets = wasmScript.getLineOffsets(n + 1); + if (offsets.length < 1) return; + assertEq(offsets.length, 1); + assertEq(offsets[0] > 0, true); + offsetsFound++; + var loc = wasmScript.getOffsetLocation(offsets[0]); + assertEq(loc instanceof Object, true); + assertEq(loc.isEntryPoint, true); + assertEq(loc.lineNumber, n + 1); + assertEq(loc.columnNumber > 0, true); + }); + assertEq(offsetsFound, offsetsExpected); +} + +checkOffsetLineNumberMapping('(module (func))', 1); +checkOffsetLineNumberMapping('(module (func (nop)))', 2); +checkOffsetLineNumberMapping('(module (import "a" "b"))', 0); +checkOffsetLineNumberMapping('(module \ + (func (nop)) \ + (func (drop (f32.sqrt (f32.add (f32.const 1.0) (f32.const 2.0))))) \ +)', 8); +checkOffsetLineNumberMapping('(module \ + (func (local i32) i32.const 0 i32.const 1 set_local 0 get_local 0 call 0 i32.add nop drop) \ +)', 9); + +// Checking that there are no offsets are present in a wasm instance script for +// which debug mode was not enabled. +function getWasmScriptAfterDebuggerAttached(wast) { + var sandbox = newGlobal(''); + var dbg = new Debugger(); + sandbox.eval(` + var wasm = wasmTextToBinary('${wast}'); + var m = new WebAssembly.Instance(new WebAssembly.Module(wasm)); + `); + // Attaching after wasm instance is created. 
+ dbg.addDebuggee(sandbox); + var wasmScript = dbg.findScripts().filter(s => s.format == 'wasm')[0]; + return wasmScript; +} + +var wasmScript1 = getWasmScriptAfterDebuggerAttached('(module (func (nop)))'); +var wasmLines1 = wasmScript1.source.text.split('\n'); +assertEq(wasmScript1.startLine, 1); +assertEq(wasmScript1.lineCount, 0); +assertEq(wasmLines1.every((l, n) => wasmScript1.getLineOffsets(n + 1).length == 0), true); + +// Checking that we must not resolve any location for any offset in a wasm +// instance which debug mode was not enabled. +var wasmScript2 = getWasmScriptAfterDebuggerAttached('(module (func (nop)))'); +for (var i = wasmTextToBinary('(module (func (nop)))').length - 1; i >= 0; i--) + assertThrowsInstanceOf(() => wasmScript2.getOffsetLocation(i), Error); diff --git a/js/src/jit-test/tests/debug/wasm-07.js b/js/src/jit-test/tests/debug/wasm-07.js index 890b6168390e..b045597c8ba0 100644 --- a/js/src/jit-test/tests/debug/wasm-07.js +++ b/js/src/jit-test/tests/debug/wasm-07.js @@ -15,9 +15,7 @@ wasmRunWithDebugger( function ({dbg}) { offsets = []; dbg.onEnterFrame = function (frame) { - if (frame.type != 'wasmcall') { - return; - } + if (frame.type != 'wasmcall') return; offsets.push(frame.offset); frame.onStep = function () { offsets.push(frame.offset); @@ -26,16 +24,16 @@ wasmRunWithDebugger( offsets.push(frame.offset); }; }; - }, - function ({wasmScript, error}) { - assertEq(error, undefined); - assertEq(offsets.length, 5); - offsets.forEach(offset => { - var loc = wasmScript.getOffsetLocation(offset); - assertEq(loc.isEntryPoint, true); - assertEq(loc.lineNumber > 0, true); - assertEq(loc.columnNumber > 0, true); - assertEq(wasmScript.getLineOffsets(loc.lineNumber).length, 1); - }); - } + }, + function ({wasmScript, error}) { + assertEq(error, undefined); + assertEq(offsets.length, 5); + offsets.forEach(offset => { + var loc = wasmScript.getOffsetLocation(offset); + assertEq(loc.isEntryPoint, true); + assertEq(loc.lineNumber > 0, true); + assertEq(loc.columnNumber > 0, true); + assertEq(wasmScript.getLineOffsets(loc.lineNumber).length, 1); + }); + } ); diff --git a/js/src/jit-test/tests/debug/wasm-binary-sources.js b/js/src/jit-test/tests/debug/wasm-binary-sources.js index a0b9545829b7..d352eef6543a 100644 --- a/js/src/jit-test/tests/debug/wasm-binary-sources.js +++ b/js/src/jit-test/tests/debug/wasm-binary-sources.js @@ -19,8 +19,8 @@ assertEq(s.format, "wasm"); var source = s.source; -// The text is never generated with the native Debugger API. -assertEq(source.text.includes('module'), false); +// The text is generated if wasm binary sources are disabled. +assertEq(source.text.includes('module'), true); assertThrowsInstanceOf(() => source.binary, Error); // Enable binary sources. @@ -31,7 +31,7 @@ assertEq(s.format, "wasm"); var source2 = s.source; -// The text is predefined if wasm binary sources are enabled. +// The text is '[wasm]' if wasm binary sources are enabled. 
assertEq(source2.text, '[wasm]'); // The binary contains Uint8Array which is equal to wasm bytecode; arraysEqual(source2.binary, wasmTextToBinary('(module (func) (export "" 0))')); diff --git a/js/src/jit-test/tests/debug/wasm-breakpoint.js b/js/src/jit-test/tests/debug/wasm-breakpoint.js index 039cd5e1c63a..249342907b7e 100644 --- a/js/src/jit-test/tests/debug/wasm-breakpoint.js +++ b/js/src/jit-test/tests/debug/wasm-breakpoint.js @@ -6,55 +6,21 @@ load(libdir + "wasm.js"); if (!wasmDebuggingIsSupported()) quit(); -function runTest(wast, initFunc, doneFunc) { - let g = newGlobal(''); - let dbg = new Debugger(g); - - g.eval(` -var { binary, offsets } = wasmTextToBinary('${wast}', /* offsets */ true); -var m = new WebAssembly.Instance(new WebAssembly.Module(binary));`); - - var { offsets } = g; - - var wasmScript = dbg.findScripts().filter(s => s.format == 'wasm')[0]; - - initFunc({ - dbg, - wasmScript, - g, - breakpoints: offsets - }); - - let result, error; - try { - result = g.eval("m.exports.test()"); - } catch (ex) { - error = ex; - } - - doneFunc({ - dbg, - result, - error, - wasmScript, - g - }); -} - - -var onBreakpointCalled; - // Checking if we can stop at specified breakpoint. -runTest( +var onBreakpointCalled; +wasmRunWithDebugger( '(module (func (nop) (nop)) (export "test" 0))', - function ({wasmScript, breakpoints}) { + undefined, + function ({wasmScript}) { + var breakpoints = wasmGetScriptBreakpoints(wasmScript); assertEq(breakpoints.length, 3); - assertEq(breakpoints[0] > 0, true); + assertEq(breakpoints[0].offset > 0, true); // Checking if breakpoints offsets are in ascending order. - assertEq(breakpoints[0] < breakpoints[1], true); - assertEq(breakpoints[1] < breakpoints[2], true); + assertEq(breakpoints[0].offset < breakpoints[1].offset, true); + assertEq(breakpoints[1].offset < breakpoints[2].offset, true); onBreakpointCalled = 0; - breakpoints.forEach(function (offset) { + breakpoints.forEach(function (bp) { + var offset = bp.offset; wasmScript.setBreakpoint(offset, { hit: (frame) => { assertEq(frame.offset, offset); @@ -70,15 +36,18 @@ runTest( ); // Checking if we can remove breakpoint one by one. -runTest( +wasmRunWithDebugger( '(module (func (nop) (nop)) (export "test" 0))', - function ({wasmScript, breakpoints}) { + undefined, + function ({wasmScript}) { + var breakpoints = wasmGetScriptBreakpoints(wasmScript); onBreakpointCalled = 0; var handlers = []; - breakpoints.forEach(function (offset, i) { + breakpoints.forEach(function (bp, i) { + var offset = bp.offset; wasmScript.setBreakpoint(offset, handlers[i] = { hit: (frame) => { - assertEq(frame.offset, breakpoints[0]); + assertEq(frame.offset, breakpoints[0].offset); onBreakpointCalled++; // Removing all handlers. handlers.forEach(h => wasmScript.clearBreakpoint(h)); @@ -93,15 +62,18 @@ runTest( ); // Checking if we can remove breakpoint one by one from a breakpoint handler. -runTest( +wasmRunWithDebugger( '(module (func (nop) (nop)) (export "test" 0))', - function ({wasmScript, breakpoints}) { + undefined, + function ({wasmScript}) { + var breakpoints = wasmGetScriptBreakpoints(wasmScript); onBreakpointCalled = 0; var handlers = []; - breakpoints.forEach(function (offset, i) { + breakpoints.forEach(function (bp, i) { + var offset = bp.offset; wasmScript.setBreakpoint(offset, handlers[i] = { hit: (frame) => { - assertEq(frame.offset, breakpoints[0]); + assertEq(frame.offset, breakpoints[0].offset); onBreakpointCalled++; // Removing all handlers. 
handlers.forEach(h => wasmScript.clearBreakpoint(h)); @@ -118,13 +90,16 @@ runTest( // Checking if we can remove breakpoint one by one from onEnterFrame, // but onStep will still work. var onStepCalled; -runTest( +wasmRunWithDebugger( '(module (func (nop) (nop)) (export "test" 0))', - function ({dbg, wasmScript, breakpoints}) { + undefined, + function ({dbg, wasmScript}) { + var breakpoints = wasmGetScriptBreakpoints(wasmScript); onBreakpointCalled = 0; onStepCalled = []; var handlers = []; - breakpoints.forEach(function (offset, i) { + breakpoints.forEach(function (bp, i) { + var offset = bp.offset; wasmScript.setBreakpoint(offset, handlers[i] = { hit: (frame) => { assertEq(false, true); @@ -150,14 +125,17 @@ runTest( ); // Checking if we can remove all breakpoints. -runTest( +wasmRunWithDebugger( '(module (func (nop) (nop)) (export "test" 0))', - function ({wasmScript, breakpoints}) { + undefined, + function ({wasmScript}) { + var breakpoints = wasmGetScriptBreakpoints(wasmScript); onBreakpointCalled = 0; - breakpoints.forEach(function (offset, i) { + breakpoints.forEach(function (bp, i) { + var offset = bp.offset; wasmScript.setBreakpoint(offset, { hit: (frame) => { - assertEq(frame.offset, breakpoints[0]); + assertEq(frame.offset, breakpoints[0].offset); onBreakpointCalled++; // Removing all handlers. wasmScript.clearAllBreakpoints(); @@ -172,11 +150,14 @@ runTest( ); // Checking if breakpoints are removed after debugger has been detached. -runTest( +wasmRunWithDebugger( '(module (func (nop) (nop)) (export "test" 0))', - function ({dbg, wasmScript, g, breakpoints}) { + undefined, + function ({dbg, wasmScript, g}) { + var breakpoints = wasmGetScriptBreakpoints(wasmScript); onBreakpointCalled = 0; - breakpoints.forEach(function (offset, i) { + breakpoints.forEach(function (bp, i) { + var offset = bp.offset; wasmScript.setBreakpoint(offset, { hit: (frame) => { onBreakpointCalled++; diff --git a/js/src/jit-test/tests/debug/wasm-getAllColumnOffsets.js b/js/src/jit-test/tests/debug/wasm-getAllColumnOffsets.js index c47f2ccd17b8..70e6ab1f3b92 100644 --- a/js/src/jit-test/tests/debug/wasm-getAllColumnOffsets.js +++ b/js/src/jit-test/tests/debug/wasm-getAllColumnOffsets.js @@ -5,15 +5,15 @@ load(libdir + "asserts.js"); if (!wasmDebuggingIsSupported()) - quit(); + quit(); // Checking if experimental format generates internal source map to binary file // by querying debugger scripts getAllColumnOffsets. +// (Notice that the source map will not be produced by wasmBinaryToText) function getAllOffsets(wast) { var sandbox = newGlobal(''); var dbg = new Debugger(); dbg.addDebuggee(sandbox); - dbg.allowWasmBinarySource = true; sandbox.eval(` var wasm = wasmTextToBinary('${wast}'); var m = new WebAssembly.Instance(new WebAssembly.Module(wasm)); @@ -28,10 +28,8 @@ var offsets1 = getAllOffsets('(module \ )'); // There shall be total 8 lines with single and unique offset per line. 
-var usedOffsets = Object.create(null), - usedLines = Object.create(null); +var usedOffsets = Object.create(null), usedLines = Object.create(null); assertEq(offsets1.length, 8); - offsets1.forEach(({offset, lineNumber, columnNumber}) => { assertEq(offset > 0, true); assertEq(lineNumber > 0, true); diff --git a/js/src/jit-test/tests/wasm/atomic.js b/js/src/jit-test/tests/wasm/atomic.js index 9dfb8d1e767d..23be9da9dae5 100644 --- a/js/src/jit-test/tests/wasm/atomic.js +++ b/js/src/jit-test/tests/wasm/atomic.js @@ -84,10 +84,79 @@ for (let align of [1, 2, 4, 8]) { assertEq(valText(text), align == 4); } +// Check that the text output is sane. + +for (let [type,view] of [['i32','8_u'],['i32','16_u'],['i32',''],['i64','8_u'],['i64','16_u'],['i64','32_u'],['i64','']]) { + let addr = "i32.const 48"; + let value = `${type}.const 1`; + let value2 = `${type}.const 2`; + for (let op of ["add", "and", "or", "xor", "xchg"]) { + let operator = `${type}.atomic.rmw${view}.${op}`; + let text = `(module (memory 1 1 shared) + (func (result ${type}) (${operator} (${addr}) (${value}))) + (export "" 0))`; + checkRoundTrip(text, [addr, value, operator]); + } + { + let operator = `${type}.atomic.rmw${view}.cmpxchg`; + let text = `(module (memory 1 1 shared) + (func (result ${type}) (${operator} (${addr}) (${value}) (${value2}))) + (export "" 0))`; + checkRoundTrip(text, [addr, value, value2, operator]); + } + { + let operator = `${type}.atomic.load${view}`; + let text = `(module (memory 1 1 shared) + (func (result ${type}) (${operator} (${addr}))) + (export "" 0))`; + checkRoundTrip(text, [addr, operator]); + } + { + let operator = `${type}.atomic.store${view}`; + let text = `(module (memory 1 1 shared) + (func (${operator} (${addr}) (${value}))) + (export "" 0))`; + checkRoundTrip(text, [addr, value, operator]); + } +} + +for (let type of ['i32', 'i64']) { + let addr = "i32.const 48"; + let operator = `${type}.atomic.wait` + let value = `${type}.const 1`; + let timeout = "i64.const 314159"; + let text = `(module (memory 1 1 shared) + (func (result i32) (${operator} (${addr}) (${value}) (${timeout}))) + (export "" 0))`; + checkRoundTrip(text, [addr, value, timeout, operator]); +} + +{ + let addr = "i32.const 48"; + let operator = "atomic.wake" + let count = "i32.const 1"; + let text = `(module (memory 1 1 shared) + (func (result i32) (${operator} (${addr}) (${count}))) + (export "" 0))`; + checkRoundTrip(text, [addr, count, operator]); +} + function valText(text) { return WebAssembly.validate(wasmTextToBinary(text)); } +function checkRoundTrip(text, ss) { + let input = wasmTextToBinary(text); + let output = wasmBinaryToText(input).split("\n").map(String.trim); + for (let s of output) { + if (ss.length == 0) + break; + if (s.match(ss[0])) + ss.shift(); + } + assertEq(ss.length, 0); +} + // Test that atomic operations work. function I64(hi, lo) { diff --git a/js/src/jit-test/tests/wasm/gc/structs.js b/js/src/jit-test/tests/wasm/gc/structs.js index 75044c94057c..dca23258c018 100644 --- a/js/src/jit-test/tests/wasm/gc/structs.js +++ b/js/src/jit-test/tests/wasm/gc/structs.js @@ -69,6 +69,15 @@ assertEq(ins.hello(4.0, 1), 16.0) assertEq(ins.x1(12), 36) assertEq(ins.x2(8), Math.PI) +// Crude but at least checks that we have *something*. 
+ +var txt = wasmBinaryToText(bin); +var re = /\(type\s+\$[a-z0-9]+\s+\(struct/gm; +assertEq(Array.isArray(re.exec(txt)), true); +assertEq(Array.isArray(re.exec(txt)), true); +assertEq(Array.isArray(re.exec(txt)), true); +assertEq(Array.isArray(re.exec(txt)), false); + // The field name is optional, so this should work. wasmEvalText(` diff --git a/js/src/jit-test/tests/wasm/to-text.js b/js/src/jit-test/tests/wasm/to-text.js new file mode 100644 index 000000000000..e89ce615b018 --- /dev/null +++ b/js/src/jit-test/tests/wasm/to-text.js @@ -0,0 +1,293 @@ +var caught = false; +try { + wasmBinaryToText(new Int8Array(1)); +} catch (e) { + caught = true; +} +assertEq(caught, true); + +assertErrorMessage(() => wasmBinaryToText(wasmTextToBinary(`(module (func (result i32) (f32.const 13.37)))`)), WebAssembly.CompileError, /type mismatch/); + +function runTest(code) { + var expected = wasmTextToBinary(code); + var s = wasmBinaryToText(expected); + print("TEXT: " + s); + var roundtrip = wasmTextToBinary(s); + assertDeepEq(expected, roundtrip); +} + +// Smoke test +runTest(` +(module + (func (param i32) (result f64) + (local $l f32) + (block + (set_local $l (f32.const 0.0)) + (loop $exit $cont + (br_if $exit (get_local 0)) + (br 2) + ) + (drop (if f64 (i32.const 1) + (f64.min (f64.neg (f64.const 1)) (f64.const 0)) + (f64.add (f64.const 0.5) (f64.load offset=0 (i32.const 0)) ) + )) + ) + (i32.store16 (i32.const 8) (i32.const 128)) + + (return (f64.const 0)) + ) + (export "test" 0) + (memory 1 10) +)`); + +// Constants, stores and loads +runTest(` +(module (func + (local i32) (local f32) (local f64) + (drop (i32.const 0)) + (drop (i32.const 100002)) + (drop (f32.const 0.0)) + (drop (f32.const 1.5)) + (drop (f64.const 0.0)) + (drop (f64.const -10.25)) + (i32.store (i32.const 0) (i32.load (i32.const 0))) + (i32.store8 (i32.const 1) (i32.load8_s (i32.const 2))) + (i32.store8 (i32.const 3) (i32.load8_u (i32.const 4))) + (i32.store16 (i32.const 2) (i32.load16_s (i32.const 0))) + (i32.store16 (i32.const 1) (i32.load16_u (i32.const 0))) + (f32.store (i32.const 5) (f32.load (i32.const 6))) + (f64.store (i32.const 5) (f64.load (i32.const 6))) + (set_local 0 (get_local 0)) + (set_local 2 (get_local 2)) +)(memory 100))`); + +// Branching +runTest(` +(module +(func + (block (block (block (nop)))) + (block (loop )) + (if (i32.const 0) (block $label (nop))) + (if (i32.const 1) (nop) (loop $exit $cont (block ))) + (block $l (br $l)) + (block $m (block (block (br $m)))) + (block $k (br_if 0 (i32.const 0)) (return)) + (block $n (block (block (br_if 2 (i32.const 1)) (nop)))) + (block $1 (block $2 (block $3 (br_table $2 $3 $1 (i32.const 1)) )) (nop)) + (loop $exit $cont (br_if $cont (i32.const 0)) (nop)) + (return) +) +(func (result f32) (return (f32.const -0.5))) +(memory 0) +)`); + +// i32, f32 and f64 operations +runTest(` +(module + (func $iadd (param $x i32) (param $y i32) (result i32) (i32.add (get_local $x) (get_local $y))) + (func $isub (param $x i32) (param $y i32) (result i32) (i32.sub (get_local $x) (get_local $y))) + (func $imul (param $x i32) (param $y i32) (result i32) (i32.mul (get_local $x) (get_local $y))) + (func $idiv_s (param $x i32) (param $y i32) (result i32) (i32.div_s (get_local $x) (get_local $y))) + (func $idiv_u (param $x i32) (param $y i32) (result i32) (i32.div_u (get_local $x) (get_local $y))) + (func $irem_s (param $x i32) (param $y i32) (result i32) (i32.rem_s (get_local $x) (get_local $y))) + (func $irem_u (param $x i32) (param $y i32) (result i32) (i32.rem_u (get_local $x) (get_local $y))) 
+ (func $iand (param $x i32) (param $y i32) (result i32) (i32.and (get_local $x) (get_local $y))) + (func $ior (param $x i32) (param $y i32) (result i32) (i32.or (get_local $x) (get_local $y))) + (func $ixor (param $x i32) (param $y i32) (result i32) (i32.xor (get_local $x) (get_local $y))) + (func $ishl (param $x i32) (param $y i32) (result i32) (i32.shl (get_local $x) (get_local $y))) + (func $ishr_s (param $x i32) (param $y i32) (result i32) (i32.shr_s (get_local $x) (get_local $y))) + (func $ishr_u (param $x i32) (param $y i32) (result i32) (i32.shr_u (get_local $x) (get_local $y))) + (func $iclz (param $x i32) (result i32) (i32.clz (get_local $x))) + (func $ictz (param $x i32) (result i32) (i32.ctz (get_local $x))) + (func $ipopcnt (param $x i32) (result i32) (i32.popcnt (get_local $x))) + (func $ieq (param $x i32) (param $y i32) (result i32) (i32.eq (get_local $x) (get_local $y))) + (func $ine (param $x i32) (param $y i32) (result i32) (i32.ne (get_local $x) (get_local $y))) + (func $ilt_s (param $x i32) (param $y i32) (result i32) (i32.lt_s (get_local $x) (get_local $y))) + (func $ilt_u (param $x i32) (param $y i32) (result i32) (i32.lt_u (get_local $x) (get_local $y))) + (func $ile_s (param $x i32) (param $y i32) (result i32) (i32.le_s (get_local $x) (get_local $y))) + (func $ile_u (param $x i32) (param $y i32) (result i32) (i32.le_u (get_local $x) (get_local $y))) + (func $igt_s (param $x i32) (param $y i32) (result i32) (i32.gt_s (get_local $x) (get_local $y))) + (func $igt_u (param $x i32) (param $y i32) (result i32) (i32.gt_u (get_local $x) (get_local $y))) + (func $ige_s (param $x i32) (param $y i32) (result i32) (i32.ge_s (get_local $x) (get_local $y))) + (func $ige_u (param $x i32) (param $y i32) (result i32) (i32.ge_u (get_local $x) (get_local $y))) + + (func $fadd (param $x f32) (param $y f32) (result f32) (f32.add (get_local $x) (get_local $y))) + (func $fsub (param $x f32) (param $y f32) (result f32) (f32.sub (get_local $x) (get_local $y))) + (func $fmul (param $x f32) (param $y f32) (result f32) (f32.mul (get_local $x) (get_local $y))) + (func $fdiv (param $x f32) (param $y f32) (result f32) (f32.div (get_local $x) (get_local $y))) + (func $fsqrt (param $x f32) (result f32) (f32.sqrt (get_local $x))) + (func $fmin (param $x f32) (param $y f32) (result f32) (f32.min (get_local $x) (get_local $y))) + (func $fmax (param $x f32) (param $y f32) (result f32) (f32.max (get_local $x) (get_local $y))) + (func $fceil (param $x f32) (result f32) (f32.ceil (get_local $x))) + (func $ffloor (param $x f32) (result f32) (f32.floor (get_local $x))) + (func $fabs (param $x f32) (result f32) (f32.abs (get_local $x))) + (func $fneg (param $x f32) (result f32) (f32.neg (get_local $x))) + + (func $dadd (param $x f64) (param $y f64) (result f64) (f64.add (get_local $x) (get_local $y))) + (func $dsub (param $x f64) (param $y f64) (result f64) (f64.sub (get_local $x) (get_local $y))) + (func $dmul (param $x f64) (param $y f64) (result f64) (f64.mul (get_local $x) (get_local $y))) + (func $ddiv (param $x f64) (param $y f64) (result f64) (f64.div (get_local $x) (get_local $y))) + (func $dceil (param $x f64) (result f64) (f64.ceil (get_local $x))) + (func $dfloor (param $x f64) (result f64) (f64.floor (get_local $x))) + (func $dabs (param $x f64) (result f64) (f64.abs (get_local $x))) + (func $dneg (param $x f64) (result f64) (f64.neg (get_local $x))) +(memory 0))`); + +// conversions +runTest(` +(module + (func $itrunc_s_f32 (param $x f32) (result i32) (i32.trunc_s/f32 (get_local $x))) + (func 
$itrunc_u_f32 (param $x f32) (result i32) (i32.trunc_u/f32 (get_local $x))) + (func $itrunc_s_f64 (param $x f64) (result i32) (i32.trunc_s/f64 (get_local $x))) + (func $itrunc_u_f64 (param $x f64) (result i32) (i32.trunc_u/f64 (get_local $x))) + (func $fconvert_s_i32 (param $x i32) (result f32) (f32.convert_s/i32 (get_local $x))) + (func $dconvert_s_i32 (param $x i32) (result f64) (f64.convert_s/i32 (get_local $x))) + (func $fconvert_u_i32 (param $x i32) (result f32) (f32.convert_u/i32 (get_local $x))) + (func $dconvert_u_i32 (param $x i32) (result f64) (f64.convert_u/i32 (get_local $x))) + (func $dpromote_f32 (param $x f32) (result f64) (f64.promote/f32 (get_local $x))) + (func $fdemote_f64 (param $x f64) (result f32) (f32.demote/f64 (get_local $x))) +(memory 0))`); + +// function calls +runTest(` +(module + (type $type1 (func (param i32) (result i32))) + (import $import1 "mod" "test" (param f32) (result f32)) + (table anyfunc (elem $func1 $func2)) + (func $func1 (param i32) (param f32) (nop)) + (func $func2 (param i32) (result i32) (get_local 0)) + (func $test + (call $func1 + (call_indirect $type1 (i32.const 2) (i32.const 1)) + (call $import1 (f32.const 1.0)) + ) + ) + (export "test" $test) + (memory 1) +)`); + +// default memory export from binaryen +runTest(`(module (func (nop)) (memory 0 65535))`); + +// stack-machine code that isn't directly representable as an AST +runTest(` +(module + (func (result i32) + (local $x i32) + i32.const 100 + set_local $x + i32.const 200 + set_local $x + i32.const 400 + set_local $x + i32.const 2 + i32.const 16 + nop + set_local $x + i32.const 3 + i32.const 17 + set_local $x + i32.const 18 + set_local $x + i32.lt_s + if i32 + i32.const 101 + set_local $x + i32.const 8 + i32.const 102 + set_local $x + else + i32.const 103 + set_local $x + i32.const 900 + i32.const 104 + set_local $x + i32.const 105 + set_local $x + end + i32.const 107 + set_local $x + get_local $x + i32.add + i32.const 106 + set_local $x + ) + (export "" 0) +)`); + +// more stack-machine code that isn't directly representable as an AST +runTest(` +(module + (func $return_void) + + (func (result i32) + (local $x i32) + i32.const 0 + block + i32.const 1 + set_local $x + end + i32.const 2 + set_local $x + i32.const 3 + loop + i32.const 4 + set_local $x + end + i32.const 5 + set_local $x + i32.add + call $return_void + ) + (export "" 0) +)`); + +runTest(` + (module + (func $func + block $block + i32.const 0 + if + i32.const 0 + if + end + else + end + end + ) + (export "" 0) +)`); + +// Branch table. +runTest(`(module + (func (export "run") (param $p i32) (local $n i32) + i32.const 0 + set_local $n + loop $outer + block $c block $b block $a + loop $inner + get_local $p + br_table $b $a $c $inner $outer + end $inner + end $a + get_local $n + i32.const 1 + i32.add + set_local $n + end $b + block + get_local $n + i32.const 2 + i32.add + set_local $n + end + end $c + end $outer + ) +)`); + +// Import as a start function. 
+runTest(`(module + (import "env" "test" (func)) + (start 0) +)`); diff --git a/js/src/moz.build b/js/src/moz.build index 043e49ede68c..f9b803b400b8 100755 --- a/js/src/moz.build +++ b/js/src/moz.build @@ -401,6 +401,8 @@ UNIFIED_SOURCES += [ 'vm/Xdr.cpp', 'wasm/AsmJS.cpp', 'wasm/WasmBaselineCompile.cpp', + 'wasm/WasmBinaryToAST.cpp', + 'wasm/WasmBinaryToText.cpp', 'wasm/WasmBuiltins.cpp', 'wasm/WasmCode.cpp', 'wasm/WasmCompile.cpp', @@ -418,6 +420,7 @@ UNIFIED_SOURCES += [ 'wasm/WasmStubs.cpp', 'wasm/WasmTable.cpp', 'wasm/WasmTextToBinary.cpp', + 'wasm/WasmTextUtils.cpp', 'wasm/WasmTypes.cpp', 'wasm/WasmValidate.cpp' ] diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h index da52a778dd3b..f0302d9d4ec0 100644 --- a/js/src/wasm/WasmAST.h +++ b/js/src/wasm/WasmAST.h @@ -126,17 +126,10 @@ struct AstBase } }; -struct AstNode -{ - void* operator new(size_t numBytes, LifoAlloc& astLifo) throw() { - return astLifo.alloc(numBytes); - } -}; - class AstFuncType; class AstStructType; -class AstTypeDef : public AstNode +class AstTypeDef : public AstBase { protected: enum class Which { IsFuncType, IsStructType }; @@ -265,6 +258,19 @@ AstTypeDef::asStructType() const return *static_cast(this); } +const uint32_t AstNodeUnknownOffset = 0; + +class AstNode : public AstBase +{ + uint32_t offset_; // if applicable, offset in the binary format file + + public: + AstNode() : offset_(AstNodeUnknownOffset) {} + + uint32_t offset() const { return offset_; } + void setOffset(uint32_t offset) { offset_ = offset; } +}; + enum class AstExprKind { AtomicCmpXchg, @@ -856,6 +862,7 @@ class AstFunc : public AstNode AstValTypeVector vars_; AstNameVector localNames_; AstExprVector body_; + uint32_t endOffset_; // if applicable, offset in the binary format file public: AstFunc(AstName name, AstRef ft, AstValTypeVector&& vars, @@ -864,13 +871,16 @@ class AstFunc : public AstNode funcType_(ft), vars_(std::move(vars)), localNames_(std::move(locals)), - body_(std::move(body)) + body_(std::move(body)), + endOffset_(AstNodeUnknownOffset) {} AstRef& funcType() { return funcType_; } const AstValTypeVector& vars() const { return vars_; } const AstNameVector& locals() const { return localNames_; } const AstExprVector& body() const { return body_; } AstName name() const { return name_; } + uint32_t endOffset() const { return endOffset_; } + void setEndOffset(uint32_t offset) { endOffset_ = offset; } }; class AstGlobal : public AstNode diff --git a/js/src/wasm/WasmBinaryToAST.cpp b/js/src/wasm/WasmBinaryToAST.cpp new file mode 100644 index 000000000000..da72508716d2 --- /dev/null +++ b/js/src/wasm/WasmBinaryToAST.cpp @@ -0,0 +1,2390 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2016 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "wasm/WasmBinaryToAST.h" + +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Sprintf.h" + +#include "vm/JSContext.h" +#include "vm/Realm.h" +#include "wasm/WasmOpIter.h" +#include "wasm/WasmValidate.h" + +using namespace js; +using namespace js::wasm; + +using mozilla::FloorLog2; + +enum AstDecodeTerminationKind +{ + Unknown, + End, + Else +}; + +struct AstDecodeStackItem +{ + AstExpr* expr; + AstDecodeTerminationKind terminationKind; + ExprType type; + + explicit AstDecodeStackItem() + : expr(nullptr), + terminationKind(AstDecodeTerminationKind::Unknown), + type(ExprType::Limit) + {} + explicit AstDecodeStackItem(AstDecodeTerminationKind terminationKind, ExprType type) + : expr(nullptr), + terminationKind(terminationKind), + type(type) + {} + explicit AstDecodeStackItem(AstExpr* expr) + : expr(expr), + terminationKind(AstDecodeTerminationKind::Unknown), + type(ExprType::Limit) + {} +}; + +// We don't define a Value type because OpIter doesn't push void values, which +// we actually need here because we're building an AST, so we maintain our own +// stack. +struct AstDecodePolicy +{ + typedef Nothing Value; + typedef Nothing ControlItem; +}; + +typedef OpIter AstDecodeOpIter; + +class AstDecodeContext +{ + public: + typedef AstVector AstDecodeStack; + typedef AstVector DepthStack; + + JSContext* cx; + LifoAlloc& lifo; + Decoder& d; + bool generateNames; + + private: + ModuleEnvironment env_; + + AstModule& module_; + AstDecodeOpIter *iter_; + AstDecodeStack exprs_; + DepthStack depths_; + const ValTypeVector* locals_; + AstNameVector blockLabels_; + uint32_t currentLabelIndex_; + ExprType retType_; + + public: + AstDecodeContext(JSContext* cx, LifoAlloc& lifo, Decoder& d, AstModule& module, + bool generateNames, HasGcTypes hasGcTypes) + : cx(cx), + lifo(lifo), + d(d), + generateNames(generateNames), + env_(CompileMode::Once, Tier::Ion, DebugEnabled::False, hasGcTypes, + cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled() + ? Shareable::True + : Shareable::False), + module_(module), + iter_(nullptr), + exprs_(lifo), + depths_(lifo), + locals_(nullptr), + blockLabels_(lifo), + currentLabelIndex_(0), + retType_(ExprType::Limit) + {} + + ModuleEnvironment& env() { return env_; } + + AstModule& module() { return module_; } + AstDecodeOpIter& iter() { return *iter_; } + AstDecodeStack& exprs() { return exprs_; } + DepthStack& depths() { return depths_; } + + AstNameVector& blockLabels() { return blockLabels_; } + + ExprType retType() const { return retType_; } + const ValTypeVector& locals() const { return *locals_; } + + void popBack() { return exprs().popBack(); } + AstDecodeStackItem popCopy() { return exprs().popCopy(); } + AstDecodeStackItem& top() { return exprs().back(); } + MOZ_MUST_USE bool push(AstDecodeStackItem item) { return exprs().append(item); } + + bool needFirst() { + for (size_t i = depths().back(); i < exprs().length(); ++i) { + if (!exprs()[i].expr->isVoid()) + return true; + } + return false; + } + + AstExpr* handleVoidExpr(AstExpr* voidNode) + { + MOZ_ASSERT(voidNode->isVoid()); + + // To attach a node that "returns void" to the middle of an AST, wrap it + // in a first node next to the node it should accompany. + if (needFirst()) { + AstExpr *prev = popCopy().expr; + + // If the previous/A node is already a First, reuse it. 
+ if (prev->kind() == AstExprKind::First) { + if (!prev->as().exprs().append(voidNode)) + return nullptr; + return prev; + } + + AstExprVector exprs(lifo); + if (!exprs.append(prev)) + return nullptr; + if (!exprs.append(voidNode)) + return nullptr; + + return new(lifo) AstFirst(std::move(exprs)); + } + + return voidNode; + } + + void startFunction(AstDecodeOpIter* iter, const ValTypeVector* locals, ExprType retType) + { + iter_ = iter; + locals_ = locals; + currentLabelIndex_ = 0; + retType_ = retType; + } + void endFunction() + { + iter_ = nullptr; + locals_ = nullptr; + retType_ = ExprType::Limit; + MOZ_ASSERT(blockLabels_.length() == 0); + } + uint32_t nextLabelIndex() + { + return currentLabelIndex_++; + } +}; + +static bool +GenerateName(AstDecodeContext& c, const AstName& prefix, uint32_t index, AstName* name) +{ + if (!c.generateNames) { + *name = AstName(); + return true; + } + + AstVector result(c.lifo); + if (!result.append(u'$')) + return false; + if (!result.append(prefix.begin(), prefix.length())) + return false; + + uint32_t tmp = index; + do { + if (!result.append(u'0')) + return false; + tmp /= 10; + } while (tmp); + + if (index) { + char16_t* p = result.end(); + for (tmp = index; tmp; tmp /= 10) + *(--p) = u'0' + (tmp % 10); + } + + size_t length = result.length(); + char16_t* begin = result.extractOrCopyRawBuffer(); + if (!begin) + return false; + + *name = AstName(begin, length); + return true; +} + +static bool +GenerateRef(AstDecodeContext& c, const AstName& prefix, uint32_t index, AstRef* ref) +{ + MOZ_ASSERT(index != AstNoIndex); + + if (!c.generateNames) { + *ref = AstRef(index); + return true; + } + + AstName name; + if (!GenerateName(c, prefix, index, &name)) + return false; + MOZ_ASSERT(!name.empty()); + + *ref = AstRef(name); + ref->setIndex(index); + return true; +} + +static bool +GenerateFuncRef(AstDecodeContext& c, uint32_t funcIndex, AstRef* ref) +{ + if (funcIndex < c.module().numFuncImports()) { + *ref = AstRef(c.module().funcImportNames()[funcIndex]); + } else { + if (!GenerateRef(c, AstName(u"func"), funcIndex, ref)) + return false; + } + return true; +} + +static bool +AstDecodeCallArgs(AstDecodeContext& c, const FuncTypeWithId& funcType, AstExprVector* funcArgs) +{ + MOZ_ASSERT(!c.iter().currentBlockHasPolymorphicBase()); + + uint32_t numArgs = funcType.args().length(); + if (!funcArgs->resize(numArgs)) + return false; + + for (size_t i = 0; i < numArgs; ++i) + (*funcArgs)[i] = c.exprs()[c.exprs().length() - numArgs + i].expr; + + c.exprs().shrinkBy(numArgs); + + return true; +} + +static bool +AstDecodeExpr(AstDecodeContext& c); + +static bool +AstDecodeDrop(AstDecodeContext& c) +{ + if (!c.iter().readDrop()) + return false; + + AstDecodeStackItem value = c.popCopy(); + + AstExpr* tmp = new(c.lifo) AstDrop(*value.expr); + if (!tmp) + return false; + + tmp = c.handleVoidExpr(tmp); + if (!tmp) + return false; + + if (!c.push(AstDecodeStackItem(tmp))) + return false; + + return true; +} + +static bool +AstDecodeCall(AstDecodeContext& c) +{ + uint32_t funcIndex; + AstDecodeOpIter::ValueVector unusedArgs; + if (!c.iter().readCall(&funcIndex, &unusedArgs)) + return false; + + if (c.iter().currentBlockHasPolymorphicBase()) + return true; + + AstRef funcRef; + if (!GenerateFuncRef(c, funcIndex, &funcRef)) + return false; + + const FuncTypeWithId* funcType = c.env().funcTypes[funcIndex]; + + AstExprVector args(c.lifo); + if (!AstDecodeCallArgs(c, *funcType, &args)) + return false; + + AstCall* call = new(c.lifo) AstCall(Op::Call, funcType->ret(), funcRef, 
std::move(args)); + if (!call) + return false; + + AstExpr* result = call; + if (IsVoid(funcType->ret())) + result = c.handleVoidExpr(call); + + if (!c.push(AstDecodeStackItem(result))) + return false; + + return true; +} + +static bool +AstDecodeCallIndirect(AstDecodeContext& c) +{ + uint32_t funcTypeIndex; + AstDecodeOpIter::ValueVector unusedArgs; + if (!c.iter().readCallIndirect(&funcTypeIndex, nullptr, &unusedArgs)) + return false; + + if (c.iter().currentBlockHasPolymorphicBase()) + return true; + + AstDecodeStackItem index = c.popCopy(); + + AstRef funcTypeRef; + if (!GenerateRef(c, AstName(u"type"), funcTypeIndex, &funcTypeRef)) + return false; + + const FuncTypeWithId& funcType = c.env().types[funcTypeIndex].funcType(); + AstExprVector args(c.lifo); + if (!AstDecodeCallArgs(c, funcType, &args)) + return false; + + AstCallIndirect* call = + new(c.lifo) AstCallIndirect(funcTypeRef, funcType.ret(), std::move(args), index.expr); + if (!call) + return false; + + AstExpr* result = call; + if (IsVoid(funcType.ret())) + result = c.handleVoidExpr(call); + + if (!c.push(AstDecodeStackItem(result))) + return false; + + return true; +} + +static bool +AstDecodeGetBlockRef(AstDecodeContext& c, uint32_t depth, AstRef* ref) +{ + if (!c.generateNames || depth >= c.blockLabels().length()) { + // Also ignoring if it's a function body label. + *ref = AstRef(depth); + return true; + } + + uint32_t index = c.blockLabels().length() - depth - 1; + if (c.blockLabels()[index].empty()) { + if (!GenerateName(c, AstName(u"label"), c.nextLabelIndex(), &c.blockLabels()[index])) + return false; + } + *ref = AstRef(c.blockLabels()[index]); + ref->setIndex(depth); + return true; +} + +static bool +AstDecodeBrTable(AstDecodeContext& c) +{ + bool unreachable = c.iter().currentBlockHasPolymorphicBase(); + + Uint32Vector depths; + uint32_t defaultDepth; + ExprType type; + if (!c.iter().readBrTable(&depths, &defaultDepth, &type, nullptr, nullptr)) + return false; + + if (unreachable) + return true; + + AstRefVector table(c.lifo); + if (!table.resize(depths.length())) + return false; + + for (size_t i = 0; i < depths.length(); ++i) { + if (!AstDecodeGetBlockRef(c, depths[i], &table[i])) + return false; + } + + AstDecodeStackItem index = c.popCopy(); + AstDecodeStackItem value; + if (!IsVoid(type)) + value = c.popCopy(); + + AstRef def; + if (!AstDecodeGetBlockRef(c, defaultDepth, &def)) + return false; + + auto branchTable = new(c.lifo) AstBranchTable(*index.expr, def, std::move(table), value.expr); + if (!branchTable) + return false; + + if (!c.push(AstDecodeStackItem(branchTable))) + return false; + + return true; +} + +static bool +AstDecodeBlock(AstDecodeContext& c, Op op) +{ + MOZ_ASSERT(op == Op::Block || op == Op::Loop); + + if (!c.blockLabels().append(AstName())) + return false; + + if (op == Op::Loop) { + if (!c.iter().readLoop()) + return false; + } else { + if (!c.iter().readBlock()) + return false; + } + + if (!c.depths().append(c.exprs().length())) + return false; + + ExprType type; + while (true) { + if (!AstDecodeExpr(c)) + return false; + + const AstDecodeStackItem& item = c.top(); + if (!item.expr) { // Op::End was found + type = item.type; + c.popBack(); + break; + } + } + + AstExprVector exprs(c.lifo); + for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end(); + i != e; ++i) { + if (!exprs.append(i->expr)) + return false; + } + c.exprs().shrinkTo(c.depths().popCopy()); + + AstName name = c.blockLabels().popCopy(); + AstBlock* block = new(c.lifo) AstBlock(op, type, name, 
std::move(exprs)); + if (!block) + return false; + + AstExpr* result = block; + if (IsVoid(type)) + result = c.handleVoidExpr(block); + + if (!c.push(AstDecodeStackItem(result))) + return false; + + return true; +} + +static bool +AstDecodeIf(AstDecodeContext& c) +{ + if (!c.iter().readIf(nullptr)) + return false; + + AstDecodeStackItem cond = c.popCopy(); + + bool hasElse = false; + + if (!c.depths().append(c.exprs().length())) + return false; + + if (!c.blockLabels().append(AstName())) + return false; + + ExprType type; + while (true) { + if (!AstDecodeExpr(c)) + return false; + + const AstDecodeStackItem& item = c.top(); + if (!item.expr) { // Op::End was found + hasElse = item.terminationKind == AstDecodeTerminationKind::Else; + type = item.type; + c.popBack(); + break; + } + } + + AstExprVector thenExprs(c.lifo); + for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end(); + i != e; ++i) { + if (!thenExprs.append(i->expr)) + return false; + } + c.exprs().shrinkTo(c.depths().back()); + + AstExprVector elseExprs(c.lifo); + if (hasElse) { + while (true) { + if (!AstDecodeExpr(c)) + return false; + + const AstDecodeStackItem& item = c.top(); + if (!item.expr) { // Op::End was found + c.popBack(); + break; + } + } + + for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end(); + i != e; ++i) { + if (!elseExprs.append(i->expr)) + return false; + } + c.exprs().shrinkTo(c.depths().back()); + } + + c.depths().popBack(); + + AstName name = c.blockLabels().popCopy(); + + AstIf* if_ = new(c.lifo) AstIf(type, cond.expr, name, std::move(thenExprs), std::move(elseExprs)); + if (!if_) + return false; + + AstExpr* result = if_; + if (IsVoid(type)) + result = c.handleVoidExpr(if_); + + if (!c.push(AstDecodeStackItem(result))) + return false; + + return true; +} + +static bool +AstDecodeEnd(AstDecodeContext& c) +{ + LabelKind kind; + ExprType type; + if (!c.iter().readEnd(&kind, &type, nullptr)) + return false; + + c.iter().popEnd(); + + if (!c.push(AstDecodeStackItem(AstDecodeTerminationKind::End, type))) + return false; + + return true; +} + +static bool +AstDecodeElse(AstDecodeContext& c) +{ + ExprType type; + + if (!c.iter().readElse(&type, nullptr)) + return false; + + if (!c.push(AstDecodeStackItem(AstDecodeTerminationKind::Else, type))) + return false; + + return true; +} + +static bool +AstDecodeNop(AstDecodeContext& c) +{ + if (!c.iter().readNop()) + return false; + + AstExpr* tmp = new(c.lifo) AstNop(); + if (!tmp) + return false; + + tmp = c.handleVoidExpr(tmp); + if (!tmp) + return false; + + if (!c.push(AstDecodeStackItem(tmp))) + return false; + + return true; +} + +static bool +AstDecodeUnary(AstDecodeContext& c, ValType type, Op op) +{ + if (!c.iter().readUnary(type, nullptr)) + return false; + + AstDecodeStackItem operand = c.popCopy(); + + AstUnaryOperator* unary = new(c.lifo) AstUnaryOperator(op, operand.expr); + if (!unary) + return false; + + if (!c.push(AstDecodeStackItem(unary))) + return false; + + return true; +} + +static bool +AstDecodeBinary(AstDecodeContext& c, ValType type, Op op) +{ + if (!c.iter().readBinary(type, nullptr, nullptr)) + return false; + + AstDecodeStackItem rhs = c.popCopy(); + AstDecodeStackItem lhs = c.popCopy(); + + AstBinaryOperator* binary = new(c.lifo) AstBinaryOperator(op, lhs.expr, rhs.expr); + if (!binary) + return false; + + if (!c.push(AstDecodeStackItem(binary))) + return false; + + return true; +} + +static bool +AstDecodeSelect(AstDecodeContext& c) +{ + StackType type; + if (!c.iter().readSelect(&type, nullptr, 
nullptr, nullptr)) + return false; + + if (c.iter().currentBlockHasPolymorphicBase()) + return true; + + AstDecodeStackItem selectFalse = c.popCopy(); + AstDecodeStackItem selectTrue = c.popCopy(); + AstDecodeStackItem cond = c.popCopy(); + + auto* select = new(c.lifo) AstTernaryOperator(Op::Select, cond.expr, selectTrue.expr, + selectFalse.expr); + if (!select) + return false; + + if (!c.push(AstDecodeStackItem(select))) + return false; + + return true; +} + +static bool +AstDecodeComparison(AstDecodeContext& c, ValType type, Op op) +{ + if (!c.iter().readComparison(type, nullptr, nullptr)) + return false; + + AstDecodeStackItem rhs = c.popCopy(); + AstDecodeStackItem lhs = c.popCopy(); + + AstComparisonOperator* comparison = new(c.lifo) AstComparisonOperator(op, lhs.expr, rhs.expr); + if (!comparison) + return false; + + if (!c.push(AstDecodeStackItem(comparison))) + return false; + + return true; +} + +static bool +AstDecodeConversion(AstDecodeContext& c, ValType fromType, ValType toType, Op op) +{ + if (!c.iter().readConversion(fromType, toType, nullptr)) + return false; + + AstDecodeStackItem operand = c.popCopy(); + + AstConversionOperator* conversion = new(c.lifo) AstConversionOperator(op, operand.expr); + if (!conversion) + return false; + + if (!c.push(AstDecodeStackItem(conversion))) + return false; + + return true; +} + +#ifdef ENABLE_WASM_SATURATING_TRUNC_OPS +static bool +AstDecodeExtraConversion(AstDecodeContext& c, ValType fromType, ValType toType, MiscOp op) +{ + if (!c.iter().readConversion(fromType, toType, nullptr)) + return false; + + AstDecodeStackItem operand = c.popCopy(); + + AstExtraConversionOperator* conversion = + new(c.lifo) AstExtraConversionOperator(op, operand.expr); + if (!conversion) + return false; + + if (!c.push(AstDecodeStackItem(conversion))) + return false; + + return true; +} +#endif + +static AstLoadStoreAddress +AstDecodeLoadStoreAddress(const LinearMemoryAddress& addr, const AstDecodeStackItem& item) +{ + uint32_t flags = FloorLog2(addr.align); + return AstLoadStoreAddress(item.expr, flags, addr.offset); +} + +static bool +AstDecodeLoad(AstDecodeContext& c, ValType type, uint32_t byteSize, Op op) +{ + LinearMemoryAddress addr; + if (!c.iter().readLoad(type, byteSize, &addr)) + return false; + + AstDecodeStackItem item = c.popCopy(); + + AstLoad* load = new(c.lifo) AstLoad(op, AstDecodeLoadStoreAddress(addr, item)); + if (!load) + return false; + + if (!c.push(AstDecodeStackItem(load))) + return false; + + return true; +} + +static bool +AstDecodeStore(AstDecodeContext& c, ValType type, uint32_t byteSize, Op op) +{ + LinearMemoryAddress addr; + if (!c.iter().readStore(type, byteSize, &addr, nullptr)) + return false; + + AstDecodeStackItem value = c.popCopy(); + AstDecodeStackItem item = c.popCopy(); + + AstStore* store = new(c.lifo) AstStore(op, AstDecodeLoadStoreAddress(addr, item), value.expr); + if (!store) + return false; + + AstExpr* wrapped = c.handleVoidExpr(store); + if (!wrapped) + return false; + + if (!c.push(AstDecodeStackItem(wrapped))) + return false; + + return true; +} + +static bool +AstDecodeCurrentMemory(AstDecodeContext& c) +{ + if (!c.iter().readCurrentMemory()) + return false; + + AstCurrentMemory* gm = new(c.lifo) AstCurrentMemory(); + if (!gm) + return false; + + if (!c.push(AstDecodeStackItem(gm))) + return false; + + return true; +} + +static bool +AstDecodeGrowMemory(AstDecodeContext& c) +{ + if (!c.iter().readGrowMemory(nullptr)) + return false; + + AstDecodeStackItem operand = c.popCopy(); + + AstGrowMemory* gm = 
new(c.lifo) AstGrowMemory(operand.expr); + if (!gm) + return false; + + if (!c.push(AstDecodeStackItem(gm))) + return false; + + return true; +} + +static bool +AstDecodeBranch(AstDecodeContext& c, Op op) +{ + MOZ_ASSERT(op == Op::Br || op == Op::BrIf); + + uint32_t depth; + ExprType type; + AstDecodeStackItem value; + AstDecodeStackItem cond; + if (op == Op::Br) { + if (!c.iter().readBr(&depth, &type, nullptr)) + return false; + if (!IsVoid(type)) + value = c.popCopy(); + } else { + if (!c.iter().readBrIf(&depth, &type, nullptr, nullptr)) + return false; + if (!IsVoid(type)) + value = c.popCopy(); + cond = c.popCopy(); + } + + AstRef depthRef; + if (!AstDecodeGetBlockRef(c, depth, &depthRef)) + return false; + + if (op == Op::Br || !value.expr) + type = ExprType::Void; + AstBranch* branch = new(c.lifo) AstBranch(op, type, cond.expr, depthRef, value.expr); + if (!branch) + return false; + + if (!c.push(AstDecodeStackItem(branch))) + return false; + + return true; +} + +static bool +AstDecodeGetLocal(AstDecodeContext& c) +{ + uint32_t getLocalId; + if (!c.iter().readGetLocal(c.locals(), &getLocalId)) + return false; + + AstRef localRef; + if (!GenerateRef(c, AstName(u"var"), getLocalId, &localRef)) + return false; + + AstGetLocal* getLocal = new(c.lifo) AstGetLocal(localRef); + if (!getLocal) + return false; + + if (!c.push(AstDecodeStackItem(getLocal))) + return false; + + return true; +} + +static bool +AstDecodeSetLocal(AstDecodeContext& c) +{ + uint32_t setLocalId; + if (!c.iter().readSetLocal(c.locals(), &setLocalId, nullptr)) + return false; + + AstDecodeStackItem setLocalValue = c.popCopy(); + + AstRef localRef; + if (!GenerateRef(c, AstName(u"var"), setLocalId, &localRef)) + return false; + + AstSetLocal* setLocal = new(c.lifo) AstSetLocal(localRef, *setLocalValue.expr); + if (!setLocal) + return false; + + AstExpr* expr = c.handleVoidExpr(setLocal); + if (!expr) + return false; + + if (!c.push(AstDecodeStackItem(expr))) + return false; + + return true; +} + +static bool +AstDecodeTeeLocal(AstDecodeContext& c) +{ + uint32_t teeLocalId; + if (!c.iter().readTeeLocal(c.locals(), &teeLocalId, nullptr)) + return false; + + AstDecodeStackItem teeLocalValue = c.popCopy(); + + AstRef localRef; + if (!GenerateRef(c, AstName(u"var"), teeLocalId, &localRef)) + return false; + + AstTeeLocal* teeLocal = new(c.lifo) AstTeeLocal(localRef, *teeLocalValue.expr); + if (!teeLocal) + return false; + + if (!c.push(AstDecodeStackItem(teeLocal))) + return false; + + return true; +} + +static bool +AstDecodeGetGlobal(AstDecodeContext& c) +{ + uint32_t globalId; + if (!c.iter().readGetGlobal(&globalId)) + return false; + + AstRef globalRef; + if (!GenerateRef(c, AstName(u"global"), globalId, &globalRef)) + return false; + + auto* getGlobal = new(c.lifo) AstGetGlobal(globalRef); + if (!getGlobal) + return false; + + if (!c.push(AstDecodeStackItem(getGlobal))) + return false; + + return true; +} + +static bool +AstDecodeSetGlobal(AstDecodeContext& c) +{ + uint32_t globalId; + if (!c.iter().readSetGlobal(&globalId, nullptr)) + return false; + + AstDecodeStackItem value = c.popCopy(); + + AstRef globalRef; + if (!GenerateRef(c, AstName(u"global"), globalId, &globalRef)) + return false; + + auto* setGlobal = new(c.lifo) AstSetGlobal(globalRef, *value.expr); + if (!setGlobal) + return false; + + AstExpr* expr = c.handleVoidExpr(setGlobal); + if (!expr) + return false; + + if (!c.push(AstDecodeStackItem(expr))) + return false; + + return true; +} + +static bool +AstDecodeReturn(AstDecodeContext& c) +{ + if 
(!c.iter().readReturn(nullptr)) + return false; + + AstDecodeStackItem result; + if (!IsVoid(c.retType())) + result = c.popCopy(); + + AstReturn* ret = new(c.lifo) AstReturn(result.expr); + if (!ret) + return false; + + if (!c.push(AstDecodeStackItem(ret))) + return false; + + return true; +} + +static bool +AstDecodeAtomicLoad(AstDecodeContext& c, ThreadOp op) +{ + ValType type; + uint32_t byteSize; + switch (op) { + case ThreadOp::I32AtomicLoad: type = ValType::I32; byteSize = 4; break; + case ThreadOp::I64AtomicLoad: type = ValType::I64; byteSize = 8; break; + case ThreadOp::I32AtomicLoad8U: type = ValType::I32; byteSize = 1; break; + case ThreadOp::I32AtomicLoad16U: type = ValType::I32; byteSize = 2; break; + case ThreadOp::I64AtomicLoad8U: type = ValType::I64; byteSize = 1; break; + case ThreadOp::I64AtomicLoad16U: type = ValType::I64; byteSize = 2; break; + case ThreadOp::I64AtomicLoad32U: type = ValType::I64; byteSize = 4; break; + default: + MOZ_CRASH("Should not happen"); + } + + LinearMemoryAddress addr; + if (!c.iter().readAtomicLoad(&addr, type, byteSize)) + return false; + + AstDecodeStackItem item = c.popCopy(); + + AstAtomicLoad* load = new(c.lifo) AstAtomicLoad(op, AstDecodeLoadStoreAddress(addr, item)); + if (!load) + return false; + + if (!c.push(AstDecodeStackItem(load))) + return false; + + return true; +} + +static bool +AstDecodeAtomicStore(AstDecodeContext& c, ThreadOp op) +{ + ValType type; + uint32_t byteSize; + switch (op) { + case ThreadOp::I32AtomicStore: type = ValType::I32; byteSize = 4; break; + case ThreadOp::I64AtomicStore: type = ValType::I64; byteSize = 8; break; + case ThreadOp::I32AtomicStore8U: type = ValType::I32; byteSize = 1; break; + case ThreadOp::I32AtomicStore16U: type = ValType::I32; byteSize = 2; break; + case ThreadOp::I64AtomicStore8U: type = ValType::I64; byteSize = 1; break; + case ThreadOp::I64AtomicStore16U: type = ValType::I64; byteSize = 2; break; + case ThreadOp::I64AtomicStore32U: type = ValType::I64; byteSize = 4; break; + default: + MOZ_CRASH("Should not happen"); + } + + Nothing nothing; + LinearMemoryAddress addr; + if (!c.iter().readAtomicStore(&addr, type, byteSize, ¬hing)) + return false; + + AstDecodeStackItem value = c.popCopy(); + AstDecodeStackItem item = c.popCopy(); + + AstAtomicStore* store = new(c.lifo) AstAtomicStore(op, AstDecodeLoadStoreAddress(addr, item), value.expr); + if (!store) + return false; + + AstExpr* wrapped = c.handleVoidExpr(store); + if (!wrapped) + return false; + + if (!c.push(AstDecodeStackItem(wrapped))) + return false; + + return true; +} + +static bool +AstDecodeAtomicRMW(AstDecodeContext& c, ThreadOp op) +{ + ValType type; + uint32_t byteSize; + switch (op) { + case ThreadOp::I32AtomicAdd: + case ThreadOp::I32AtomicSub: + case ThreadOp::I32AtomicAnd: + case ThreadOp::I32AtomicOr: + case ThreadOp::I32AtomicXor: + case ThreadOp::I32AtomicXchg: + type = ValType::I32; + byteSize = 4; + break; + case ThreadOp::I64AtomicAdd: + case ThreadOp::I64AtomicSub: + case ThreadOp::I64AtomicAnd: + case ThreadOp::I64AtomicOr: + case ThreadOp::I64AtomicXor: + case ThreadOp::I64AtomicXchg: + type = ValType::I64; + byteSize = 8; + break; + case ThreadOp::I32AtomicAdd8U: + case ThreadOp::I32AtomicSub8U: + case ThreadOp::I32AtomicOr8U: + case ThreadOp::I32AtomicXor8U: + case ThreadOp::I32AtomicXchg8U: + case ThreadOp::I32AtomicAnd8U: + type = ValType::I32; + byteSize = 1; + break; + case ThreadOp::I32AtomicAdd16U: + case ThreadOp::I32AtomicSub16U: + case ThreadOp::I32AtomicAnd16U: + case ThreadOp::I32AtomicOr16U: + 
case ThreadOp::I32AtomicXor16U: + case ThreadOp::I32AtomicXchg16U: + type = ValType::I32; + byteSize = 2; + break; + case ThreadOp::I64AtomicAdd8U: + case ThreadOp::I64AtomicSub8U: + case ThreadOp::I64AtomicAnd8U: + case ThreadOp::I64AtomicOr8U: + case ThreadOp::I64AtomicXor8U: + case ThreadOp::I64AtomicXchg8U: + type = ValType::I64; + byteSize = 1; + break; + case ThreadOp::I64AtomicAdd16U: + case ThreadOp::I64AtomicSub16U: + case ThreadOp::I64AtomicAnd16U: + case ThreadOp::I64AtomicOr16U: + case ThreadOp::I64AtomicXor16U: + case ThreadOp::I64AtomicXchg16U: + type = ValType::I64; + byteSize = 2; + break; + case ThreadOp::I64AtomicAdd32U: + case ThreadOp::I64AtomicSub32U: + case ThreadOp::I64AtomicAnd32U: + case ThreadOp::I64AtomicOr32U: + case ThreadOp::I64AtomicXor32U: + case ThreadOp::I64AtomicXchg32U: + type = ValType::I64; + byteSize = 4; + break; + default: + MOZ_CRASH("Should not happen"); + } + + Nothing nothing; + LinearMemoryAddress addr; + if (!c.iter().readAtomicRMW(&addr, type, byteSize, ¬hing)) + return false; + + AstDecodeStackItem value = c.popCopy(); + AstDecodeStackItem item = c.popCopy(); + + AstAtomicRMW* rmw = new(c.lifo) AstAtomicRMW(op, AstDecodeLoadStoreAddress(addr, item), + value.expr); + if (!rmw) + return false; + + if (!c.push(AstDecodeStackItem(rmw))) + return false; + + return true; +} + +static bool +AstDecodeAtomicCmpXchg(AstDecodeContext& c, ThreadOp op) +{ + ValType type; + uint32_t byteSize; + switch (op) { + case ThreadOp::I32AtomicCmpXchg: type = ValType::I32; byteSize = 4; break; + case ThreadOp::I64AtomicCmpXchg: type = ValType::I64; byteSize = 8; break; + case ThreadOp::I32AtomicCmpXchg8U: type = ValType::I32; byteSize = 1; break; + case ThreadOp::I32AtomicCmpXchg16U: type = ValType::I32; byteSize = 2; break; + case ThreadOp::I64AtomicCmpXchg8U: type = ValType::I64; byteSize = 1; break; + case ThreadOp::I64AtomicCmpXchg16U: type = ValType::I64; byteSize = 2; break; + case ThreadOp::I64AtomicCmpXchg32U: type = ValType::I64; byteSize = 4; break; + default: + MOZ_CRASH("Should not happen"); + } + + Nothing nothing; + LinearMemoryAddress addr; + if (!c.iter().readAtomicCmpXchg(&addr, type, byteSize, ¬hing, ¬hing)) + return false; + + AstDecodeStackItem replacement = c.popCopy(); + AstDecodeStackItem expected = c.popCopy(); + AstDecodeStackItem item = c.popCopy(); + + AstAtomicCmpXchg* cmpxchg = + new(c.lifo) AstAtomicCmpXchg(op, AstDecodeLoadStoreAddress(addr, item), expected.expr, + replacement.expr); + if (!cmpxchg) + return false; + + if (!c.push(AstDecodeStackItem(cmpxchg))) + return false; + + return true; +} + +static bool +AstDecodeWait(AstDecodeContext& c, ThreadOp op) +{ + ValType type; + uint32_t byteSize; + switch (op) { + case ThreadOp::I32Wait: type = ValType::I32; byteSize = 4; break; + case ThreadOp::I64Wait: type = ValType::I64; byteSize = 8; break; + default: + MOZ_CRASH("Should not happen"); + } + + Nothing nothing; + LinearMemoryAddress addr; + if (!c.iter().readWait(&addr, type, byteSize, ¬hing, ¬hing)) + return false; + + AstDecodeStackItem timeout = c.popCopy(); + AstDecodeStackItem value = c.popCopy(); + AstDecodeStackItem item = c.popCopy(); + + AstWait* wait = new(c.lifo) AstWait(op, AstDecodeLoadStoreAddress(addr, item), value.expr, + timeout.expr); + if (!wait) + return false; + + if (!c.push(AstDecodeStackItem(wait))) + return false; + + return true; +} + +static bool +AstDecodeWake(AstDecodeContext& c) +{ + Nothing nothing; + LinearMemoryAddress addr; + if (!c.iter().readWake(&addr, ¬hing)) + return false; + + 
AstDecodeStackItem count = c.popCopy(); + AstDecodeStackItem item = c.popCopy(); + + AstWake* wake = new(c.lifo) AstWake(AstDecodeLoadStoreAddress(addr, item), count.expr); + if (!wake) + return false; + + if (!c.push(AstDecodeStackItem(wake))) + return false; + + return true; +} + +#ifdef ENABLE_WASM_BULKMEM_OPS +static bool +AstDecodeMemCopy(AstDecodeContext& c) +{ + if (!c.iter().readMemCopy(nullptr, nullptr, nullptr)) + return false; + + AstDecodeStackItem dest = c.popCopy(); + AstDecodeStackItem src = c.popCopy(); + AstDecodeStackItem len = c.popCopy(); + + AstMemCopy* mc = new(c.lifo) AstMemCopy(dest.expr, src.expr, len.expr); + + if (!mc) + return false; + + if (!c.push(AstDecodeStackItem(mc))) + return false; + + return true; +} + +static bool +AstDecodeMemFill(AstDecodeContext& c) +{ + if (!c.iter().readMemFill(nullptr, nullptr, nullptr)) + return false; + + AstDecodeStackItem len = c.popCopy(); + AstDecodeStackItem val = c.popCopy(); + AstDecodeStackItem start = c.popCopy(); + + AstMemFill* mf = new(c.lifo) AstMemFill(start.expr, val.expr, len.expr); + + if (!mf) + return false; + + if (!c.push(AstDecodeStackItem(mf))) + return false; + + return true; +} +#endif + +static bool +AstDecodeExpr(AstDecodeContext& c) +{ + uint32_t exprOffset = c.iter().currentOffset(); + OpBytes op; + if (!c.iter().readOp(&op)) + return false; + + AstExpr* tmp; + switch (op.b0) { + case uint16_t(Op::Nop): + if (!AstDecodeNop(c)) + return false; + break; + case uint16_t(Op::Drop): + if (!AstDecodeDrop(c)) + return false; + break; + case uint16_t(Op::Call): + if (!AstDecodeCall(c)) + return false; + break; + case uint16_t(Op::CallIndirect): + if (!AstDecodeCallIndirect(c)) + return false; + break; + case uint16_t(Op::I32Const): + int32_t i32; + if (!c.iter().readI32Const(&i32)) + return false; + tmp = new(c.lifo) AstConst(Val((uint32_t)i32)); + if (!tmp || !c.push(AstDecodeStackItem(tmp))) + return false; + break; + case uint16_t(Op::I64Const): + int64_t i64; + if (!c.iter().readI64Const(&i64)) + return false; + tmp = new(c.lifo) AstConst(Val((uint64_t)i64)); + if (!tmp || !c.push(AstDecodeStackItem(tmp))) + return false; + break; + case uint16_t(Op::F32Const): { + float f32; + if (!c.iter().readF32Const(&f32)) + return false; + tmp = new(c.lifo) AstConst(Val(f32)); + if (!tmp || !c.push(AstDecodeStackItem(tmp))) + return false; + break; + } + case uint16_t(Op::F64Const): { + double f64; + if (!c.iter().readF64Const(&f64)) + return false; + tmp = new(c.lifo) AstConst(Val(f64)); + if (!tmp || !c.push(AstDecodeStackItem(tmp))) + return false; + break; + } + case uint16_t(Op::GetLocal): + if (!AstDecodeGetLocal(c)) + return false; + break; + case uint16_t(Op::SetLocal): + if (!AstDecodeSetLocal(c)) + return false; + break; + case uint16_t(Op::TeeLocal): + if (!AstDecodeTeeLocal(c)) + return false; + break; + case uint16_t(Op::Select): + if (!AstDecodeSelect(c)) + return false; + break; + case uint16_t(Op::Block): + case uint16_t(Op::Loop): + if (!AstDecodeBlock(c, Op(op.b0))) + return false; + break; + case uint16_t(Op::If): + if (!AstDecodeIf(c)) + return false; + break; + case uint16_t(Op::Else): + if (!AstDecodeElse(c)) + return false; + break; + case uint16_t(Op::End): + if (!AstDecodeEnd(c)) + return false; + break; + case uint16_t(Op::I32Clz): + case uint16_t(Op::I32Ctz): + case uint16_t(Op::I32Popcnt): + if (!AstDecodeUnary(c, ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Clz): + case uint16_t(Op::I64Ctz): + case uint16_t(Op::I64Popcnt): + if (!AstDecodeUnary(c, 
ValType::I64, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32Abs): + case uint16_t(Op::F32Neg): + case uint16_t(Op::F32Ceil): + case uint16_t(Op::F32Floor): + case uint16_t(Op::F32Sqrt): + case uint16_t(Op::F32Trunc): + case uint16_t(Op::F32Nearest): + if (!AstDecodeUnary(c, ValType::F32, Op(op.b0))) + return false; + break; + case uint16_t(Op::F64Abs): + case uint16_t(Op::F64Neg): + case uint16_t(Op::F64Ceil): + case uint16_t(Op::F64Floor): + case uint16_t(Op::F64Sqrt): + case uint16_t(Op::F64Trunc): + case uint16_t(Op::F64Nearest): + if (!AstDecodeUnary(c, ValType::F64, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Add): + case uint16_t(Op::I32Sub): + case uint16_t(Op::I32Mul): + case uint16_t(Op::I32DivS): + case uint16_t(Op::I32DivU): + case uint16_t(Op::I32RemS): + case uint16_t(Op::I32RemU): + case uint16_t(Op::I32And): + case uint16_t(Op::I32Or): + case uint16_t(Op::I32Xor): + case uint16_t(Op::I32Shl): + case uint16_t(Op::I32ShrS): + case uint16_t(Op::I32ShrU): + case uint16_t(Op::I32Rotl): + case uint16_t(Op::I32Rotr): + if (!AstDecodeBinary(c, ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Add): + case uint16_t(Op::I64Sub): + case uint16_t(Op::I64Mul): + case uint16_t(Op::I64DivS): + case uint16_t(Op::I64DivU): + case uint16_t(Op::I64RemS): + case uint16_t(Op::I64RemU): + case uint16_t(Op::I64And): + case uint16_t(Op::I64Or): + case uint16_t(Op::I64Xor): + case uint16_t(Op::I64Shl): + case uint16_t(Op::I64ShrS): + case uint16_t(Op::I64ShrU): + case uint16_t(Op::I64Rotl): + case uint16_t(Op::I64Rotr): + if (!AstDecodeBinary(c, ValType::I64, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32Add): + case uint16_t(Op::F32Sub): + case uint16_t(Op::F32Mul): + case uint16_t(Op::F32Div): + case uint16_t(Op::F32Min): + case uint16_t(Op::F32Max): + case uint16_t(Op::F32CopySign): + if (!AstDecodeBinary(c, ValType::F32, Op(op.b0))) + return false; + break; + case uint16_t(Op::F64Add): + case uint16_t(Op::F64Sub): + case uint16_t(Op::F64Mul): + case uint16_t(Op::F64Div): + case uint16_t(Op::F64Min): + case uint16_t(Op::F64Max): + case uint16_t(Op::F64CopySign): + if (!AstDecodeBinary(c, ValType::F64, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Eq): + case uint16_t(Op::I32Ne): + case uint16_t(Op::I32LtS): + case uint16_t(Op::I32LtU): + case uint16_t(Op::I32LeS): + case uint16_t(Op::I32LeU): + case uint16_t(Op::I32GtS): + case uint16_t(Op::I32GtU): + case uint16_t(Op::I32GeS): + case uint16_t(Op::I32GeU): + if (!AstDecodeComparison(c, ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Eq): + case uint16_t(Op::I64Ne): + case uint16_t(Op::I64LtS): + case uint16_t(Op::I64LtU): + case uint16_t(Op::I64LeS): + case uint16_t(Op::I64LeU): + case uint16_t(Op::I64GtS): + case uint16_t(Op::I64GtU): + case uint16_t(Op::I64GeS): + case uint16_t(Op::I64GeU): + if (!AstDecodeComparison(c, ValType::I64, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32Eq): + case uint16_t(Op::F32Ne): + case uint16_t(Op::F32Lt): + case uint16_t(Op::F32Le): + case uint16_t(Op::F32Gt): + case uint16_t(Op::F32Ge): + if (!AstDecodeComparison(c, ValType::F32, Op(op.b0))) + return false; + break; + case uint16_t(Op::F64Eq): + case uint16_t(Op::F64Ne): + case uint16_t(Op::F64Lt): + case uint16_t(Op::F64Le): + case uint16_t(Op::F64Gt): + case uint16_t(Op::F64Ge): + if (!AstDecodeComparison(c, ValType::F64, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Eqz): + if (!AstDecodeConversion(c, ValType::I32, 
ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Eqz): + case uint16_t(Op::I32WrapI64): + if (!AstDecodeConversion(c, ValType::I64, ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32TruncSF32): + case uint16_t(Op::I32TruncUF32): + case uint16_t(Op::I32ReinterpretF32): + if (!AstDecodeConversion(c, ValType::F32, ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32TruncSF64): + case uint16_t(Op::I32TruncUF64): + if (!AstDecodeConversion(c, ValType::F64, ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64ExtendSI32): + case uint16_t(Op::I64ExtendUI32): + if (!AstDecodeConversion(c, ValType::I32, ValType::I64, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64TruncSF32): + case uint16_t(Op::I64TruncUF32): + if (!AstDecodeConversion(c, ValType::F32, ValType::I64, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64TruncSF64): + case uint16_t(Op::I64TruncUF64): + case uint16_t(Op::I64ReinterpretF64): + if (!AstDecodeConversion(c, ValType::F64, ValType::I64, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32ConvertSI32): + case uint16_t(Op::F32ConvertUI32): + case uint16_t(Op::F32ReinterpretI32): + if (!AstDecodeConversion(c, ValType::I32, ValType::F32, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32ConvertSI64): + case uint16_t(Op::F32ConvertUI64): + if (!AstDecodeConversion(c, ValType::I64, ValType::F32, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32DemoteF64): + if (!AstDecodeConversion(c, ValType::F64, ValType::F32, Op(op.b0))) + return false; + break; + case uint16_t(Op::F64ConvertSI32): + case uint16_t(Op::F64ConvertUI32): + if (!AstDecodeConversion(c, ValType::I32, ValType::F64, Op(op.b0))) + return false; + break; + case uint16_t(Op::F64ConvertSI64): + case uint16_t(Op::F64ConvertUI64): + case uint16_t(Op::F64ReinterpretI64): + if (!AstDecodeConversion(c, ValType::I64, ValType::F64, Op(op.b0))) + return false; + break; + case uint16_t(Op::F64PromoteF32): + if (!AstDecodeConversion(c, ValType::F32, ValType::F64, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Extend8S): + case uint16_t(Op::I32Extend16S): + if (!AstDecodeConversion(c, ValType::I32, ValType::I32, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Extend8S): + case uint16_t(Op::I64Extend16S): + case uint16_t(Op::I64Extend32S): + if (!AstDecodeConversion(c, ValType::I64, ValType::I64, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Load8S): + case uint16_t(Op::I32Load8U): + if (!AstDecodeLoad(c, ValType::I32, 1, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Load16S): + case uint16_t(Op::I32Load16U): + if (!AstDecodeLoad(c, ValType::I32, 2, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Load): + if (!AstDecodeLoad(c, ValType::I32, 4, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Load8S): + case uint16_t(Op::I64Load8U): + if (!AstDecodeLoad(c, ValType::I64, 1, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Load16S): + case uint16_t(Op::I64Load16U): + if (!AstDecodeLoad(c, ValType::I64, 2, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Load32S): + case uint16_t(Op::I64Load32U): + if (!AstDecodeLoad(c, ValType::I64, 4, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Load): + if (!AstDecodeLoad(c, ValType::I64, 8, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32Load): + if (!AstDecodeLoad(c, ValType::F32, 4, Op(op.b0))) + return false; + break; + case 
uint16_t(Op::F64Load): + if (!AstDecodeLoad(c, ValType::F64, 8, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Store8): + if (!AstDecodeStore(c, ValType::I32, 1, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Store16): + if (!AstDecodeStore(c, ValType::I32, 2, Op(op.b0))) + return false; + break; + case uint16_t(Op::I32Store): + if (!AstDecodeStore(c, ValType::I32, 4, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Store8): + if (!AstDecodeStore(c, ValType::I64, 1, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Store16): + if (!AstDecodeStore(c, ValType::I64, 2, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Store32): + if (!AstDecodeStore(c, ValType::I64, 4, Op(op.b0))) + return false; + break; + case uint16_t(Op::I64Store): + if (!AstDecodeStore(c, ValType::I64, 8, Op(op.b0))) + return false; + break; + case uint16_t(Op::F32Store): + if (!AstDecodeStore(c, ValType::F32, 4, Op(op.b0))) + return false; + break; + case uint16_t(Op::F64Store): + if (!AstDecodeStore(c, ValType::F64, 8, Op(op.b0))) + return false; + break; + case uint16_t(Op::CurrentMemory): + if (!AstDecodeCurrentMemory(c)) + return false; + break; + case uint16_t(Op::GrowMemory): + if (!AstDecodeGrowMemory(c)) + return false; + break; + case uint16_t(Op::SetGlobal): + if (!AstDecodeSetGlobal(c)) + return false; + break; + case uint16_t(Op::GetGlobal): + if (!AstDecodeGetGlobal(c)) + return false; + break; + case uint16_t(Op::Br): + case uint16_t(Op::BrIf): + if (!AstDecodeBranch(c, Op(op.b0))) + return false; + break; + case uint16_t(Op::BrTable): + if (!AstDecodeBrTable(c)) + return false; + break; + case uint16_t(Op::Return): + if (!AstDecodeReturn(c)) + return false; + break; + case uint16_t(Op::Unreachable): + if (!c.iter().readUnreachable()) + return false; + tmp = new(c.lifo) AstUnreachable(); + if (!tmp) + return false; + if (!c.push(AstDecodeStackItem(tmp))) + return false; + break; + case uint16_t(Op::MiscPrefix): + switch (op.b1) { +#ifdef ENABLE_WASM_SATURATING_TRUNC_OPS + case uint16_t(MiscOp::I32TruncSSatF32): + case uint16_t(MiscOp::I32TruncUSatF32): + if (!AstDecodeExtraConversion(c, ValType::F32, ValType::I32, MiscOp(op.b1))) + return false; + break; + case uint16_t(MiscOp::I32TruncSSatF64): + case uint16_t(MiscOp::I32TruncUSatF64): + if (!AstDecodeExtraConversion(c, ValType::F64, ValType::I32, MiscOp(op.b1))) + return false; + break; + case uint16_t(MiscOp::I64TruncSSatF32): + case uint16_t(MiscOp::I64TruncUSatF32): + if (!AstDecodeExtraConversion(c, ValType::F32, ValType::I64, MiscOp(op.b1))) + return false; + break; + case uint16_t(MiscOp::I64TruncSSatF64): + case uint16_t(MiscOp::I64TruncUSatF64): + if (!AstDecodeExtraConversion(c, ValType::F64, ValType::I64, MiscOp(op.b1))) + return false; + break; +#endif +#ifdef ENABLE_WASM_BULKMEM_OPS + case uint16_t(MiscOp::MemCopy): + if (!AstDecodeMemCopy(c)) + return false; + break; + case uint16_t(MiscOp::MemFill): + if (!AstDecodeMemFill(c)) + return false; + break; +#endif + default: + return c.iter().unrecognizedOpcode(&op); + } + break; + case uint16_t(Op::ThreadPrefix): + switch (op.b1) { + case uint16_t(ThreadOp::Wake): + if (!AstDecodeWake(c)) + return false; + break; + case uint16_t(ThreadOp::I32Wait): + case uint16_t(ThreadOp::I64Wait): + if (!AstDecodeWait(c, ThreadOp(op.b1))) + return false; + break; + case uint16_t(ThreadOp::I32AtomicLoad): + case uint16_t(ThreadOp::I64AtomicLoad): + case uint16_t(ThreadOp::I32AtomicLoad8U): + case uint16_t(ThreadOp::I32AtomicLoad16U): + case 
uint16_t(ThreadOp::I64AtomicLoad8U): + case uint16_t(ThreadOp::I64AtomicLoad16U): + case uint16_t(ThreadOp::I64AtomicLoad32U): + if (!AstDecodeAtomicLoad(c, ThreadOp(op.b1))) + return false; + break; + case uint16_t(ThreadOp::I32AtomicStore): + case uint16_t(ThreadOp::I64AtomicStore): + case uint16_t(ThreadOp::I32AtomicStore8U): + case uint16_t(ThreadOp::I32AtomicStore16U): + case uint16_t(ThreadOp::I64AtomicStore8U): + case uint16_t(ThreadOp::I64AtomicStore16U): + case uint16_t(ThreadOp::I64AtomicStore32U): + if (!AstDecodeAtomicStore(c, ThreadOp(op.b1))) + return false; + break; + case uint16_t(ThreadOp::I32AtomicAdd): + case uint16_t(ThreadOp::I64AtomicAdd): + case uint16_t(ThreadOp::I32AtomicAdd8U): + case uint16_t(ThreadOp::I32AtomicAdd16U): + case uint16_t(ThreadOp::I64AtomicAdd8U): + case uint16_t(ThreadOp::I64AtomicAdd16U): + case uint16_t(ThreadOp::I64AtomicAdd32U): + case uint16_t(ThreadOp::I32AtomicSub): + case uint16_t(ThreadOp::I64AtomicSub): + case uint16_t(ThreadOp::I32AtomicSub8U): + case uint16_t(ThreadOp::I32AtomicSub16U): + case uint16_t(ThreadOp::I64AtomicSub8U): + case uint16_t(ThreadOp::I64AtomicSub16U): + case uint16_t(ThreadOp::I64AtomicSub32U): + case uint16_t(ThreadOp::I32AtomicAnd): + case uint16_t(ThreadOp::I64AtomicAnd): + case uint16_t(ThreadOp::I32AtomicAnd8U): + case uint16_t(ThreadOp::I32AtomicAnd16U): + case uint16_t(ThreadOp::I64AtomicAnd8U): + case uint16_t(ThreadOp::I64AtomicAnd16U): + case uint16_t(ThreadOp::I64AtomicAnd32U): + case uint16_t(ThreadOp::I32AtomicOr): + case uint16_t(ThreadOp::I64AtomicOr): + case uint16_t(ThreadOp::I32AtomicOr8U): + case uint16_t(ThreadOp::I32AtomicOr16U): + case uint16_t(ThreadOp::I64AtomicOr8U): + case uint16_t(ThreadOp::I64AtomicOr16U): + case uint16_t(ThreadOp::I64AtomicOr32U): + case uint16_t(ThreadOp::I32AtomicXor): + case uint16_t(ThreadOp::I64AtomicXor): + case uint16_t(ThreadOp::I32AtomicXor8U): + case uint16_t(ThreadOp::I32AtomicXor16U): + case uint16_t(ThreadOp::I64AtomicXor8U): + case uint16_t(ThreadOp::I64AtomicXor16U): + case uint16_t(ThreadOp::I64AtomicXor32U): + case uint16_t(ThreadOp::I32AtomicXchg): + case uint16_t(ThreadOp::I64AtomicXchg): + case uint16_t(ThreadOp::I32AtomicXchg8U): + case uint16_t(ThreadOp::I32AtomicXchg16U): + case uint16_t(ThreadOp::I64AtomicXchg8U): + case uint16_t(ThreadOp::I64AtomicXchg16U): + case uint16_t(ThreadOp::I64AtomicXchg32U): + if (!AstDecodeAtomicRMW(c, ThreadOp(op.b1))) + return false; + break; + case uint16_t(ThreadOp::I32AtomicCmpXchg): + case uint16_t(ThreadOp::I64AtomicCmpXchg): + case uint16_t(ThreadOp::I32AtomicCmpXchg8U): + case uint16_t(ThreadOp::I32AtomicCmpXchg16U): + case uint16_t(ThreadOp::I64AtomicCmpXchg8U): + case uint16_t(ThreadOp::I64AtomicCmpXchg16U): + case uint16_t(ThreadOp::I64AtomicCmpXchg32U): + if (!AstDecodeAtomicCmpXchg(c, ThreadOp(op.b1))) + return false; + break; + default: + return c.iter().unrecognizedOpcode(&op); + } + break; + case uint16_t(Op::MozPrefix): + return c.iter().unrecognizedOpcode(&op); + default: + return c.iter().unrecognizedOpcode(&op); + } + + AstExpr* lastExpr = c.top().expr; + if (lastExpr) { + // If last node is a 'first' node, the offset must assigned to it + // last child. 
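+ // (that is, the bytecode offset is set on the last child of the AstFirst node instead of on the First node itself)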
+ if (lastExpr->kind() == AstExprKind::First) + lastExpr->as().exprs().back()->setOffset(exprOffset); + else + lastExpr->setOffset(exprOffset); + } + return true; +} + +static bool +AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcIndex, AstFunc** func) +{ + uint32_t offset = c.d.currentOffset(); + uint32_t bodySize; + if (!c.d.readVarU32(&bodySize)) + return c.d.fail("expected number of function body bytes"); + + if (c.d.bytesRemain() < bodySize) + return c.d.fail("function body length too big"); + + const uint8_t* bodyBegin = c.d.currentPosition(); + const uint8_t* bodyEnd = bodyBegin + bodySize; + + const FuncTypeWithId* funcType = c.env().funcTypes[funcIndex]; + + ValTypeVector locals; + if (!locals.appendAll(funcType->args())) + return false; + + if (!DecodeLocalEntries(c.d, ModuleKind::Wasm, c.env().gcTypesEnabled, &locals)) + return false; + + AstDecodeOpIter iter(c.env(), c.d); + c.startFunction(&iter, &locals, funcType->ret()); + + AstName funcName; + if (!GenerateName(c, AstName(u"func"), funcIndex, &funcName)) + return false; + + uint32_t numParams = funcType->args().length(); + uint32_t numLocals = locals.length(); + + AstValTypeVector vars(c.lifo); + for (uint32_t i = numParams; i < numLocals; i++) { + if (!vars.append(locals[i])) + return false; + } + + AstNameVector localsNames(c.lifo); + for (uint32_t i = 0; i < numLocals; i++) { + AstName varName; + if (!GenerateName(c, AstName(u"var"), i, &varName)) + return false; + if (!localsNames.append(varName)) + return false; + } + + if (!c.iter().readFunctionStart(funcType->ret())) + return false; + + if (!c.depths().append(c.exprs().length())) + return false; + + uint32_t endOffset = offset; + while (c.d.currentPosition() < bodyEnd) { + if (!AstDecodeExpr(c)) + return false; + + const AstDecodeStackItem& item = c.top(); + if (!item.expr) { // Op::End was found + c.popBack(); + break; + } + + endOffset = c.d.currentOffset(); + } + + AstExprVector body(c.lifo); + for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end(); i != e; ++i) { + if (!body.append(i->expr)) + return false; + } + c.exprs().shrinkTo(c.depths().popCopy()); + + if (!c.iter().readFunctionEnd(bodyEnd)) + return false; + + c.endFunction(); + + if (c.d.currentPosition() != bodyEnd) + return c.d.fail("function body length mismatch"); + + size_t funcTypeIndex = c.env().funcIndexToFuncTypeIndex(funcIndex); + + AstRef funcTypeRef; + if (!GenerateRef(c, AstName(u"type"), funcTypeIndex, &funcTypeRef)) + return false; + + *func = new(c.lifo) AstFunc(funcName, funcTypeRef, std::move(vars), std::move(localsNames), + std::move(body)); + if (!*func) + return false; + (*func)->setOffset(offset); + (*func)->setEndOffset(endOffset); + + return true; +} + +/*****************************************************************************/ +// wasm decoding and generation + +static bool +AstCreateTypes(AstDecodeContext& c) +{ + uint32_t typeIndexForNames = 0; + for (const TypeDef& td : c.env().types) { + if (td.isFuncType()) { + const FuncType& funcType = td.funcType(); + + AstValTypeVector args(c.lifo); + if (!args.appendAll(funcType.args())) + return false; + + AstFuncType ftNoName(std::move(args), funcType.ret()); + + AstName ftName; + if (!GenerateName(c, AstName(u"type"), typeIndexForNames, &ftName)) + return false; + + AstFuncType* astFuncType = new(c.lifo) AstFuncType(ftName, std::move(ftNoName)); + if (!astFuncType || !c.module().append(astFuncType)) + return false; + } else if (td.isStructType()) { + const StructType& st = td.structType(); + + 
AstValTypeVector fieldTypes(c.lifo); + if (!fieldTypes.appendAll(st.fields_)) + return false; + + AstNameVector fieldNames(c.lifo); + if (!fieldNames.resize(fieldTypes.length())) + return false; + + // The multiplication ensures that generated field names are unique + // within the module, though the resulting namespace is very sparse. + + for (size_t fieldIndex = 0; fieldIndex < fieldTypes.length(); fieldIndex++) { + size_t idx = (typeIndexForNames * MaxStructFields) + fieldIndex; + if (!GenerateName(c, AstName(u"f"), idx, &fieldNames[fieldIndex])) + return false; + } + + AstStructType stNoName(std::move(fieldNames), std::move(fieldTypes)); + + AstName stName; + if (!GenerateName(c, AstName(u"type"), typeIndexForNames, &stName)) + return false; + + AstStructType* astStruct = new(c.lifo) AstStructType(stName, std::move(stNoName)); + if (!astStruct || !c.module().append(astStruct)) + return false; + } else { + MOZ_CRASH(); + } + typeIndexForNames++; + } + + return true; +} + +static bool +ToAstName(AstDecodeContext& c, const char* name, AstName* out) +{ + size_t len = strlen(name); + char16_t* buffer = static_cast(c.lifo.alloc(len * sizeof(char16_t))); + if (!buffer) + return false; + + for (size_t i = 0; i < len; i++) + buffer[i] = name[i]; + + *out = AstName(buffer, len); + return true; +} + +static bool +AstCreateImports(AstDecodeContext& c) +{ + size_t lastFunc = 0; + size_t lastGlobal = 0; + size_t lastTable = 0; + size_t lastMemory = 0; + + Maybe memory; + if (c.env().usesMemory()) { + memory = Some(Limits(c.env().minMemoryLength, + c.env().maxMemoryLength, + c.env().memoryUsage == MemoryUsage::Shared + ? Shareable::True + : Shareable::False)); + } + + for (size_t importIndex = 0; importIndex < c.env().imports.length(); importIndex++) { + const Import& import = c.env().imports[importIndex]; + + AstName moduleName; + if (!ToAstName(c, import.module.get(), &moduleName)) + return false; + + AstName fieldName; + if (!ToAstName(c, import.field.get(), &fieldName)) + return false; + + AstImport* ast = nullptr; + switch (import.kind) { + case DefinitionKind::Function: { + AstName importName; + if (!GenerateName(c, AstName(u"import"), lastFunc, &importName)) + return false; + + size_t funcTypeIndex = c.env().funcIndexToFuncTypeIndex(lastFunc); + + AstRef funcTypeRef; + if (!GenerateRef(c, AstName(u"type"), funcTypeIndex, &funcTypeRef)) + return false; + + ast = new(c.lifo) AstImport(importName, moduleName, fieldName, funcTypeRef); + lastFunc++; + break; + } + case DefinitionKind::Global: { + AstName importName; + if (!GenerateName(c, AstName(u"global"), lastGlobal, &importName)) + return false; + + const GlobalDesc& global = c.env().globals[lastGlobal]; + ValType type = global.type(); + bool isMutable = global.isMutable(); + + ast = new(c.lifo) AstImport(importName, moduleName, fieldName, + AstGlobal(importName, type, isMutable)); + lastGlobal++; + break; + } + case DefinitionKind::Table: { + AstName importName; + if (!GenerateName(c, AstName(u"table"), lastTable, &importName)) + return false; + + ast = new(c.lifo) AstImport(importName, moduleName, fieldName, DefinitionKind::Table, + c.env().tables[lastTable].limits); + lastTable++; + break; + } + case DefinitionKind::Memory: { + AstName importName; + if (!GenerateName(c, AstName(u"memory"), lastMemory, &importName)) + return false; + + ast = new(c.lifo) AstImport(importName, moduleName, fieldName, DefinitionKind::Memory, + *memory); + lastMemory++; + break; + } + } + + if (!ast || !c.module().append(ast)) + return false; + } + + return true; 
+} + +static bool +AstCreateTables(AstDecodeContext& c) +{ + size_t numImported = c.module().tables().length(); + + for (size_t i = numImported; i < c.env().tables.length(); i++) { + AstName name; + if (!GenerateName(c, AstName(u"table"), i, &name)) + return false; + if (!c.module().addTable(name, c.env().tables[i].limits)) + return false; + } + + return true; +} + +static bool +AstCreateMemory(AstDecodeContext& c) +{ + bool importedMemory = !!c.module().memories().length(); + if (!c.env().usesMemory() || importedMemory) + return true; + + AstName name; + if (!GenerateName(c, AstName(u"memory"), c.module().memories().length(), &name)) + return false; + + return c.module().addMemory(name, Limits(c.env().minMemoryLength, + c.env().maxMemoryLength, + c.env().memoryUsage == MemoryUsage::Shared + ? Shareable::True + : Shareable::False)); +} + +static AstExpr* +ToAstExpr(AstDecodeContext& c, const InitExpr& initExpr) +{ + switch (initExpr.kind()) { + case InitExpr::Kind::Constant: { + return new(c.lifo) AstConst(Val(initExpr.val())); + } + case InitExpr::Kind::GetGlobal: { + AstRef globalRef; + if (!GenerateRef(c, AstName(u"global"), initExpr.globalIndex(), &globalRef)) + return nullptr; + return new(c.lifo) AstGetGlobal(globalRef); + } + } + return nullptr; +} + +static bool +AstCreateGlobals(AstDecodeContext& c) +{ + for (uint32_t i = 0; i < c.env().globals.length(); i++) { + const GlobalDesc& global = c.env().globals[i]; + if (global.isImport()) + continue; + + AstName name; + if (!GenerateName(c, AstName(u"global"), i, &name)) + return false; + + AstExpr* init = global.isConstant() + ? new(c.lifo) AstConst(global.constantValue()) + : ToAstExpr(c, global.initExpr()); + if (!init) + return false; + + auto* g = new(c.lifo) AstGlobal(name, global.type(), global.isMutable(), Some(init)); + if (!g || !c.module().append(g)) + return false; + } + + return true; +} + +static bool +AstCreateExports(AstDecodeContext& c) +{ + for (const Export& exp : c.env().exports) { + size_t index; + switch (exp.kind()) { + case DefinitionKind::Function: index = exp.funcIndex(); break; + case DefinitionKind::Global: index = exp.globalIndex(); break; + case DefinitionKind::Memory: index = 0; break; + case DefinitionKind::Table: index = 0; break; + } + + AstName name; + if (!ToAstName(c, exp.fieldName(), &name)) + return false; + + AstExport* e = new(c.lifo) AstExport(name, exp.kind(), AstRef(index)); + if (!e || !c.module().append(e)) + return false; + } + + return true; +} + +static bool +AstCreateStartFunc(AstDecodeContext &c) +{ + if (!c.env().startFuncIndex) + return true; + + AstRef funcRef; + if (!GenerateFuncRef(c, *c.env().startFuncIndex, &funcRef)) + return false; + + c.module().setStartFunc(AstStartFunc(funcRef)); + return true; +} + +static bool +AstCreateElems(AstDecodeContext &c) +{ + for (const ElemSegment& seg : c.env().elemSegments) { + AstRefVector elems(c.lifo); + if (!elems.reserve(seg.elemFuncIndices.length())) + return false; + + for (uint32_t i : seg.elemFuncIndices) + elems.infallibleAppend(AstRef(i)); + + AstExpr* offset = ToAstExpr(c, seg.offset); + if (!offset) + return false; + + AstElemSegment* segment = new(c.lifo) AstElemSegment(offset, std::move(elems)); + if (!segment || !c.module().append(segment)) + return false; + } + + return true; +} + +static bool +AstDecodeEnvironment(AstDecodeContext& c) +{ + if (!DecodeModuleEnvironment(c.d, &c.env())) + return false; + + if (!AstCreateTypes(c)) + return false; + + if (!AstCreateImports(c)) + return false; + + if (!AstCreateTables(c)) + return 
false; + + if (!AstCreateMemory(c)) + return false; + + if (!AstCreateGlobals(c)) + return false; + + if (!AstCreateExports(c)) + return false; + + if (!AstCreateStartFunc(c)) + return false; + + if (!AstCreateElems(c)) + return false; + + return true; +} + +static bool +AstDecodeCodeSection(AstDecodeContext& c) +{ + if (!c.env().codeSection) { + if (c.env().numFuncDefs() != 0) + return c.d.fail("expected function bodies"); + return true; + } + + uint32_t numFuncBodies; + if (!c.d.readVarU32(&numFuncBodies)) + return c.d.fail("expected function body count"); + + if (numFuncBodies != c.env().numFuncDefs()) + return c.d.fail("function body count does not match function signature count"); + + for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncBodies; funcDefIndex++) { + AstFunc* func; + if (!AstDecodeFunctionBody(c, c.module().numFuncImports() + funcDefIndex, &func)) + return false; + if (!c.module().append(func)) + return false; + } + + return c.d.finishSection(*c.env().codeSection, "code"); +} + +// Number of bytes to display in a single fragment of a data section (per line). +static const size_t WRAP_DATA_BYTES = 30; + +static bool +AstDecodeModuleTail(AstDecodeContext& c) +{ + MOZ_ASSERT(c.module().memories().length() <= 1, "at most one memory in MVP"); + + if (!DecodeModuleTail(c.d, &c.env())) + return false; + + for (DataSegment& s : c.env().dataSegments) { + char16_t* buffer = static_cast(c.lifo.alloc(s.length * sizeof(char16_t))); + if (!buffer) + return false; + + const uint8_t* src = c.d.begin() + s.bytecodeOffset; + for (size_t i = 0; i < s.length; i++) + buffer[i] = src[i]; + + AstExpr* offset = ToAstExpr(c, s.offset); + if (!offset) + return false; + + AstNameVector fragments(c.lifo); + for (size_t start = 0; start < s.length; start += WRAP_DATA_BYTES) { + AstName name(buffer + start, Min(WRAP_DATA_BYTES, s.length - start)); + if (!fragments.append(name)) + return false; + } + + AstDataSegment* segment = new(c.lifo) AstDataSegment(offset, std::move(fragments)); + if (!segment || !c.module().append(segment)) + return false; + } + + return true; +} + +bool +wasm::BinaryToAst(JSContext* cx, const uint8_t* bytes, uint32_t length, LifoAlloc& lifo, + AstModule** module) +{ + AstModule* result = new(lifo) AstModule(lifo); + if (!result || !result->init()) + return false; + + UniqueChars error; + Decoder d(bytes, bytes + length, 0, &error, nullptr, /* resilient */ true); + AstDecodeContext c(cx, lifo, d, *result, /* generateNames */ true, HasGcTypes::True); + + if (!AstDecodeEnvironment(c) || + !AstDecodeCodeSection(c) || + !AstDecodeModuleTail(c)) + { + if (error) { + JS_ReportErrorNumberUTF8(c.cx, GetErrorMessage, nullptr, JSMSG_WASM_COMPILE_ERROR, + error.get()); + return false; + } + ReportOutOfMemory(c.cx); + return false; + } + + MOZ_ASSERT(!error, "unreported error in decoding"); + + *module = result; + return true; +} diff --git a/js/src/wasm/WasmBinaryToAST.h b/js/src/wasm/WasmBinaryToAST.h new file mode 100644 index 000000000000..320862dbbc7c --- /dev/null +++ b/js/src/wasm/WasmBinaryToAST.h @@ -0,0 +1,37 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2015 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef wasmbinarytoast_h +#define wasmbinarytoast_h + +#include "ds/LifoAlloc.h" + +#include "wasm/WasmAST.h" +#include "wasm/WasmTypes.h" + +namespace js { +namespace wasm { + +bool +BinaryToAst(JSContext* cx, const uint8_t* bytes, uint32_t length, LifoAlloc& lifo, + AstModule** module); + +} // end wasm namespace +} // end js namespace + +#endif // namespace wasmbinarytoast_h diff --git a/js/src/wasm/WasmBinaryToText.cpp b/js/src/wasm/WasmBinaryToText.cpp new file mode 100644 index 000000000000..c7f5110d7ef0 --- /dev/null +++ b/js/src/wasm/WasmBinaryToText.cpp @@ -0,0 +1,2138 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2015 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "wasm/WasmBinaryToText.h" + +#include "jsnum.h" + +#include "util/StringBuffer.h" +#include "vm/ArrayBufferObject.h" +#include "wasm/WasmAST.h" +#include "wasm/WasmBinaryToAST.h" +#include "wasm/WasmDebug.h" +#include "wasm/WasmTextUtils.h" +#include "wasm/WasmTypes.h" + +using namespace js; +using namespace js::wasm; + +using mozilla::IsInfinite; +using mozilla::IsNaN; +using mozilla::IsNegativeZero; + +struct WasmRenderContext +{ + JSContext* cx; + AstModule* module; + WasmPrintBuffer& buffer; + GeneratedSourceMap* maybeSourceMap; + uint32_t indent; + uint32_t currentFuncIndex; + + WasmRenderContext(JSContext* cx, AstModule* module, WasmPrintBuffer& buffer, + GeneratedSourceMap* sourceMap) + : cx(cx), + module(module), + buffer(buffer), + maybeSourceMap(sourceMap), + indent(0), + currentFuncIndex(0) + {} + + StringBuffer& sb() { return buffer.stringBuffer(); } +}; + +/*****************************************************************************/ +// utilities + +// Return true on purpose, so that we have a useful error message to provide to +// the user. 
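+// (Fail() clears whatever has been rendered so far and replaces it with the message; it only returns false when appending that message itself fails.)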
+static bool +Fail(WasmRenderContext& c, const char* msg) +{ + c.buffer.stringBuffer().clear(); + + return c.buffer.append("There was a problem when rendering the wasm text format: ") && + c.buffer.append(msg, strlen(msg)) && + c.buffer.append("\nYou should consider file a bug on Bugzilla in the " + "Core:::JavaScript Engine::JIT component at " + "https://bugzilla.mozilla.org/enter_bug.cgi."); +} + +static bool +RenderIndent(WasmRenderContext& c) +{ + for (uint32_t i = 0; i < c.indent; i++) { + if (!c.buffer.append(" ")) + return false; + } + return true; +} + +static bool +RenderInt32(WasmRenderContext& c, int32_t num) +{ + return NumberValueToStringBuffer(c.cx, Int32Value(num), c.sb()); +} + +static bool +RenderInt64(WasmRenderContext& c, int64_t num) +{ + if (num < 0 && !c.buffer.append("-")) + return false; + if (!num) + return c.buffer.append("0"); + return RenderInBase<10>(c.sb(), mozilla::Abs(num)); +} + +static bool +RenderDouble(WasmRenderContext& c, double d) +{ + if (IsNaN(d)) + return RenderNaN(c.sb(), d); + if (IsNegativeZero(d)) + return c.buffer.append("-0"); + if (IsInfinite(d)) { + if (d > 0) + return c.buffer.append("infinity"); + return c.buffer.append("-infinity"); + } + return NumberValueToStringBuffer(c.cx, DoubleValue(d), c.sb()); +} + +static bool +RenderFloat32(WasmRenderContext& c, float f) +{ + if (IsNaN(f)) + return RenderNaN(c.sb(), f); + return RenderDouble(c, double(f)); +} + +static bool +RenderEscapedString(WasmRenderContext& c, const AstName& s) +{ + size_t length = s.length(); + const char16_t* p = s.begin(); + for (size_t i = 0; i < length; i++) { + char16_t byte = p[i]; + switch (byte) { + case '\n': + if (!c.buffer.append("\\n")) + return false; + break; + case '\r': + if (!c.buffer.append("\\0d")) + return false; + break; + case '\t': + if (!c.buffer.append("\\t")) + return false; + break; + case '\f': + if (!c.buffer.append("\\0c")) + return false; + break; + case '\b': + if (!c.buffer.append("\\08")) + return false; + break; + case '\\': + if (!c.buffer.append("\\\\")) + return false; + break; + case '"' : + if (!c.buffer.append("\\\"")) + return false; + break; + case '\'': + if (!c.buffer.append("\\'")) + return false; + break; + default: + if (byte >= 32 && byte < 127) { + if (!c.buffer.append((char)byte)) + return false; + } else { + char digit1 = byte / 16, digit2 = byte % 16; + if (!c.buffer.append("\\")) + return false; + if (!c.buffer.append((char)(digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10))) + return false; + if (!c.buffer.append((char)(digit2 < 10 ? 
digit2 + '0' : digit2 + 'a' - 10))) + return false; + } + break; + } + } + return true; +} + +static bool +RenderExprType(WasmRenderContext& c, ExprType type) +{ + switch (type) { + case ExprType::Void: return true; // ignoring void + case ExprType::I32: return c.buffer.append("i32"); + case ExprType::I64: return c.buffer.append("i64"); + case ExprType::F32: return c.buffer.append("f32"); + case ExprType::F64: return c.buffer.append("f64"); + case ExprType::AnyRef: return c.buffer.append("anyref"); + default:; + } + + MOZ_CRASH("bad type"); +} + +static bool +RenderValType(WasmRenderContext& c, ValType type) +{ + return RenderExprType(c, ToExprType(type)); +} + +static bool +RenderName(WasmRenderContext& c, const AstName& name) +{ + return c.buffer.append(name.begin(), name.end()); +} + +static bool +RenderNonemptyName(WasmRenderContext& c, const AstName& name) +{ + return name.empty() || (RenderName(c, name) && c.buffer.append(' ')); +} + +static bool +RenderRef(WasmRenderContext& c, const AstRef& ref) +{ + if (ref.name().empty()) + return RenderInt32(c, ref.index()); + + return RenderName(c, ref.name()); +} + +static bool +RenderBlockNameAndSignature(WasmRenderContext& c, const AstName& name, ExprType type) +{ + if (!name.empty()) { + if (!c.buffer.append(' ')) + return false; + + if (!RenderName(c, name)) + return false; + } + + if (!IsVoid(type)) { + if (!c.buffer.append(' ')) + return false; + + if (!RenderExprType(c, type)) + return false; + } + + return true; +} + +static bool +RenderExpr(WasmRenderContext& c, AstExpr& expr, bool newLine = true); + +#define MAP_AST_EXPR(c, expr) \ + if (c.maybeSourceMap) { \ + uint32_t lineno = c.buffer.lineno(); \ + uint32_t column = c.buffer.column(); \ + if (!c.maybeSourceMap->exprlocs().emplaceBack(lineno, column, expr.offset())) \ + return false; \ + } + +/*****************************************************************************/ +// binary format parsing and rendering + +static bool +RenderNop(WasmRenderContext& c, AstNop& nop) +{ + if (!RenderIndent(c)) + return false; + MAP_AST_EXPR(c, nop); + return c.buffer.append("nop"); +} + +static bool +RenderDrop(WasmRenderContext& c, AstDrop& drop) +{ + if (!RenderExpr(c, drop.value())) + return false; + + if (!RenderIndent(c)) + return false; + MAP_AST_EXPR(c, drop); + return c.buffer.append("drop"); +} + +static bool +RenderUnreachable(WasmRenderContext& c, AstUnreachable& unreachable) +{ + if (!RenderIndent(c)) + return false; + MAP_AST_EXPR(c, unreachable); + return c.buffer.append("unreachable"); +} + +static bool +RenderCallArgs(WasmRenderContext& c, const AstExprVector& args) +{ + for (uint32_t i = 0; i < args.length(); i++) { + if (!RenderExpr(c, *args[i])) + return false; + } + + return true; +} + +static bool +RenderCall(WasmRenderContext& c, AstCall& call) +{ + if (!RenderCallArgs(c, call.args())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, call); + if (call.op() == Op::Call) { + if (!c.buffer.append("call ")) + return false; + } else { + return Fail(c, "unexpected operator"); + } + + return RenderRef(c, call.func()); +} + +static bool +RenderCallIndirect(WasmRenderContext& c, AstCallIndirect& call) +{ + if (!RenderCallArgs(c, call.args())) + return false; + + if (!RenderExpr(c, *call.index())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, call); + if (!c.buffer.append("call_indirect ")) + return false; + return RenderRef(c, call.funcType()); +} + +static bool +RenderConst(WasmRenderContext& c, AstConst& cst) +{ + if 
(!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, cst); + if (!RenderValType(c, cst.val().type())) + return false; + if (!c.buffer.append(".const ")) + return false; + + switch (ToExprType(cst.val().type())) { + case ExprType::I32: + return RenderInt32(c, (int32_t)cst.val().i32()); + case ExprType::I64: + return RenderInt64(c, (int64_t)cst.val().i64()); + case ExprType::F32: + return RenderFloat32(c, cst.val().f32()); + case ExprType::F64: + return RenderDouble(c, cst.val().f64()); + default: + break; + } + + return false; +} + +static bool +RenderGetLocal(WasmRenderContext& c, AstGetLocal& gl) +{ + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, gl); + if (!c.buffer.append("get_local ")) + return false; + return RenderRef(c, gl.local()); +} + +static bool +RenderSetLocal(WasmRenderContext& c, AstSetLocal& sl) +{ + if (!RenderExpr(c, sl.value())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, sl); + if (!c.buffer.append("set_local ")) + return false; + return RenderRef(c, sl.local()); +} + +static bool +RenderTeeLocal(WasmRenderContext& c, AstTeeLocal& tl) +{ + if (!RenderExpr(c, tl.value())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, tl); + if (!c.buffer.append("tee_local ")) + return false; + return RenderRef(c, tl.local()); +} + +static bool +RenderGetGlobal(WasmRenderContext& c, AstGetGlobal& gg) +{ + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, gg); + if (!c.buffer.append("get_global ")) + return false; + return RenderRef(c, gg.global()); +} + +static bool +RenderSetGlobal(WasmRenderContext& c, AstSetGlobal& sg) +{ + if (!RenderExpr(c, sg.value())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, sg); + if (!c.buffer.append("set_global ")) + return false; + return RenderRef(c, sg.global()); +} + +static bool +RenderExprList(WasmRenderContext& c, const AstExprVector& exprs, uint32_t startAt = 0) +{ + for (uint32_t i = startAt; i < exprs.length(); i++) { + if (!RenderExpr(c, *exprs[i])) + return false; + } + return true; +} + +static bool +RenderBlock(WasmRenderContext& c, AstBlock& block, bool isInline = false) +{ + if (!isInline && !RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, block); + if (block.op() == Op::Block) { + if (!c.buffer.append("block")) + return false; + } else if (block.op() == Op::Loop) { + if (!c.buffer.append("loop")) + return false; + } else { + return Fail(c, "unexpected block kind"); + } + + if (!RenderBlockNameAndSignature(c, block.name(), block.type())) + return false; + + uint32_t startAtSubExpr = 0; + + // If there is a stack of blocks, print them all inline. + if (block.op() == Op::Block && + block.exprs().length() && + block.exprs()[0]->kind() == AstExprKind::Block && + block.exprs()[0]->as().op() == Op::Block) + { + if (!c.buffer.append(' ')) + return false; + + // Render the first inner expr (block) at the same indent level, but + // next instructions one level further. 
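+ // Passing isInline = true makes the nested RenderBlock skip its leading indent, so it continues on the current line.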
+ if (!RenderBlock(c, block.exprs()[0]->as(), /* isInline */ true)) + return false; + + startAtSubExpr = 1; + } + + if (!c.buffer.append('\n')) + return false; + + c.indent++; + if (!RenderExprList(c, block.exprs(), startAtSubExpr)) + return false; + c.indent--; + + return RenderIndent(c) && + c.buffer.append("end ") && + RenderName(c, block.name()); +} + +static bool +RenderFirst(WasmRenderContext& c, AstFirst& first) +{ + return RenderExprList(c, first.exprs()); +} + +static bool +RenderCurrentMemory(WasmRenderContext& c, AstCurrentMemory& cm) +{ + if (!RenderIndent(c)) + return false; + + return c.buffer.append("current_memory\n"); +} + +static bool +RenderGrowMemory(WasmRenderContext& c, AstGrowMemory& gm) +{ + if (!RenderExpr(c, *gm.operand())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, gm); + return c.buffer.append("grow_memory\n"); +} + +static bool +RenderUnaryOperator(WasmRenderContext& c, AstUnaryOperator& unary) +{ + if (!RenderExpr(c, *unary.operand())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, unary); + const char* opStr; + switch (unary.op()) { + case Op::I32Eqz: opStr = "i32.eqz"; break; + case Op::I32Clz: opStr = "i32.clz"; break; + case Op::I32Ctz: opStr = "i32.ctz"; break; + case Op::I32Popcnt: opStr = "i32.popcnt"; break; + case Op::I64Clz: opStr = "i64.clz"; break; + case Op::I64Ctz: opStr = "i64.ctz"; break; + case Op::I64Popcnt: opStr = "i64.popcnt"; break; + case Op::F32Abs: opStr = "f32.abs"; break; + case Op::F32Neg: opStr = "f32.neg"; break; + case Op::F32Ceil: opStr = "f32.ceil"; break; + case Op::F32Floor: opStr = "f32.floor"; break; + case Op::F32Sqrt: opStr = "f32.sqrt"; break; + case Op::F32Trunc: opStr = "f32.trunc"; break; + case Op::F32Nearest: opStr = "f32.nearest"; break; + case Op::F64Abs: opStr = "f64.abs"; break; + case Op::F64Neg: opStr = "f64.neg"; break; + case Op::F64Ceil: opStr = "f64.ceil"; break; + case Op::F64Floor: opStr = "f64.floor"; break; + case Op::F64Nearest: opStr = "f64.nearest"; break; + case Op::F64Sqrt: opStr = "f64.sqrt"; break; + case Op::F64Trunc: opStr = "f64.trunc"; break; + default: return Fail(c, "unexpected unary operator"); + } + + return c.buffer.append(opStr, strlen(opStr)); +} + +static bool +RenderBinaryOperator(WasmRenderContext& c, AstBinaryOperator& binary) +{ + if (!RenderExpr(c, *binary.lhs())) + return false; + if (!RenderExpr(c, *binary.rhs())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, binary); + const char* opStr; + switch (binary.op()) { + case Op::I32Add: opStr = "i32.add"; break; + case Op::I32Sub: opStr = "i32.sub"; break; + case Op::I32Mul: opStr = "i32.mul"; break; + case Op::I32DivS: opStr = "i32.div_s"; break; + case Op::I32DivU: opStr = "i32.div_u"; break; + case Op::I32RemS: opStr = "i32.rem_s"; break; + case Op::I32RemU: opStr = "i32.rem_u"; break; + case Op::I32And: opStr = "i32.and"; break; + case Op::I32Or: opStr = "i32.or"; break; + case Op::I32Xor: opStr = "i32.xor"; break; + case Op::I32Shl: opStr = "i32.shl"; break; + case Op::I32ShrS: opStr = "i32.shr_s"; break; + case Op::I32ShrU: opStr = "i32.shr_u"; break; + case Op::I32Rotl: opStr = "i32.rotl"; break; + case Op::I32Rotr: opStr = "i32.rotr"; break; + case Op::I64Add: opStr = "i64.add"; break; + case Op::I64Sub: opStr = "i64.sub"; break; + case Op::I64Mul: opStr = "i64.mul"; break; + case Op::I64DivS: opStr = "i64.div_s"; break; + case Op::I64DivU: opStr = "i64.div_u"; break; + case Op::I64RemS: opStr = "i64.rem_s"; break; + case 
Op::I64RemU: opStr = "i64.rem_u"; break; + case Op::I64And: opStr = "i64.and"; break; + case Op::I64Or: opStr = "i64.or"; break; + case Op::I64Xor: opStr = "i64.xor"; break; + case Op::I64Shl: opStr = "i64.shl"; break; + case Op::I64ShrS: opStr = "i64.shr_s"; break; + case Op::I64ShrU: opStr = "i64.shr_u"; break; + case Op::I64Rotl: opStr = "i64.rotl"; break; + case Op::I64Rotr: opStr = "i64.rotr"; break; + case Op::F32Add: opStr = "f32.add"; break; + case Op::F32Sub: opStr = "f32.sub"; break; + case Op::F32Mul: opStr = "f32.mul"; break; + case Op::F32Div: opStr = "f32.div"; break; + case Op::F32Min: opStr = "f32.min"; break; + case Op::F32Max: opStr = "f32.max"; break; + case Op::F32CopySign: opStr = "f32.copysign"; break; + case Op::F64Add: opStr = "f64.add"; break; + case Op::F64Sub: opStr = "f64.sub"; break; + case Op::F64Mul: opStr = "f64.mul"; break; + case Op::F64Div: opStr = "f64.div"; break; + case Op::F64Min: opStr = "f64.min"; break; + case Op::F64Max: opStr = "f64.max"; break; + case Op::F64CopySign: opStr = "f64.copysign"; break; + default: return Fail(c, "unexpected binary operator"); + } + + return c.buffer.append(opStr, strlen(opStr)); +} + +static bool +RenderTernaryOperator(WasmRenderContext& c, AstTernaryOperator& ternary) +{ + if (!RenderExpr(c, *ternary.op0())) + return false; + if (!RenderExpr(c, *ternary.op1())) + return false; + if (!RenderExpr(c, *ternary.op2())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, ternary); + const char* opStr; + switch (ternary.op()) { + case Op::Select: opStr = "select"; break; + default: return Fail(c, "unexpected ternary operator"); + } + + return c.buffer.append(opStr, strlen(opStr)); +} + +static bool +RenderComparisonOperator(WasmRenderContext& c, AstComparisonOperator& comp) +{ + if (!RenderExpr(c, *comp.lhs())) + return false; + if (!RenderExpr(c, *comp.rhs())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, comp); + const char* opStr; + switch (comp.op()) { + case Op::I32Eq: opStr = "i32.eq"; break; + case Op::I32Ne: opStr = "i32.ne"; break; + case Op::I32LtS: opStr = "i32.lt_s"; break; + case Op::I32LtU: opStr = "i32.lt_u"; break; + case Op::I32LeS: opStr = "i32.le_s"; break; + case Op::I32LeU: opStr = "i32.le_u"; break; + case Op::I32GtS: opStr = "i32.gt_s"; break; + case Op::I32GtU: opStr = "i32.gt_u"; break; + case Op::I32GeS: opStr = "i32.ge_s"; break; + case Op::I32GeU: opStr = "i32.ge_u"; break; + case Op::I64Eq: opStr = "i64.eq"; break; + case Op::I64Ne: opStr = "i64.ne"; break; + case Op::I64LtS: opStr = "i64.lt_s"; break; + case Op::I64LtU: opStr = "i64.lt_u"; break; + case Op::I64LeS: opStr = "i64.le_s"; break; + case Op::I64LeU: opStr = "i64.le_u"; break; + case Op::I64GtS: opStr = "i64.gt_s"; break; + case Op::I64GtU: opStr = "i64.gt_u"; break; + case Op::I64GeS: opStr = "i64.ge_s"; break; + case Op::I64GeU: opStr = "i64.ge_u"; break; + case Op::F32Eq: opStr = "f32.eq"; break; + case Op::F32Ne: opStr = "f32.ne"; break; + case Op::F32Lt: opStr = "f32.lt"; break; + case Op::F32Le: opStr = "f32.le"; break; + case Op::F32Gt: opStr = "f32.gt"; break; + case Op::F32Ge: opStr = "f32.ge"; break; + case Op::F64Eq: opStr = "f64.eq"; break; + case Op::F64Ne: opStr = "f64.ne"; break; + case Op::F64Lt: opStr = "f64.lt"; break; + case Op::F64Le: opStr = "f64.le"; break; + case Op::F64Gt: opStr = "f64.gt"; break; + case Op::F64Ge: opStr = "f64.ge"; break; + default: return Fail(c, "unexpected comparison operator"); + } + + return c.buffer.append(opStr, strlen(opStr)); 
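// Illustrative sketch (editorial, not part of this patch): every operator renderer
// above follows the same shape -- render the operand subexpressions first, then the
// indentation, then the mnemonic -- which is what produces the linear,
// stack-machine-style text. A minimal self-contained analogue (all names invented):
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct ToyExpr
{
    std::string mnemonic;                             // e.g. "i32.add" or "i32.const 1"
    std::vector<std::unique_ptr<ToyExpr>> operands;   // empty for constants
};

void PrintToyExpr(const ToyExpr& e)
{
    for (const auto& operand : e.operands)    // operands first (postorder)...
        PrintToyExpr(*operand);
    std::printf("%s\n", e.mnemonic.c_str());  // ...then the operator itself
}

// Printing (i32.add (i32.const 1) (i32.const 2)) this way gives:
//   i32.const 1
//   i32.const 2
//   i32.add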
+} + +static bool +RenderConversionOperator(WasmRenderContext& c, AstConversionOperator& conv) +{ + if (!RenderExpr(c, *conv.operand())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, conv); + const char* opStr; + switch (conv.op()) { + case Op::I32WrapI64: opStr = "i32.wrap/i64"; break; + case Op::I32TruncSF32: opStr = "i32.trunc_s/f32"; break; + case Op::I32TruncUF32: opStr = "i32.trunc_u/f32"; break; + case Op::I32ReinterpretF32: opStr = "i32.reinterpret/f32"; break; + case Op::I32TruncSF64: opStr = "i32.trunc_s/f64"; break; + case Op::I32TruncUF64: opStr = "i32.trunc_u/f64"; break; + case Op::I64ExtendSI32: opStr = "i64.extend_s/i32"; break; + case Op::I64ExtendUI32: opStr = "i64.extend_u/i32"; break; + case Op::I64TruncSF32: opStr = "i64.trunc_s/f32"; break; + case Op::I64TruncUF32: opStr = "i64.trunc_u/f32"; break; + case Op::I64TruncSF64: opStr = "i64.trunc_s/f64"; break; + case Op::I64TruncUF64: opStr = "i64.trunc_u/f64"; break; + case Op::I64ReinterpretF64: opStr = "i64.reinterpret/f64"; break; + case Op::F32ConvertSI32: opStr = "f32.convert_s/i32"; break; + case Op::F32ConvertUI32: opStr = "f32.convert_u/i32"; break; + case Op::F32ReinterpretI32: opStr = "f32.reinterpret/i32"; break; + case Op::F32ConvertSI64: opStr = "f32.convert_s/i64"; break; + case Op::F32ConvertUI64: opStr = "f32.convert_u/i64"; break; + case Op::F32DemoteF64: opStr = "f32.demote/f64"; break; + case Op::F64ConvertSI32: opStr = "f64.convert_s/i32"; break; + case Op::F64ConvertUI32: opStr = "f64.convert_u/i32"; break; + case Op::F64ConvertSI64: opStr = "f64.convert_s/i64"; break; + case Op::F64ConvertUI64: opStr = "f64.convert_u/i64"; break; + case Op::F64ReinterpretI64: opStr = "f64.reinterpret/i64"; break; + case Op::F64PromoteF32: opStr = "f64.promote/f32"; break; + case Op::I32Extend8S: opStr = "i32.extend8_s"; break; + case Op::I32Extend16S: opStr = "i32.extend16_s"; break; + case Op::I64Extend8S: opStr = "i64.extend8_s"; break; + case Op::I64Extend16S: opStr = "i64.extend16_s"; break; + case Op::I64Extend32S: opStr = "i64.extend32_s"; break; + case Op::I32Eqz: opStr = "i32.eqz"; break; + case Op::I64Eqz: opStr = "i64.eqz"; break; + default: return Fail(c, "unexpected conversion operator"); + } + return c.buffer.append(opStr, strlen(opStr)); +} + +#ifdef ENABLE_WASM_SATURATING_TRUNC_OPS +static bool +RenderExtraConversionOperator(WasmRenderContext& c, AstExtraConversionOperator& conv) +{ + if (!RenderExpr(c, *conv.operand())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, conv); + const char* opStr; + switch (conv.op()) { + case MiscOp::I32TruncSSatF32: opStr = "i32.trunc_s:sat/f32"; break; + case MiscOp::I32TruncUSatF32: opStr = "i32.trunc_u:sat/f32"; break; + case MiscOp::I32TruncSSatF64: opStr = "i32.trunc_s:sat/f64"; break; + case MiscOp::I32TruncUSatF64: opStr = "i32.trunc_u:sat/f64"; break; + case MiscOp::I64TruncSSatF32: opStr = "i64.trunc_s:sat/f32"; break; + case MiscOp::I64TruncUSatF32: opStr = "i64.trunc_u:sat/f32"; break; + case MiscOp::I64TruncSSatF64: opStr = "i64.trunc_s:sat/f64"; break; + case MiscOp::I64TruncUSatF64: opStr = "i64.trunc_u:sat/f64"; break; + default: return Fail(c, "unexpected extra conversion operator"); + } + return c.buffer.append(opStr, strlen(opStr)); +} +#endif + +static bool +RenderIf(WasmRenderContext& c, AstIf& if_) +{ + if (!RenderExpr(c, if_.cond())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, if_); + if (!c.buffer.append("if")) + return false; + if 
(!RenderBlockNameAndSignature(c, if_.name(), if_.type())) + return false; + if (!c.buffer.append('\n')) + return false; + + c.indent++; + if (!RenderExprList(c, if_.thenExprs())) + return false; + c.indent--; + + if (if_.hasElse()) { + if (!RenderIndent(c)) + return false; + + if (!c.buffer.append("else\n")) + return false; + + c.indent++; + if (!RenderExprList(c, if_.elseExprs())) + return false; + c.indent--; + } + + if (!RenderIndent(c)) + return false; + + return c.buffer.append("end"); +} + +static bool +RenderLoadStoreBase(WasmRenderContext& c, const AstLoadStoreAddress& lsa) +{ + return RenderExpr(c, lsa.base()); +} + +static bool +RenderLoadStoreAddress(WasmRenderContext& c, const AstLoadStoreAddress& lsa, uint32_t defaultAlignLog2) +{ + if (lsa.offset() != 0) { + if (!c.buffer.append(" offset=")) + return false; + if (!RenderInt32(c, lsa.offset())) + return false; + } + + uint32_t alignLog2 = lsa.flags(); + if (defaultAlignLog2 != alignLog2) { + if (!c.buffer.append(" align=")) + return false; + if (!RenderInt32(c, 1 << alignLog2)) + return false; + } + + return true; +} + +static bool +RenderLoad(WasmRenderContext& c, AstLoad& load) +{ + if (!RenderLoadStoreBase(c, load.address())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, load); + uint32_t defaultAlignLog2; + switch (load.op()) { + case Op::I32Load8S: + if (!c.buffer.append("i32.load8_s")) + return false; + defaultAlignLog2 = 0; + break; + case Op::I64Load8S: + if (!c.buffer.append("i64.load8_s")) + return false; + defaultAlignLog2 = 0; + break; + case Op::I32Load8U: + if (!c.buffer.append("i32.load8_u")) + return false; + defaultAlignLog2 = 0; + break; + case Op::I64Load8U: + if (!c.buffer.append("i64.load8_u")) + return false; + defaultAlignLog2 = 0; + break; + case Op::I32Load16S: + if (!c.buffer.append("i32.load16_s")) + return false; + defaultAlignLog2 = 1; + break; + case Op::I64Load16S: + if (!c.buffer.append("i64.load16_s")) + return false; + defaultAlignLog2 = 1; + break; + case Op::I32Load16U: + if (!c.buffer.append("i32.load16_u")) + return false; + defaultAlignLog2 = 1; + break; + case Op::I64Load16U: + if (!c.buffer.append("i64.load16_u")) + return false; + defaultAlignLog2 = 1; + break; + case Op::I64Load32S: + if (!c.buffer.append("i64.load32_s")) + return false; + defaultAlignLog2 = 2; + break; + case Op::I64Load32U: + if (!c.buffer.append("i64.load32_u")) + return false; + defaultAlignLog2 = 2; + break; + case Op::I32Load: + if (!c.buffer.append("i32.load")) + return false; + defaultAlignLog2 = 2; + break; + case Op::I64Load: + if (!c.buffer.append("i64.load")) + return false; + defaultAlignLog2 = 3; + break; + case Op::F32Load: + if (!c.buffer.append("f32.load")) + return false; + defaultAlignLog2 = 2; + break; + case Op::F64Load: + if (!c.buffer.append("f64.load")) + return false; + defaultAlignLog2 = 3; + break; + default: + return Fail(c, "unexpected load operator"); + } + + return RenderLoadStoreAddress(c, load.address(), defaultAlignLog2); +} + +static bool +RenderStore(WasmRenderContext& c, AstStore& store) +{ + if (!RenderLoadStoreBase(c, store.address())) + return false; + + if (!RenderExpr(c, store.value())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, store); + uint32_t defaultAlignLog2; + switch (store.op()) { + case Op::I32Store8: + if (!c.buffer.append("i32.store8")) + return false; + defaultAlignLog2 = 0; + break; + case Op::I64Store8: + if (!c.buffer.append("i64.store8")) + return false; + defaultAlignLog2 = 0; + break; + case 
Op::I32Store16: + if (!c.buffer.append("i32.store16")) + return false; + defaultAlignLog2 = 1; + break; + case Op::I64Store16: + if (!c.buffer.append("i64.store16")) + return false; + defaultAlignLog2 = 1; + break; + case Op::I64Store32: + if (!c.buffer.append("i64.store32")) + return false; + defaultAlignLog2 = 2; + break; + case Op::I32Store: + if (!c.buffer.append("i32.store")) + return false; + defaultAlignLog2 = 2; + break; + case Op::I64Store: + if (!c.buffer.append("i64.store")) + return false; + defaultAlignLog2 = 3; + break; + case Op::F32Store: + if (!c.buffer.append("f32.store")) + return false; + defaultAlignLog2 = 2; + break; + case Op::F64Store: + if (!c.buffer.append("f64.store")) + return false; + defaultAlignLog2 = 3; + break; + default: + return Fail(c, "unexpected store operator"); + } + + return RenderLoadStoreAddress(c, store.address(), defaultAlignLog2); +} + +static bool +RenderBranch(WasmRenderContext& c, AstBranch& branch) +{ + Op op = branch.op(); + MOZ_ASSERT(op == Op::BrIf || op == Op::Br); + + if (op == Op::BrIf) { + if (!RenderExpr(c, branch.cond())) + return false; + } + + if (branch.maybeValue()) { + if (!RenderExpr(c, *(branch.maybeValue()))) + return false; + } + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, branch); + if (op == Op::BrIf ? !c.buffer.append("br_if ") : !c.buffer.append("br ")) + return false; + + return RenderRef(c, branch.target()); +} + +static bool +RenderBrTable(WasmRenderContext& c, AstBranchTable& table) +{ + if (table.maybeValue()) { + if (!RenderExpr(c, *(table.maybeValue()))) + return false; + } + + // Index + if (!RenderExpr(c, table.index())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, table); + if (!c.buffer.append("br_table ")) + return false; + + uint32_t tableLength = table.table().length(); + for (uint32_t i = 0; i < tableLength; i++) { + if (!RenderRef(c, table.table()[i])) + return false; + + if (!c.buffer.append(" ")) + return false; + } + + return RenderRef(c, table.def()); +} + +static bool +RenderReturn(WasmRenderContext& c, AstReturn& ret) +{ + if (ret.maybeExpr()) { + if (!RenderExpr(c, *(ret.maybeExpr()))) + return false; + } + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, ret); + return c.buffer.append("return"); +} + +static bool +RenderAtomicCmpXchg(WasmRenderContext& c, AstAtomicCmpXchg& cmpxchg) +{ + if (!RenderLoadStoreBase(c, cmpxchg.address())) + return false; + + if (!RenderExpr(c, cmpxchg.expected())) + return false; + if (!RenderExpr(c, cmpxchg.replacement())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, cmpxchg); + const char* opname; + switch (cmpxchg.op()) { + case ThreadOp::I32AtomicCmpXchg8U: opname = "i32.atomic.rmw8_u.cmpxchg"; break; + case ThreadOp::I64AtomicCmpXchg8U: opname = "i64.atomic.rmw8_u.cmpxchg"; break; + case ThreadOp::I32AtomicCmpXchg16U: opname = "i32.atomic.rmw16_u.cmpxchg"; break; + case ThreadOp::I64AtomicCmpXchg16U: opname = "i64.atomic.rmw16_u.cmpxchg"; break; + case ThreadOp::I64AtomicCmpXchg32U: opname = "i64.atomic.rmw32_u.cmpxchg"; break; + case ThreadOp::I32AtomicCmpXchg: opname = "i32.atomic.rmw.cmpxchg"; break; + case ThreadOp::I64AtomicCmpXchg: opname = "i64.atomic.rmw.cmpxchg"; break; + default: return Fail(c, "unexpected cmpxchg operator"); + } + + if (!c.buffer.append(opname, strlen(opname))) + return false; + + return RenderLoadStoreAddress(c, cmpxchg.address(), 0); +} + +static bool +RenderAtomicLoad(WasmRenderContext& c, AstAtomicLoad& load) +{ + if 
(!RenderLoadStoreBase(c, load.address())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, load); + const char* opname; + switch (load.op()) { + case ThreadOp::I32AtomicLoad8U: opname = "i32.atomic.load8_u"; break; + case ThreadOp::I64AtomicLoad8U: opname = "i64.atomic.load8_u"; break; + case ThreadOp::I32AtomicLoad16U: opname = "i32.atomic.load16_u"; break; + case ThreadOp::I64AtomicLoad16U: opname = "i64.atomic.load16_u"; break; + case ThreadOp::I64AtomicLoad32U: opname = "i64.atomic.load32_u"; break; + case ThreadOp::I32AtomicLoad: opname = "i32.atomic.load"; break; + case ThreadOp::I64AtomicLoad: opname = "i64.atomic.load"; break; + default: return Fail(c, "unexpected load operator"); + } + + if (!c.buffer.append(opname, strlen(opname))) + return false; + + return RenderLoadStoreAddress(c, load.address(), 0); +} + +static bool +RenderAtomicRMW(WasmRenderContext& c, AstAtomicRMW& rmw) +{ + if (!RenderLoadStoreBase(c, rmw.address())) + return false; + + if (!RenderExpr(c, rmw.value())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, rmw); + const char* opname; + switch (rmw.op()) { + case ThreadOp::I32AtomicAdd: opname = "i32.atomic.rmw.add"; break; + case ThreadOp::I64AtomicAdd: opname = "i64.atomic.rmw.add"; break; + case ThreadOp::I32AtomicAdd8U: opname = "i32.atomic.rmw8_u.add"; break; + case ThreadOp::I32AtomicAdd16U: opname = "i32.atomic.rmw16_u.add"; break; + case ThreadOp::I64AtomicAdd8U: opname = "i64.atomic.rmw8_u.add"; break; + case ThreadOp::I64AtomicAdd16U: opname = "i64.atomic.rmw16_u.add"; break; + case ThreadOp::I64AtomicAdd32U: opname = "i64.atomic.rmw32_u.add"; break; + case ThreadOp::I32AtomicSub: opname = "i32.atomic.rmw.sub"; break; + case ThreadOp::I64AtomicSub: opname = "i64.atomic.rmw.sub"; break; + case ThreadOp::I32AtomicSub8U: opname = "i32.atomic.rmw8_u.sub"; break; + case ThreadOp::I32AtomicSub16U: opname = "i32.atomic.rmw16_u.sub"; break; + case ThreadOp::I64AtomicSub8U: opname = "i64.atomic.rmw8_u.sub"; break; + case ThreadOp::I64AtomicSub16U: opname = "i64.atomic.rmw16_u.sub"; break; + case ThreadOp::I64AtomicSub32U: opname = "i64.atomic.rmw32_u.sub"; break; + case ThreadOp::I32AtomicAnd: opname = "i32.atomic.rmw.and"; break; + case ThreadOp::I64AtomicAnd: opname = "i64.atomic.rmw.and"; break; + case ThreadOp::I32AtomicAnd8U: opname = "i32.atomic.rmw8_u.and"; break; + case ThreadOp::I32AtomicAnd16U: opname = "i32.atomic.rmw16_u.and"; break; + case ThreadOp::I64AtomicAnd8U: opname = "i64.atomic.rmw8_u.and"; break; + case ThreadOp::I64AtomicAnd16U: opname = "i64.atomic.rmw16_u.and"; break; + case ThreadOp::I64AtomicAnd32U: opname = "i64.atomic.rmw32_u.and"; break; + case ThreadOp::I32AtomicOr: opname = "i32.atomic.rmw.or"; break; + case ThreadOp::I64AtomicOr: opname = "i64.atomic.rmw.or"; break; + case ThreadOp::I32AtomicOr8U: opname = "i32.atomic.rmw8_u.or"; break; + case ThreadOp::I32AtomicOr16U: opname = "i32.atomic.rmw16_u.or"; break; + case ThreadOp::I64AtomicOr8U: opname = "i64.atomic.rmw8_u.or"; break; + case ThreadOp::I64AtomicOr16U: opname = "i64.atomic.rmw16_u.or"; break; + case ThreadOp::I64AtomicOr32U: opname = "i64.atomic.rmw32_u.or"; break; + case ThreadOp::I32AtomicXor: opname = "i32.atomic.rmw.xor"; break; + case ThreadOp::I64AtomicXor: opname = "i64.atomic.rmw.xor"; break; + case ThreadOp::I32AtomicXor8U: opname = "i32.atomic.rmw8_u.xor"; break; + case ThreadOp::I32AtomicXor16U: opname = "i32.atomic.rmw16_u.xor"; break; + case ThreadOp::I64AtomicXor8U: opname = 
"i64.atomic.rmw8_u.xor"; break; + case ThreadOp::I64AtomicXor16U: opname = "i64.atomic.rmw16_u.xor"; break; + case ThreadOp::I64AtomicXor32U: opname = "i64.atomic.rmw32_u.xor"; break; + case ThreadOp::I32AtomicXchg: opname = "i32.atomic.rmw.xchg"; break; + case ThreadOp::I64AtomicXchg: opname = "i64.atomic.rmw.xchg"; break; + case ThreadOp::I32AtomicXchg8U: opname = "i32.atomic.rmw8_u.xchg"; break; + case ThreadOp::I32AtomicXchg16U: opname = "i32.atomic.rmw16_u.xchg"; break; + case ThreadOp::I64AtomicXchg8U: opname = "i64.atomic.rmw8_u.xchg"; break; + case ThreadOp::I64AtomicXchg16U: opname = "i64.atomic.rmw16_u.xchg"; break; + case ThreadOp::I64AtomicXchg32U: opname = "i64.atomic.rmw32_u.xchg"; break; + default: return Fail(c, "unexpected rmw operator"); + } + + if (!c.buffer.append(opname, strlen(opname))) + return false; + + return RenderLoadStoreAddress(c, rmw.address(), 0); +} + +static bool +RenderAtomicStore(WasmRenderContext& c, AstAtomicStore& store) +{ + if (!RenderLoadStoreBase(c, store.address())) + return false; + + if (!RenderExpr(c, store.value())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, store); + const char* opname; + switch (store.op()) { + case ThreadOp::I32AtomicStore8U: opname = "i32.atomic.store8_u"; break; + case ThreadOp::I64AtomicStore8U: opname = "i64.atomic.store8_u"; break; + case ThreadOp::I32AtomicStore16U: opname = "i32.atomic.store16_u"; break; + case ThreadOp::I64AtomicStore16U: opname = "i64.atomic.store16_u"; break; + case ThreadOp::I64AtomicStore32U: opname = "i64.atomic.store32_u"; break; + case ThreadOp::I32AtomicStore: opname = "i32.atomic.store"; break; + case ThreadOp::I64AtomicStore: opname = "i64.atomic.store"; break; + default: return Fail(c, "unexpected store operator"); + } + + if (!c.buffer.append(opname, strlen(opname))) + return false; + + return RenderLoadStoreAddress(c, store.address(), 0); +} + +static bool +RenderWait(WasmRenderContext& c, AstWait& wait) +{ + if (!RenderLoadStoreBase(c, wait.address())) + return false; + + if (!RenderExpr(c, wait.expected())) + return false; + + if (!RenderExpr(c, wait.timeout())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, wait); + const char* opname; + switch (wait.op()) { + case ThreadOp::I32Wait: opname = "i32.atomic.wait"; break; + case ThreadOp::I64Wait: opname = "i64.atomic.wait"; break; + default: return Fail(c, "unexpected wait operator"); + } + + if (!c.buffer.append(opname, strlen(opname))) + return false; + + return RenderLoadStoreAddress(c, wait.address(), 0); +} + +static bool +RenderWake(WasmRenderContext& c, AstWake& wake) +{ + if (!RenderLoadStoreBase(c, wake.address())) + return false; + + if (!RenderExpr(c, wake.count())) + return false; + + if (!RenderIndent(c)) + return false; + + if (!c.buffer.append("atomic.wake", strlen("atomic.wake"))) + return false; + + return RenderLoadStoreAddress(c, wake.address(), 0); +} + +#ifdef ENABLE_WASM_BULKMEM_OPS +static bool +RenderMemCopy(WasmRenderContext& c, AstMemCopy& mc) +{ + if (!RenderExpr(c, mc.dest())) + return false; + if (!RenderExpr(c, mc.src())) + return false; + if (!RenderExpr(c, mc.len())) + return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, mc); + const char* opStr = "memory.copy"; + + return c.buffer.append(opStr, strlen(opStr)); +} + +static bool +RenderMemFill(WasmRenderContext& c, AstMemFill& mf) +{ + if (!RenderExpr(c, mf.start())) + return false; + if (!RenderExpr(c, mf.val())) + return false; + if (!RenderExpr(c, mf.len())) + 
return false; + + if (!RenderIndent(c)) + return false; + + MAP_AST_EXPR(c, mf); + const char* opStr = "memory.fill"; + + return c.buffer.append(opStr, strlen(opStr)); +} +#endif + +static bool +RenderExpr(WasmRenderContext& c, AstExpr& expr, bool newLine /* = true */) +{ + switch (expr.kind()) { + case AstExprKind::Drop: + if (!RenderDrop(c, expr.as())) + return false; + break; + case AstExprKind::Nop: + if (!RenderNop(c, expr.as())) + return false; + break; + case AstExprKind::Unreachable: + if (!RenderUnreachable(c, expr.as())) + return false; + break; + case AstExprKind::Call: + if (!RenderCall(c, expr.as())) + return false; + break; + case AstExprKind::CallIndirect: + if (!RenderCallIndirect(c, expr.as())) + return false; + break; + case AstExprKind::Const: + if (!RenderConst(c, expr.as())) + return false; + break; + case AstExprKind::GetLocal: + if (!RenderGetLocal(c, expr.as())) + return false; + break; + case AstExprKind::SetLocal: + if (!RenderSetLocal(c, expr.as())) + return false; + break; + case AstExprKind::GetGlobal: + if (!RenderGetGlobal(c, expr.as())) + return false; + break; + case AstExprKind::SetGlobal: + if (!RenderSetGlobal(c, expr.as())) + return false; + break; + case AstExprKind::TeeLocal: + if (!RenderTeeLocal(c, expr.as())) + return false; + break; + case AstExprKind::Block: + if (!RenderBlock(c, expr.as())) + return false; + break; + case AstExprKind::If: + if (!RenderIf(c, expr.as())) + return false; + break; + case AstExprKind::UnaryOperator: + if (!RenderUnaryOperator(c, expr.as())) + return false; + break; + case AstExprKind::BinaryOperator: + if (!RenderBinaryOperator(c, expr.as())) + return false; + break; + case AstExprKind::TernaryOperator: + if (!RenderTernaryOperator(c, expr.as())) + return false; + break; + case AstExprKind::ComparisonOperator: + if (!RenderComparisonOperator(c, expr.as())) + return false; + break; + case AstExprKind::ConversionOperator: + if (!RenderConversionOperator(c, expr.as())) + return false; + break; +#ifdef ENABLE_WASM_SATURATING_TRUNC_OPS + case AstExprKind::ExtraConversionOperator: + if (!RenderExtraConversionOperator(c, expr.as())) + return false; + break; +#endif + case AstExprKind::Load: + if (!RenderLoad(c, expr.as())) + return false; + break; + case AstExprKind::Store: + if (!RenderStore(c, expr.as())) + return false; + break; + case AstExprKind::Branch: + if (!RenderBranch(c, expr.as())) + return false; + break; + case AstExprKind::BranchTable: + if (!RenderBrTable(c, expr.as())) + return false; + break; + case AstExprKind::Return: + if (!RenderReturn(c, expr.as())) + return false; + break; + case AstExprKind::First: + newLine = false; + if (!RenderFirst(c, expr.as())) + return false; + break; + case AstExprKind::CurrentMemory: + if (!RenderCurrentMemory(c, expr.as())) + return false; + break; + case AstExprKind::GrowMemory: + if (!RenderGrowMemory(c, expr.as())) + return false; + break; + case AstExprKind::AtomicCmpXchg: + if (!RenderAtomicCmpXchg(c, expr.as())) + return false; + break; + case AstExprKind::AtomicLoad: + if (!RenderAtomicLoad(c, expr.as())) + return false; + break; + case AstExprKind::AtomicRMW: + if (!RenderAtomicRMW(c, expr.as())) + return false; + break; + case AstExprKind::AtomicStore: + if (!RenderAtomicStore(c, expr.as())) + return false; + break; + case AstExprKind::Wait: + if (!RenderWait(c, expr.as())) + return false; + break; + case AstExprKind::Wake: + if (!RenderWake(c, expr.as())) + return false; + break; +#ifdef ENABLE_WASM_BULKMEM_OPS + case AstExprKind::MemCopy: + if (!RenderMemCopy(c, 
expr.as())) + return false; + break; + case AstExprKind::MemFill: + if (!RenderMemFill(c, expr.as())) + return false; + break; +#endif + default: + MOZ_CRASH("Bad AstExprKind"); + } + + return !newLine || c.buffer.append("\n"); +} + +static bool +RenderSignature(WasmRenderContext& c, const AstFuncType& funcType, + const AstNameVector* maybeLocals = nullptr) +{ + uint32_t paramsNum = funcType.args().length(); + + if (maybeLocals) { + for (uint32_t i = 0; i < paramsNum; i++) { + if (!c.buffer.append(" (param ")) + return false; + const AstName& name = (*maybeLocals)[i]; + if (!RenderNonemptyName(c, name)) + return false; + ValType arg = funcType.args()[i]; + if (!RenderValType(c, arg)) + return false; + if (!c.buffer.append(")")) + return false; + } + } else if (paramsNum > 0) { + if (!c.buffer.append(" (param")) + return false; + for (uint32_t i = 0; i < paramsNum; i++) { + if (!c.buffer.append(" ")) + return false; + ValType arg = funcType.args()[i]; + if (!RenderValType(c, arg)) + return false; + } + if (!c.buffer.append(")")) + return false; + } + if (funcType.ret() != ExprType::Void) { + if (!c.buffer.append(" (result ")) + return false; + if (!RenderExprType(c, funcType.ret())) + return false; + if (!c.buffer.append(")")) + return false; + } + return true; +} + +static bool +RenderFields(WasmRenderContext& c, const AstStructType& st) +{ + const AstNameVector& fieldNames = st.fieldNames(); + const AstValTypeVector& fieldTypes = st.fieldTypes(); + + for (uint32_t fieldIndex = 0; fieldIndex < fieldTypes.length(); fieldIndex++) { + if (!c.buffer.append("\n")) + return false; + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(field ")) + return false; + if (!RenderNonemptyName(c, fieldNames[fieldIndex])) + return false; + if (!RenderValType(c, fieldTypes[fieldIndex])) + return false; + if (!c.buffer.append(')')) + return false; + } + return true; +} + +template +static bool +RenderTypeStart(WasmRenderContext& c, const AstName& name, const char (&keyword)[ArrayLength]) +{ + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(type ")) + return false; + if (!RenderNonemptyName(c, name)) + return false; + if (!c.buffer.append("(")) + return false; + return c.buffer.append(keyword); +} + +static bool +RenderTypeEnd(WasmRenderContext& c) +{ + return c.buffer.append("))\n"); +} + +static bool +RenderTypeSection(WasmRenderContext& c, const AstModule::TypeDefVector& types) +{ + for (uint32_t typeIndex = 0; typeIndex < types.length(); typeIndex++) { + const AstTypeDef* type = types[typeIndex]; + if (type->isFuncType()) { + const AstFuncType* funcType = &type->asFuncType(); + if (!RenderTypeStart(c, funcType->name(), "func")) + return false; + if (!RenderSignature(c, *funcType)) + return false; + } else { + const AstStructType* st = &type->asStructType(); + if (!RenderTypeStart(c, st->name(), "struct")) + return false; + c.indent++; + if (!RenderFields(c, *st)) + return false; + c.indent--; + } + if (!RenderTypeEnd(c)) + return false; + } + + return true; +} + +static bool +RenderLimits(WasmRenderContext& c, const Limits& limits) +{ + if (!RenderInt32(c, limits.initial)) + return false; + if (limits.maximum) { + if (!c.buffer.append(" ")) + return false; + if (!RenderInt32(c, *limits.maximum)) + return false; + } + if (limits.shared == Shareable::True) { + if (!c.buffer.append(" shared")) + return false; + } + return true; +} + +static bool +RenderResizableTable(WasmRenderContext& c, const Limits& table) +{ + if (!c.buffer.append("(table ")) + return false; + if 
(!RenderLimits(c, table)) + return false; + MOZ_ASSERT(table.shared == Shareable::False); + return c.buffer.append(" anyfunc)"); +} + +static bool +RenderTableSection(WasmRenderContext& c, const AstModule& module) +{ + if (!module.hasTable()) + return true; + for (const AstResizable& table : module.tables()) { + if (table.imported) + continue; + if (!RenderIndent(c)) + return false; + if (!RenderResizableTable(c, table.limits)) + return false; + if (!c.buffer.append("\n")) + return false; + } + return true; +} + +static bool +RenderInlineExpr(WasmRenderContext& c, AstExpr& expr) +{ + if (!c.buffer.append("(")) + return false; + + uint32_t prevIndent = c.indent; + c.indent = 0; + if (!RenderExpr(c, expr, /* newLine */ false)) + return false; + c.indent = prevIndent; + + return c.buffer.append(")"); +} + +static bool +RenderElemSection(WasmRenderContext& c, const AstModule& module) +{ + for (const AstElemSegment* segment : module.elemSegments()) { + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(elem ")) + return false; + if (!RenderInlineExpr(c, *segment->offset())) + return false; + + for (const AstRef& elem : segment->elems()) { + if (!c.buffer.append(" ")) + return false; + + uint32_t index = elem.index(); + AstName name = index < module.funcImportNames().length() + ? module.funcImportNames()[index] + : module.funcs()[index - module.funcImportNames().length()]->name(); + + if (name.empty()) { + if (!RenderInt32(c, index)) + return false; + } else { + if (!RenderName(c, name)) + return false; + } + } + + if (!c.buffer.append(")\n")) + return false; + } + + return true; +} + +static bool +RenderGlobal(WasmRenderContext& c, const AstGlobal& glob, bool inImport = false) +{ + if (!c.buffer.append("(global ")) + return false; + + if (!inImport) { + if (!RenderName(c, glob.name())) + return false; + if (!c.buffer.append(" ")) + return false; + } + + if (glob.isMutable()) { + if (!c.buffer.append("(mut ")) + return false; + if (!RenderValType(c, glob.type())) + return false; + if (!c.buffer.append(")")) + return false; + } else { + if (!RenderValType(c, glob.type())) + return false; + } + + if (glob.hasInit()) { + if (!c.buffer.append(" ")) + return false; + if (!RenderInlineExpr(c, glob.init())) + return false; + } + + if (!c.buffer.append(")")) + return false; + + return inImport || c.buffer.append("\n"); +} + +static bool +RenderGlobalSection(WasmRenderContext& c, const AstModule& module) +{ + if (module.globals().empty()) + return true; + + for (const AstGlobal* global : module.globals()) { + if (!RenderIndent(c)) + return false; + if (!RenderGlobal(c, *global)) + return false; + } + + return true; +} + +static bool +RenderResizableMemory(WasmRenderContext& c, const Limits& memory) +{ + if (!c.buffer.append("(memory ")) + return false; + + Limits resizedMemory = memory; + + MOZ_ASSERT(resizedMemory.initial % PageSize == 0); + resizedMemory.initial /= PageSize; + + if (resizedMemory.maximum) { + if (*resizedMemory.maximum == UINT32_MAX) { + // See special casing in DecodeMemoryLimits. 
+ *resizedMemory.maximum = MaxMemoryMaximumPages; + } else { + MOZ_ASSERT(*resizedMemory.maximum % PageSize == 0); + *resizedMemory.maximum /= PageSize; + } + } + + if (!RenderLimits(c, resizedMemory)) + return false; + + return c.buffer.append(")"); +} + +static bool +RenderImport(WasmRenderContext& c, AstImport& import, const AstModule& module) +{ + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(import ")) + return false; + if (!RenderName(c, import.name())) + return false; + if (!c.buffer.append(" \"")) + return false; + + const AstName& moduleName = import.module(); + if (!RenderEscapedString(c, moduleName)) + return false; + + if (!c.buffer.append("\" \"")) + return false; + + const AstName& fieldName = import.field(); + if (!RenderEscapedString(c, fieldName)) + return false; + + if (!c.buffer.append("\" ")) + return false; + + switch (import.kind()) { + case DefinitionKind::Function: { + if (!c.buffer.append("(func")) + return false; + const AstFuncType* funcType = &module.types()[import.funcType().index()]->asFuncType(); + if (!RenderSignature(c, *funcType)) + return false; + if (!c.buffer.append(")")) + return false; + break; + } + case DefinitionKind::Table: { + if (!RenderResizableTable(c, import.limits())) + return false; + break; + } + case DefinitionKind::Memory: { + if (!RenderResizableMemory(c, import.limits())) + return false; + break; + } + case DefinitionKind::Global: { + const AstGlobal& glob = import.global(); + if (!RenderGlobal(c, glob, /* inImport */ true)) + return false; + break; + } + } + + return c.buffer.append(")\n"); +} + +static bool +RenderImportSection(WasmRenderContext& c, const AstModule& module) +{ + for (AstImport* import : module.imports()) { + if (!RenderImport(c, *import, module)) + return false; + } + return true; +} + +static bool +RenderExport(WasmRenderContext& c, AstExport& export_, + const AstModule::NameVector& funcImportNames, + const AstModule::FuncVector& funcs) +{ + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(export \"")) + return false; + if (!RenderEscapedString(c, export_.name())) + return false; + if (!c.buffer.append("\" ")) + return false; + + switch (export_.kind()) { + case DefinitionKind::Function: { + uint32_t index = export_.ref().index(); + AstName name = index < funcImportNames.length() + ? 
funcImportNames[index] + : funcs[index - funcImportNames.length()]->name(); + if (name.empty()) { + if (!RenderInt32(c, index)) + return false; + } else { + if (!RenderName(c, name)) + return false; + } + break; + } + case DefinitionKind::Table: { + if (!c.buffer.append("table")) + return false; + break; + } + case DefinitionKind::Memory: { + if (!c.buffer.append("memory")) + return false; + break; + } + case DefinitionKind::Global: { + if (!c.buffer.append("global ")) + return false; + if (!RenderRef(c, export_.ref())) + return false; + break; + } + } + + return c.buffer.append(")\n"); +} + +static bool +RenderExportSection(WasmRenderContext& c, const AstModule::ExportVector& exports, + const AstModule::NameVector& funcImportNames, + const AstModule::FuncVector& funcs) +{ + uint32_t numExports = exports.length(); + for (uint32_t i = 0; i < numExports; i++) { + if (!RenderExport(c, *exports[i], funcImportNames, funcs)) + return false; + } + return true; +} + +static bool +RenderFunctionBody(WasmRenderContext& c, AstFunc& func, const AstModule::TypeDefVector& types) +{ + const AstFuncType* funcType = &types[func.funcType().index()]->asFuncType(); + + uint32_t argsNum = funcType->args().length(); + uint32_t localsNum = func.vars().length(); + if (localsNum > 0) { + if (!RenderIndent(c)) + return false; + for (uint32_t i = 0; i < localsNum; i++) { + if (!c.buffer.append("(local ")) + return false; + const AstName& name = func.locals()[argsNum + i]; + if (!name.empty()) { + if (!RenderName(c, name)) + return false; + if (!c.buffer.append(" ")) + return false; + } + ValType local = func.vars()[i]; + if (!RenderValType(c, local)) + return false; + if (!c.buffer.append(") ")) + return false; + } + if (!c.buffer.append("\n")) + return false; + } + + + uint32_t exprsNum = func.body().length(); + for (uint32_t i = 0; i < exprsNum; i++) { + if (!RenderExpr(c, *func.body()[i])) + return false; + } + + if (c.maybeSourceMap) { + if (!c.maybeSourceMap->exprlocs().emplaceBack(c.buffer.lineno(), c.buffer.column(), func.endOffset())) + return false; + } + + return true; +} + +static bool +RenderCodeSection(WasmRenderContext& c, const AstModule::FuncVector& funcs, + const AstModule::TypeDefVector& types) +{ + uint32_t numFuncBodies = funcs.length(); + for (uint32_t funcIndex = 0; funcIndex < numFuncBodies; funcIndex++) { + AstFunc* func = funcs[funcIndex]; + uint32_t funcTypeIndex = func->funcType().index(); + AstFuncType* funcType = &types[funcTypeIndex]->asFuncType(); + + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(func ")) + return false; + if (!func->name().empty()) { + if (!RenderName(c, func->name())) + return false; + } + + if (!RenderSignature(c, *funcType, &(func->locals()))) + return false; + if (!c.buffer.append("\n")) + return false; + + c.currentFuncIndex = funcIndex; + + c.indent++; + if (!RenderFunctionBody(c, *func, types)) + return false; + c.indent--; + if (!RenderIndent(c)) + return false; + if (!c.buffer.append(")\n")) + return false; + } + + return true; +} + +static bool +RenderMemorySection(WasmRenderContext& c, const AstModule& module) +{ + if (!module.hasMemory()) + return true; + + for (const AstResizable& memory : module.memories()) { + if (memory.imported) + continue; + if (!RenderIndent(c)) + return false; + if (!RenderResizableMemory(c, memory.limits)) + return false; + if (!c.buffer.append("\n")) + return false; + } + + return true; +} + +static bool +RenderDataSection(WasmRenderContext& c, const AstModule& module) +{ + uint32_t numSegments = 
module.dataSegments().length(); + if (!numSegments) + return true; + + for (const AstDataSegment* seg : module.dataSegments()) { + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(data ")) + return false; + if (!RenderInlineExpr(c, *seg->offset())) + return false; + if (!c.buffer.append("\n")) + return false; + + c.indent++; + for (const AstName& fragment : seg->fragments()) { + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("\"")) + return false; + if (!RenderEscapedString(c, fragment)) + return false; + if (!c.buffer.append("\"\n")) + return false; + } + c.indent--; + + if (!RenderIndent(c)) + return false; + if (!c.buffer.append(")\n")) + return false; + } + + return true; +} + +static bool +RenderStartSection(WasmRenderContext& c, AstModule& module) +{ + if (!module.hasStartFunc()) + return true; + + if (!RenderIndent(c)) + return false; + if (!c.buffer.append("(start ")) + return false; + if (!RenderRef(c, module.startFunc().func())) + return false; + if (!c.buffer.append(")\n")) + return false; + + return true; +} + +static bool +RenderModule(WasmRenderContext& c, AstModule& module) +{ + if (!c.buffer.append("(module\n")) + return false; + + c.indent++; + + if (!RenderTypeSection(c, module.types())) + return false; + + if (!RenderImportSection(c, module)) + return false; + + if (!RenderTableSection(c, module)) + return false; + + if (!RenderMemorySection(c, module)) + return false; + + if (!RenderGlobalSection(c, module)) + return false; + + if (!RenderExportSection(c, module.exports(), module.funcImportNames(), module.funcs())) + return false; + + if (!RenderStartSection(c, module)) + return false; + + if (!RenderElemSection(c, module)) + return false; + + if (!RenderCodeSection(c, module.funcs(), module.types())) + return false; + + if (!RenderDataSection(c, module)) + return false; + + c.indent--; + + if (!c.buffer.append(")")) + return false; + + return true; +} + +#undef MAP_AST_EXPR + +/*****************************************************************************/ +// Top-level functions + +bool +wasm::BinaryToText(JSContext* cx, const uint8_t* bytes, size_t length, StringBuffer& buffer, + GeneratedSourceMap* sourceMap /* = nullptr */) +{ + LifoAlloc lifo(AST_LIFO_DEFAULT_CHUNK_SIZE); + + AstModule* module; + if (!BinaryToAst(cx, bytes, length, lifo, &module)) + return false; + + WasmPrintBuffer buf(buffer); + WasmRenderContext c(cx, module, buf, sourceMap); + + if (!RenderModule(c, *module)) { + if (!cx->isExceptionPending()) + ReportOutOfMemory(cx); + return false; + } + + return true; +} diff --git a/js/src/wasm/WasmBinaryToText.h b/js/src/wasm/WasmBinaryToText.h new file mode 100644 index 000000000000..2aad45917e14 --- /dev/null +++ b/js/src/wasm/WasmBinaryToText.h @@ -0,0 +1,45 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2015 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef wasm_binary_to_text_h +#define wasm_binary_to_text_h + +#include "NamespaceImports.h" + +#include "gc/Rooting.h" +#include "js/Class.h" +#include "wasm/WasmCode.h" + +namespace js { + +class StringBuffer; + +namespace wasm { + +// Translate the given binary representation of a wasm module into the module's textual +// representation. + +MOZ_MUST_USE bool +BinaryToText(JSContext* cx, const uint8_t* bytes, size_t length, StringBuffer& buffer, + GeneratedSourceMap* sourceMap = nullptr); + +} // namespace wasm + +} // namespace js + +#endif // namespace wasm_binary_to_text_h diff --git a/js/src/wasm/WasmDebug.cpp b/js/src/wasm/WasmDebug.cpp index b9597baa23b7..7ce1a7eee015 100644 --- a/js/src/wasm/WasmDebug.cpp +++ b/js/src/wasm/WasmDebug.cpp @@ -27,6 +27,7 @@ #include "util/StringBuffer.h" #include "util/Text.h" #include "vm/Debugger.h" +#include "wasm/WasmBinaryToText.h" #include "wasm/WasmInstance.h" #include "wasm/WasmValidate.h" @@ -36,6 +37,56 @@ using namespace js::wasm; using mozilla::BinarySearchIf; +bool +GeneratedSourceMap::searchLineByOffset(JSContext* cx, uint32_t offset, size_t* exprlocIndex) +{ + MOZ_ASSERT(!exprlocs_.empty()); + size_t exprlocsLength = exprlocs_.length(); + + // Lazily build sorted array for fast log(n) lookup. + if (!sortedByOffsetExprLocIndices_) { + ExprLocIndexVector scratch; + auto indices = MakeUnique(); + if (!indices || !indices->resize(exprlocsLength) || !scratch.resize(exprlocsLength)) { + ReportOutOfMemory(cx); + return false; + } + sortedByOffsetExprLocIndices_ = std::move(indices); + + for (size_t i = 0; i < exprlocsLength; i++) + (*sortedByOffsetExprLocIndices_)[i] = i; + + auto compareExprLocViaIndex = [&](uint32_t i, uint32_t j, bool* lessOrEqualp) -> bool { + *lessOrEqualp = exprlocs_[i].offset <= exprlocs_[j].offset; + return true; + }; + MOZ_ALWAYS_TRUE(MergeSort(sortedByOffsetExprLocIndices_->begin(), exprlocsLength, + scratch.begin(), compareExprLocViaIndex)); + } + + // Allowing non-exact search and if BinarySearchIf returns out-of-bound + // index, moving the index to the last index. + auto lookupFn = [&](uint32_t i) -> int { + const ExprLoc& loc = exprlocs_[i]; + return offset == loc.offset ? 0 : offset < loc.offset ? 
-1 : 1; + }; + size_t match; + Unused << BinarySearchIf(sortedByOffsetExprLocIndices_->begin(), 0, exprlocsLength, lookupFn, &match); + if (match >= exprlocsLength) + match = exprlocsLength - 1; + *exprlocIndex = (*sortedByOffsetExprLocIndices_)[match]; + return true; +} + +size_t +GeneratedSourceMap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const +{ + size_t size = exprlocs_.sizeOfExcludingThis(mallocSizeOf); + if (sortedByOffsetExprLocIndices_) + size += sortedByOffsetExprLocIndices_->sizeOfIncludingThis(mallocSizeOf); + return size; +} + DebugState::DebugState(SharedCode code, const ShareableBytes* maybeBytecode, bool binarySource) @@ -48,14 +99,17 @@ DebugState::DebugState(SharedCode code, } const char enabledMessage[] = - "Restart with developer tools open to view WebAssembly source."; + "Restart with developer tools open to view WebAssembly source"; -const char noBinarySource[] = - "Configure the debugger to display WebAssembly bytecode."; +const char tooBigMessage[] = + "Unfortunately, this WebAssembly module is too big to view as text.\n" + "We are working hard to remove this limitation."; const char notGeneratedMessage[] = "WebAssembly text generation was disabled."; +static const unsigned TooBig = 1000000; + static const uint32_t DefaultBinarySourceColumnNumber = 1; static const CallSite* @@ -75,26 +129,98 @@ DebugState::createText(JSContext* cx) if (!maybeBytecode_) { if (!buffer.append(enabledMessage)) return nullptr; + + MOZ_ASSERT(!maybeSourceMap_); } else if (binarySource_) { if (!buffer.append(notGeneratedMessage)) return nullptr; - } else { - if (!buffer.append(noBinarySource)) + return buffer.finishString(); + } else if (maybeBytecode_->bytes.length() > TooBig) { + if (!buffer.append(tooBigMessage)) return nullptr; + + MOZ_ASSERT(!maybeSourceMap_); + } else { + const Bytes& bytes = maybeBytecode_->bytes; + auto sourceMap = MakeUnique(); + if (!sourceMap) { + ReportOutOfMemory(cx); + return nullptr; + } + maybeSourceMap_ = std::move(sourceMap); + + if (!BinaryToText(cx, bytes.begin(), bytes.length(), buffer, maybeSourceMap_.get())) + return nullptr; + +#if DEBUG + // Check that expression locations are sorted by line number. + uint32_t lastLineno = 0; + for (const ExprLoc& loc : maybeSourceMap_->exprlocs()) { + MOZ_ASSERT(lastLineno <= loc.lineno); + lastLineno = loc.lineno; + } +#endif } + return buffer.finishString(); } +bool +DebugState::ensureSourceMap(JSContext* cx) +{ + if (maybeSourceMap_ || !maybeBytecode_) + return true; + + // We just need to cache maybeSourceMap_, ignoring the text result. + return createText(cx); +} + +struct LineComparator +{ + const uint32_t lineno; + explicit LineComparator(uint32_t lineno) : lineno(lineno) {} + + int operator()(const ExprLoc& loc) const { + return lineno == loc.lineno ? 0 : lineno < loc.lineno ? -1 : 1; + } +}; + bool DebugState::getLineOffsets(JSContext* cx, size_t lineno, Vector* offsets) { if (!debugEnabled()) return true; - if (!binarySource_) + + if (binarySource_) { + const CallSite* callsite = SlowCallSiteSearchByOffset(metadata(Tier::Debug), lineno); + if (callsite && !offsets->append(lineno)) + return false; return true; - const CallSite* callsite = SlowCallSiteSearchByOffset(metadata(Tier::Debug), lineno); - if (callsite && !offsets->append(lineno)) + } + + if (!ensureSourceMap(cx)) return false; + + if (!maybeSourceMap_) + return true; // no source text available, keep offsets empty. 
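// Illustrative sketch (editorial, not part of this patch): the lookup that follows
// binary-searches the line-sorted expression locations for the requested line,
// rewinds to the first entry on that line, and then collects every offset printed
// on it. The same idea with the standard library (all names invented;
// std::lower_bound already lands on the first match, so no explicit rewind is
// needed):
#include <algorithm>
#include <cstdint>
#include <vector>

struct ToyExprLoc
{
    uint32_t lineno;
    uint32_t offset;
};

// `locs` must be sorted by line number, which holds because entries are recorded
// in the order the text is emitted.
std::vector<uint32_t>
ToyOffsetsForLine(const std::vector<ToyExprLoc>& locs, uint32_t lineno)
{
    std::vector<uint32_t> offsets;
    auto it = std::lower_bound(locs.begin(), locs.end(), lineno,
                               [](const ToyExprLoc& loc, uint32_t line) {
                                   return loc.lineno < line;
                               });
    for (; it != locs.end() && it->lineno == lineno; ++it)
        offsets.push_back(it->offset);
    return offsets;
}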
+ + ExprLocVector& exprlocs = maybeSourceMap_->exprlocs(); + + // Binary search for the expression with the specified line number and + // rewind to the first expression, if more than one expression on the same line. + size_t match; + if (!BinarySearchIf(exprlocs, 0, exprlocs.length(), LineComparator(lineno), &match)) + return true; + + while (match > 0 && exprlocs[match - 1].lineno == lineno) + match--; + + // Return all expression offsets that were printed on the specified line. + for (size_t i = match; i < exprlocs.length() && exprlocs[i].lineno == lineno; i++) { + if (!offsets->append(exprlocs[i].offset)) + return false; + } + return true; } @@ -103,16 +229,25 @@ DebugState::getAllColumnOffsets(JSContext* cx, Vector* offsets) { if (!metadata().debugEnabled) return true; - if (!binarySource_) + + if (binarySource_) { + for (const CallSite& callSite : metadata(Tier::Debug).callSites) { + if (callSite.kind() != CallSite::Breakpoint) + continue; + uint32_t offset = callSite.lineOrBytecode(); + if (!offsets->emplaceBack(offset, DefaultBinarySourceColumnNumber, offset)) + return false; + } return true; - for (const CallSite& callSite : metadata(Tier::Debug).callSites) { - if (callSite.kind() != CallSite::Breakpoint) - continue; - uint32_t offset = callSite.lineOrBytecode(); - if (!offsets->emplaceBack(offset, DefaultBinarySourceColumnNumber, offset)) - return false; } - return true; + + if (!ensureSourceMap(cx)) + return false; + + if (!maybeSourceMap_) + return true; // no source text available, keep offsets empty. + + return offsets->appendAll(maybeSourceMap_->exprlocs()); } bool @@ -121,13 +256,30 @@ DebugState::getOffsetLocation(JSContext* cx, uint32_t offset, bool* found, size_ *found = false; if (!debugEnabled()) return true; - if (!binarySource_) + + if (binarySource_) { + if (!SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset)) + return true; // offset was not found + *found = true; + *lineno = offset; + *column = DefaultBinarySourceColumnNumber; return true; - if (!SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset)) - return true; // offset was not found + } + + if (!ensureSourceMap(cx)) + return false; + + if (!maybeSourceMap_ || maybeSourceMap_->exprlocs().empty()) + return true; // no source text available + + size_t foundAt; + if (!maybeSourceMap_->searchLineByOffset(cx, offset, &foundAt)) + return false; + + const ExprLoc& loc = maybeSourceMap_->exprlocs()[foundAt]; *found = true; - *lineno = offset; - *column = DefaultBinarySourceColumnNumber; + *lineno = loc.lineno; + *column = loc.column; return true; } @@ -137,10 +289,18 @@ DebugState::totalSourceLines(JSContext* cx, uint32_t* count) *count = 0; if (!debugEnabled()) return true; - if (!binarySource_) + + if (binarySource_) { + if (maybeBytecode_) + *count = maybeBytecode_->length(); return true; - if (maybeBytecode_) - *count = maybeBytecode_->length(); + } + + if (!ensureSourceMap(cx)) + return false; + + if (maybeSourceMap_) + *count = maybeSourceMap_->totalLines(); return true; } @@ -548,6 +708,8 @@ DebugState::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* data) const { code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code, data); + if (maybeSourceMap_) + *data += maybeSourceMap_->sizeOfExcludingThis(mallocSizeOf); if (maybeBytecode_) *data += maybeBytecode_->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenBytes); } diff --git a/js/src/wasm/WasmDebug.h b/js/src/wasm/WasmDebug.h index d2b458f311b1..ab901bb1e1a9 100644 --- a/js/src/wasm/WasmDebug.h +++ b/js/src/wasm/WasmDebug.h @@ -34,8 
+34,8 @@ namespace wasm { struct MetadataTier; -// The generated source location for the AST node/expression. The offset field -// refers an offset in an binary format file. +// The generated source location for the AST node/expression. The offset field refers +// an offset in an binary format file. struct ExprLoc { @@ -48,14 +48,39 @@ struct ExprLoc {} }; +typedef Vector ExprLocVector; +typedef Vector ExprLocIndexVector; + +// The generated source map for WebAssembly binary file. This map is generated during +// building the text buffer (see BinaryToExperimentalText). + +class GeneratedSourceMap +{ + ExprLocVector exprlocs_; + UniquePtr sortedByOffsetExprLocIndices_; + uint32_t totalLines_; + + public: + explicit GeneratedSourceMap() : totalLines_(0) {} + ExprLocVector& exprlocs() { return exprlocs_; } + + uint32_t totalLines() { return totalLines_; } + void setTotalLines(uint32_t val) { totalLines_ = val; } + + bool searchLineByOffset(JSContext* cx, uint32_t offset, size_t* exprlocIndex); + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; +}; + +typedef UniquePtr UniqueGeneratedSourceMap; typedef HashMap, SystemAllocPolicy> StepModeCounters; -typedef HashMap, SystemAllocPolicy> - WasmBreakpointSiteMap; +typedef HashMap, SystemAllocPolicy> WasmBreakpointSiteMap; class DebugState { const SharedCode code_; const SharedBytes maybeBytecode_; + UniqueGeneratedSourceMap maybeSourceMap_; bool binarySource_; // State maintained when debugging is enabled. In this case, the Code is @@ -67,6 +92,7 @@ class DebugState StepModeCounters stepModeCounters_; void toggleDebugTrap(uint32_t offset, bool enabled); + bool ensureSourceMap(JSContext* cx); public: DebugState(SharedCode code, @@ -76,6 +102,10 @@ class DebugState const Bytes* maybeBytecode() const { return maybeBytecode_ ? &maybeBytecode_->bytes : nullptr; } bool binarySource() const { return binarySource_; } + // If the source bytecode was saved when this Code was constructed, this + // method will render the binary as text. Otherwise, a diagnostic string + // will be returned. 
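// Illustrative sketch (editorial, not part of this patch): exprlocs_ stays in
// emission (line) order, so searchLineByOffset builds a second vector of indices
// sorted by bytecode offset the first time it is needed -- an "argsort" that leaves
// the original vector untouched. A standalone analogue using the standard library
// (all names invented):
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

struct ToyMappedLoc
{
    uint32_t lineno;
    uint32_t column;
    uint32_t offset;
};

std::vector<uint32_t>
ToyBuildOffsetIndex(const std::vector<ToyMappedLoc>& locs)
{
    std::vector<uint32_t> indices(locs.size());
    std::iota(indices.begin(), indices.end(), 0u);     // 0, 1, 2, ...
    std::stable_sort(indices.begin(), indices.end(),   // sort the indices, not the entries
                     [&](uint32_t a, uint32_t b) {
                         return locs[a].offset < locs[b].offset;
                     });
    return indices;  // indices[k] is the position in `locs` of the k-th smallest offset
}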
+ JSString* createText(JSContext* cx); bool getLineOffsets(JSContext* cx, size_t lineno, Vector* offsets); bool getAllColumnOffsets(JSContext* cx, Vector* offsets); diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp index df066677be6d..b307b3117ea5 100644 --- a/js/src/wasm/WasmTextToBinary.cpp +++ b/js/src/wasm/WasmTextToBinary.cpp @@ -5544,7 +5544,7 @@ EncodeTableSection(Encoder& e, AstModule& module) } static bool -EncodeFunctionBody(Encoder& e, Uint32Vector* offsets, AstFunc& func) +EncodeFunctionBody(Encoder& e, AstFunc& func) { size_t bodySizeAt; if (!e.writePatchableVarU32(&bodySizeAt)) @@ -5559,14 +5559,10 @@ EncodeFunctionBody(Encoder& e, Uint32Vector* offsets, AstFunc& func) return false; for (AstExpr* expr : func.body()) { - if (!offsets->append(e.currentOffset())) - return false; if (!EncodeExpr(e, *expr)) return false; } - if (!offsets->append(e.currentOffset())) - return false; if (!e.writeOp(Op::End)) return false; @@ -5592,7 +5588,7 @@ EncodeStartSection(Encoder& e, AstModule& module) } static bool -EncodeCodeSection(Encoder& e, Uint32Vector* offsets, AstModule& module) +EncodeCodeSection(Encoder& e, AstModule& module) { if (module.funcs().empty()) return true; @@ -5605,7 +5601,7 @@ EncodeCodeSection(Encoder& e, Uint32Vector* offsets, AstModule& module) return false; for (AstFunc* func : module.funcs()) { - if (!EncodeFunctionBody(e, offsets, *func)) + if (!EncodeFunctionBody(e, *func)) return false; } @@ -5712,7 +5708,7 @@ EncodeElemSection(Encoder& e, AstModule& module) } static bool -EncodeModule(AstModule& module, Uint32Vector* offsets, Bytes* bytes) +EncodeModule(AstModule& module, Bytes* bytes) { Encoder e(*bytes); @@ -5749,7 +5745,7 @@ EncodeModule(AstModule& module, Uint32Vector* offsets, Bytes* bytes) if (!EncodeElemSection(e, module)) return false; - if (!EncodeCodeSection(e, offsets, module)) + if (!EncodeCodeSection(e, module)) return false; if (!EncodeDataSection(e, module)) @@ -5783,8 +5779,7 @@ EncodeBinaryModule(const AstModule& module, Bytes* bytes) /*****************************************************************************/ bool -wasm::TextToBinary(const char16_t* text, uintptr_t stackLimit, Bytes* bytes, Uint32Vector* offsets, - UniqueChars* error) +wasm::TextToBinary(const char16_t* text, uintptr_t stackLimit, Bytes* bytes, UniqueChars* error) { LifoAlloc lifo(AST_LIFO_DEFAULT_CHUNK_SIZE); @@ -5799,5 +5794,5 @@ wasm::TextToBinary(const char16_t* text, uintptr_t stackLimit, Bytes* bytes, Uin if (!ResolveModule(lifo, module, error)) return false; - return EncodeModule(*module, offsets, bytes); + return EncodeModule(*module, bytes); } diff --git a/js/src/wasm/WasmTextToBinary.h b/js/src/wasm/WasmTextToBinary.h index 0e525bb4aea6..a67e14c2a1f1 100644 --- a/js/src/wasm/WasmTextToBinary.h +++ b/js/src/wasm/WasmTextToBinary.h @@ -29,8 +29,7 @@ namespace wasm { // other than out-of-memory an error message string will be stored in 'error'. 
extern MOZ_MUST_USE bool -TextToBinary(const char16_t* text, uintptr_t stackLimit, Bytes* bytes, Uint32Vector* offsets, - UniqueChars* error); +TextToBinary(const char16_t* text, uintptr_t stackLimit, Bytes* bytes, UniqueChars* error); } // namespace wasm } // namespace js diff --git a/js/src/wasm/WasmTextUtils.cpp b/js/src/wasm/WasmTextUtils.cpp new file mode 100644 index 000000000000..6582ae9ce89d --- /dev/null +++ b/js/src/wasm/WasmTextUtils.cpp @@ -0,0 +1,80 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2016 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "wasm/WasmTextUtils.h" + +#include "util/StringBuffer.h" +#include "wasm/WasmTypes.h" + +using namespace js; +using namespace jit; +using namespace wasm; + +using mozilla::IsNaN; + +template +bool +wasm::RenderInBase(StringBuffer& sb, uint64_t num) +{ + uint64_t n = num; + uint64_t pow = 1; + while (n) { + pow *= base; + n /= base; + } + pow /= base; + + n = num; + while (pow) { + if (!sb.append("0123456789abcdef"[n / pow])) + return false; + n -= (n / pow) * pow; + pow /= base; + } + + return true; +} + +template bool wasm::RenderInBase<10>(StringBuffer& sb, uint64_t num); + +template +bool +wasm::RenderNaN(StringBuffer& sb, T num) +{ + typedef typename mozilla::SelectTrait Traits; + typedef typename Traits::Bits Bits; + + MOZ_ASSERT(IsNaN(num)); + + Bits bits = mozilla::BitwiseCast(num); + if ((bits & Traits::kSignBit) && !sb.append("-")) + return false; + if (!sb.append("nan")) + return false; + + Bits payload = bits & Traits::kSignificandBits; + // Only render the payload if it's not the spec's default NaN. + if (payload == ((Traits::kSignificandBits + 1) >> 1)) + return true; + + return sb.append(":0x") && + RenderInBase<16>(sb, payload); +} + +template MOZ_MUST_USE bool wasm::RenderNaN(StringBuffer& b, float num); +template MOZ_MUST_USE bool wasm::RenderNaN(StringBuffer& b, double num); diff --git a/js/src/wasm/WasmTextUtils.h b/js/src/wasm/WasmTextUtils.h new file mode 100644 index 000000000000..386ba03a032a --- /dev/null +++ b/js/src/wasm/WasmTextUtils.h @@ -0,0 +1,105 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2016 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef wasm_text_utils +#define wasm_text_utils + +#include "NamespaceImports.h" + +#include "util/StringBuffer.h" + +namespace js { +namespace wasm { + +template +MOZ_MUST_USE bool +RenderInBase(StringBuffer& sb, uint64_t num); + +template +MOZ_MUST_USE bool +RenderNaN(StringBuffer& sb, T num); + +// Helper class, StringBuffer wrapper, to track the position (line and column) +// within the generated source. + +class WasmPrintBuffer +{ + StringBuffer& stringBuffer_; + uint32_t lineno_; + uint32_t column_; + + public: + explicit WasmPrintBuffer(StringBuffer& stringBuffer) + : stringBuffer_(stringBuffer), + lineno_(1), + column_(1) + {} + inline char processChar(char ch) { + if (ch == '\n') { + lineno_++; column_ = 1; + } else + column_++; + return ch; + } + inline char16_t processChar(char16_t ch) { + if (ch == '\n') { + lineno_++; column_ = 1; + } else + column_++; + return ch; + } + bool append(const char ch) { + return stringBuffer_.append(processChar(ch)); + } + bool append(const char16_t ch) { + return stringBuffer_.append(processChar(ch)); + } + bool append(const char* str, size_t length) { + for (size_t i = 0; i < length; i++) + processChar(str[i]); + return stringBuffer_.append(str, length); + } + bool append(const char16_t* begin, const char16_t* end) { + for (const char16_t* p = begin; p != end; p++) + processChar(*p); + return stringBuffer_.append(begin, end); + } + bool append(const char16_t* str, size_t length) { + return append(str, str + length); + } + template + bool append(const char (&array)[ArrayLength]) { + static_assert(ArrayLength > 0, "null-terminated"); + MOZ_ASSERT(array[ArrayLength - 1] == '\0'); + return append(array, ArrayLength - 1); + } + char16_t getChar(size_t index) { + return stringBuffer_.getChar(index); + } + size_t length() { + return stringBuffer_.length(); + } + StringBuffer& stringBuffer() { return stringBuffer_; } + uint32_t lineno() { return lineno_; } + uint32_t column() { return column_; } +}; + +} // namespace wasm +} // namespace js + +#endif // namespace wasm_text_utils diff --git a/js/src/wasm/WasmValidate.h b/js/src/wasm/WasmValidate.h index 6f19cb6fbe0b..ed6bdbd0ec98 100644 --- a/js/src/wasm/WasmValidate.h +++ b/js/src/wasm/WasmValidate.h @@ -132,6 +132,9 @@ struct ModuleEnvironment bool funcIsImport(uint32_t funcIndex) const { return funcIndex < funcImportGlobalDataOffsets.length(); } + uint32_t funcIndexToFuncTypeIndex(uint32_t funcIndex) const { + return TypeDef::fromFuncTypeWithIdPtr(funcTypes[funcIndex]) - types.begin(); + } }; // The Encoder class appends bytes to the Bytes object it is given during
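// Illustrative sketch (editorial, not part of this patch): WasmPrintBuffer above
// wraps a StringBuffer and advances a (line, column) cursor as characters are
// appended, which is what lets the renderer record a source-map location for each
// expression it prints. The same bookkeeping over std::string (all names invented):
#include <cstdint>
#include <string>

class ToyPrintBuffer
{
    std::string out_;
    uint32_t lineno_ = 1;
    uint32_t column_ = 1;

  public:
    void append(char ch) {
        if (ch == '\n') {
            lineno_++;
            column_ = 1;   // a newline starts the next line at column 1
        } else {
            column_++;
        }
        out_ += ch;
    }
    void append(const char* str) {
        while (*str)
            append(*str++);
    }
    const std::string& text() const { return out_; }
    uint32_t lineno() const { return lineno_; }
    uint32_t column() const { return column_; }
};

// After append("i32.const 1\n") on a fresh buffer, lineno() is 2 and column() is 1.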