From 4a962a958480b1023caf4885ac7d286cefc16d9f Mon Sep 17 00:00:00 2001 From: Dan Gohman Date: Thu, 28 Apr 2016 10:36:22 -0500 Subject: [PATCH] Bug 1259295 - BaldrMonkey: Postorder (r=luke) MozReview-Commit-ID: ImqMOvb2B4o --HG-- extra : rebase_source : b41c64be0480ea787fadedde98b2173a6aa0485c --- js/src/asmjs/AsmJS.cpp | 1048 +++---- js/src/asmjs/Wasm.cpp | 775 ++--- js/src/asmjs/WasmBinary.h | 70 +- js/src/asmjs/WasmBinaryIterator.h | 2153 ++++++++++++++ js/src/asmjs/WasmBinaryToText.cpp | 8 +- js/src/asmjs/WasmGenerator.cpp | 33 +- js/src/asmjs/WasmGenerator.h | 87 +- js/src/asmjs/WasmIonCompile.cpp | 2624 ++++++++--------- js/src/asmjs/WasmIonCompile.h | 8 +- js/src/asmjs/WasmStubs.cpp | 2 + js/src/asmjs/WasmTextToBinary.cpp | 143 +- js/src/asmjs/WasmTypes.h | 78 + .../jit-test/tests/asm.js/testExpressions.js | 8 + .../jit-test/tests/wasm/basic-control-flow.js | 14 +- js/src/jit-test/tests/wasm/basic-memory.js | 4 +- js/src/jit-test/tests/wasm/binary.js | 24 +- js/src/jit-test/tests/wasm/totext1.js | 3 + 17 files changed, 4401 insertions(+), 2681 deletions(-) create mode 100644 js/src/asmjs/WasmBinaryIterator.h diff --git a/js/src/asmjs/AsmJS.cpp b/js/src/asmjs/AsmJS.cpp index a9955549c0d6..9a23943a7a35 100644 --- a/js/src/asmjs/AsmJS.cpp +++ b/js/src/asmjs/AsmJS.cpp @@ -1811,7 +1811,7 @@ class MOZ_STACK_CLASS ModuleValidator MOZ_ASSERT(type == Type::canonicalize(Type::lit(lit))); uint32_t index; - if (!mg_.allocateGlobalVar(type.canonicalToValType(), isConst, &index)) + if (!mg_.allocateGlobal(type.canonicalToValType(), isConst, &index)) return false; Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable; @@ -1828,7 +1828,7 @@ class MOZ_STACK_CLASS ModuleValidator AsmJSGlobal g(AsmJSGlobal::Variable, nullptr); g.pod.u.var.initKind_ = AsmJSGlobal::InitConstant; g.pod.u.var.u.val_ = lit.value(); - g.pod.u.var.globalDataOffset_ = mg_.globalVar(index).globalDataOffset; + g.pod.u.var.globalDataOffset_ = mg_.global(index).globalDataOffset; return module_->globals.append(g); } bool addGlobalVarImport(PropertyName* var, PropertyName* field, Type type, bool isConst) { @@ -1836,7 +1836,7 @@ class MOZ_STACK_CLASS ModuleValidator uint32_t index; ValType valType = type.canonicalToValType(); - if (!mg_.allocateGlobalVar(valType, isConst, &index)) + if (!mg_.allocateGlobal(valType, isConst, &index)) return false; Global::Which which = isConst ? Global::ConstantImport : Global::Variable; @@ -1851,7 +1851,7 @@ class MOZ_STACK_CLASS ModuleValidator AsmJSGlobal g(AsmJSGlobal::Variable, field); g.pod.u.var.initKind_ = AsmJSGlobal::InitImport; g.pod.u.var.u.importType_ = valType; - g.pod.u.var.globalDataOffset_ = mg_.globalVar(index).globalDataOffset; + g.pod.u.var.globalDataOffset_ = mg_.global(index).globalDataOffset; return module_->globals.append(g); } bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) { @@ -2550,13 +2550,13 @@ SimdToExpr(SimdType type, SimdOperation op) case SimdOperation::Fn_greaterThan: return Expr::I32x4greaterThanU; case SimdOperation::Fn_greaterThanOrEqual: return Expr::I32x4greaterThanOrEqualU; case SimdOperation::Fn_fromFloat32x4: return Expr::I32x4fromFloat32x4U; - case SimdOperation::Fn_fromInt32x4Bits: return Expr::Id; + case SimdOperation::Fn_fromInt32x4Bits: return Expr::Limit; default: break; } MOZ_FALLTHROUGH; case SimdType::Int32x4: { // Bitcasts Uint32x4 <--> Int32x4 become noops. - if (op == SimdOperation::Fn_fromUint32x4Bits) return Expr::Id; + if (op == SimdOperation::Fn_fromUint32x4Bits) return Expr::Limit; ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32CASE) break; } @@ -2699,8 +2699,7 @@ class MOZ_STACK_CLASS FunctionValidator MOZ_ASSERT(expr == Expr::Br || expr == Expr::BrIf); MOZ_ASSERT(absolute < blockDepth_); return encoder().writeExpr(expr) && - encoder().writeVarU32(blockDepth_ - 1 - absolute) && - encoder().writeExpr(Expr::Nop); + encoder().writeVarU32(blockDepth_ - 1 - absolute); } void removeLabel(PropertyName* label, LabelMap* map) { LabelMap::Ptr p = map->lookup(label); @@ -2709,16 +2708,16 @@ class MOZ_STACK_CLASS FunctionValidator } public: - bool pushBreakableBlock(uint32_t numStmts) { + bool pushBreakableBlock() { return encoder().writeExpr(Expr::Block) && - encoder().writeVarU32(numStmts) && breakableStack_.append(blockDepth_++); } - void popBreakableBlock() { + bool popBreakableBlock() { JS_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_); + return encoder().writeExpr(Expr::End); } - bool pushUnbreakableBlock(uint32_t numStmts, const NameVector* labels = nullptr) { + bool pushUnbreakableBlock(const NameVector* labels = nullptr) { if (labels) { for (PropertyName* label : *labels) { if (!breakLabels_.putNew(label, blockDepth_)) @@ -2726,35 +2725,49 @@ class MOZ_STACK_CLASS FunctionValidator } } blockDepth_++; - return encoder().writeExpr(Expr::Block) && - encoder().writeVarU32(numStmts); + return encoder().writeExpr(Expr::Block); } - void popUnbreakableBlock(const NameVector* labels = nullptr) { + bool popUnbreakableBlock(const NameVector* labels = nullptr) { if (labels) { for (PropertyName* label : *labels) removeLabel(label, &breakLabels_); } --blockDepth_; + return encoder().writeExpr(Expr::End); } - bool pushContinuableBlock(uint32_t numStmts) { + bool pushContinuableBlock() { return encoder().writeExpr(Expr::Block) && - encoder().writeVarU32(numStmts) && continuableStack_.append(blockDepth_++); } - void popContinuableBlock() { + bool popContinuableBlock() { JS_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_); + return encoder().writeExpr(Expr::End); } - bool pushLoop(uint32_t numStmts) { + bool pushLoop() { return encoder().writeExpr(Expr::Loop) && - encoder().writeVarU32(numStmts) && breakableStack_.append(blockDepth_++) && continuableStack_.append(blockDepth_++); } - void popLoop() { + bool popLoop() { JS_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_); JS_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_); + return encoder().writeExpr(Expr::End); + } + + bool pushIf() { + ++blockDepth_; + return encoder().writeExpr(Expr::If); + } + bool switchToElse() { + MOZ_ASSERT(blockDepth_ > 0); + return encoder().writeExpr(Expr::Else); + } + bool popIf() { + MOZ_ASSERT(blockDepth_ > 0); + --blockDepth_; + return encoder().writeExpr(Expr::End); } bool writeBreakIf() { @@ -2850,12 +2863,14 @@ class MOZ_STACK_CLASS FunctionValidator return encoder().writeExpr(op) && fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin)); } - MOZ_WARN_UNUSED_RESULT bool patchableCall(ParseNode* pn, size_t* offset) { - return encoder().writePatchableOneByteExpr(offset) && - fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin)); + MOZ_WARN_UNUSED_RESULT bool prepareCall(ParseNode* pn) { + return fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin)); } MOZ_WARN_UNUSED_RESULT bool writeSimdOp(SimdType simdType, SimdOperation op) { - return encoder().writeExpr(SimdToExpr(simdType, op)); + Expr expr = SimdToExpr(simdType, op); + if (expr == Expr::Limit) + return true; + return encoder().writeExpr(expr); } }; @@ -3536,12 +3551,12 @@ CheckVariables(FunctionValidator& f, ParseNode** stmtIter) NumLit lit = inits[i]; if (lit.isZeroBits()) continue; + if (!f.writeConstExpr(lit)) + return false; if (!f.encoder().writeExpr(Expr::SetLocal)) return false; if (!f.encoder().writeVarU32(firstVar + i)) return false; - if (!f.writeConstExpr(lit)) - return false; } *stmtIter = stmt; @@ -3619,10 +3634,8 @@ static const bool NoSimd = false; static bool CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr, - bool isSimd, Scalar::Type* viewType, int32_t* mask) + bool isSimd, Scalar::Type* viewType) { - *mask = 0; - if (!viewName->isKind(PNK_NAME)) return f.fail(viewName, "base of array access must be a typed array view name"); @@ -3639,14 +3652,13 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr if (!f.m().tryConstantAccess(byteOffset, width)) return f.fail(indexExpr, "constant index out of range"); - *mask = NoMask; return f.writeInt32Lit(byteOffset); } // Mask off the low bits to account for the clearing effect of a right shift // followed by the left shift implicit in the array access. E.g., H32[i>>2] // loses the low two bits. - *mask = ~(TypedArrayElemSize(*viewType) - 1); + int32_t mask = ~(TypedArrayElemSize(*viewType) - 1); if (indexExpr->isKind(PNK_RSH)) { ParseNode* shiftAmountNode = BitwiseRight(indexExpr); @@ -3673,7 +3685,7 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr if (TypedArrayShift(*viewType) != 0) return f.fail(indexExpr, "index expression isn't shifted; must be an Int8/Uint8 access"); - MOZ_ASSERT(*mask == NoMask); + MOZ_ASSERT(mask == NoMask); ParseNode* pointerNode = indexExpr; @@ -3690,6 +3702,13 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr } } + // Don't generate the mask op if there is no need for it which could happen for + // a shift of zero or a SIMD access. + if (mask != NoMask) { + return f.writeInt32Lit(mask) && + f.encoder().writeExpr(Expr::I32And); + } + return true; } @@ -3697,35 +3716,22 @@ static bool CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr, bool isSimd, Scalar::Type* viewType) { - size_t flagsAt; - if (!f.encoder().writePatchableFixedU8(&flagsAt)) + return CheckArrayAccess(f, viewName, indexExpr, isSimd, viewType); +} + +static bool +WriteArrayAccessFlags(FunctionValidator& f, Scalar::Type viewType) +{ + // asm.js only has naturally-aligned accesses. + size_t align = TypedArrayElemSize(viewType); + MOZ_ASSERT(IsPowerOfTwo(align)); + if (!f.encoder().writeFixedU8(CeilingLog2(align))) return false; // asm.js doesn't have constant offsets, so just encode a 0. if (!f.encoder().writeVarU32(0)) return false; - size_t prepareAt; - if (!f.encoder().writePatchableOneByteExpr(&prepareAt)) - return false; - - int32_t mask; - if (!CheckArrayAccess(f, viewName, indexExpr, isSimd, viewType, &mask)) - return false; - - // asm.js only has naturally-aligned accesses. - size_t align = TypedArrayElemSize(*viewType); - MOZ_ASSERT(IsPowerOfTwo(align)); - f.encoder().patchFixedU8(flagsAt, CeilingLog2(align)); - - // Don't generate the mask op if there is no need for it which could happen for - // a shift of zero or a SIMD access. - if (mask != NoMask) { - f.encoder().patchOneByteExpr(prepareAt, Expr::I32And); - return f.writeInt32Lit(mask); - } - - f.encoder().patchOneByteExpr(prepareAt, Expr::Id); return true; } @@ -3734,22 +3740,18 @@ CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type) { Scalar::Type viewType; - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), NoSimd, &viewType)) return false; switch (viewType) { - case Scalar::Int8: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load8S); break; - case Scalar::Uint8: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load8U); break; - case Scalar::Int16: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load16S); break; - case Scalar::Uint16: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load16U); break; + case Scalar::Int8: if (!f.encoder().writeExpr(Expr::I32Load8S)) return false; break; + case Scalar::Uint8: if (!f.encoder().writeExpr(Expr::I32Load8U)) return false; break; + case Scalar::Int16: if (!f.encoder().writeExpr(Expr::I32Load16S)) return false; break; + case Scalar::Uint16: if (!f.encoder().writeExpr(Expr::I32Load16U)) return false; break; case Scalar::Uint32: - case Scalar::Int32: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load); break; - case Scalar::Float32: f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Load); break; - case Scalar::Float64: f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Load); break; + case Scalar::Int32: if (!f.encoder().writeExpr(Expr::I32Load)) return false; break; + case Scalar::Float32: if (!f.encoder().writeExpr(Expr::F32Load)) return false; break; + case Scalar::Float64: if (!f.encoder().writeExpr(Expr::F64Load)) return false; break; default: MOZ_CRASH("unexpected scalar type"); } @@ -3771,16 +3773,15 @@ CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type) default: MOZ_CRASH("Unexpected array type"); } + if (!WriteArrayAccessFlags(f, viewType)) + return false; + return true; } static bool CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type) { - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Scalar::Type viewType; if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), NoSimd, &viewType)) return false; @@ -3814,31 +3815,43 @@ CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type switch (viewType) { case Scalar::Int8: case Scalar::Uint8: - f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Store8); + if (!f.encoder().writeExpr(Expr::I32Store8)) + return false; break; case Scalar::Int16: case Scalar::Uint16: - f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Store16); + if (!f.encoder().writeExpr(Expr::I32Store16)) + return false; break; case Scalar::Int32: case Scalar::Uint32: - f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Store); + if (!f.encoder().writeExpr(Expr::I32Store)) + return false; break; case Scalar::Float32: - if (rhsType.isFloatish()) - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Store); - else - f.encoder().patchOneByteExpr(opcodeAt, Expr::F64StoreF32); + if (rhsType.isFloatish()) { + if (!f.encoder().writeExpr(Expr::F32Store)) + return false; + } else { + if (!f.encoder().writeExpr(Expr::F64StoreF32)) + return false; + } break; case Scalar::Float64: - if (rhsType.isFloatish()) - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32StoreF64); - else - f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Store); + if (rhsType.isFloatish()) { + if (!f.encoder().writeExpr(Expr::F32StoreF64)) + return false; + } else { + if (!f.encoder().writeExpr(Expr::F64Store)) + return false; + } break; default: MOZ_CRASH("unexpected scalar type"); } + if (!WriteArrayAccessFlags(f, viewType)) + return false; + *type = rhsType; return true; } @@ -3849,15 +3862,15 @@ CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type RootedPropertyName name(f.cx(), lhs->name()); if (const FunctionValidator::Local* lhsVar = f.lookupLocal(name)) { + Type rhsType; + if (!CheckExpr(f, rhs, &rhsType)) + return false; + if (!f.encoder().writeExpr(Expr::SetLocal)) return false; if (!f.encoder().writeVarU32(lhsVar->slot)) return false; - Type rhsType; - if (!CheckExpr(f, rhs, &rhsType)) - return false; - if (!(rhsType <= lhsVar->type)) { return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(), lhsVar->type.toChars()); @@ -3870,11 +3883,6 @@ CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type if (global->which() != ModuleValidator::Global::Variable) return f.failName(lhs, "'%s' is not a mutable variable", name); - if (!f.encoder().writeExpr(Expr::StoreGlobal)) - return false; - if (!f.encoder().writeVarU32(global->varOrConstIndex())) - return false; - Type rhsType; if (!CheckExpr(f, rhs, &rhsType)) return false; @@ -3882,6 +3890,11 @@ CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type Type globType = global->varOrConstType(); if (!(rhsType <= globType)) return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(), globType.toChars()); + if (!f.encoder().writeExpr(Expr::StoreGlobal)) + return false; + if (!f.encoder().writeVarU32(global->varOrConstIndex())) + return false; + *type = rhsType; return true; } @@ -3915,9 +3928,6 @@ CheckMathIMul(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* lhs = CallArgList(call); ParseNode* rhs = NextNode(lhs); - if (!f.encoder().writeExpr(Expr::I32Mul)) - return false; - Type lhsType; if (!CheckExpr(f, lhs, &lhsType)) return false; @@ -3932,7 +3942,7 @@ CheckMathIMul(FunctionValidator& f, ParseNode* call, Type* type) return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars()); *type = Type::Signed; - return true; + return f.encoder().writeExpr(Expr::I32Mul); } static bool @@ -3941,9 +3951,6 @@ CheckMathClz32(FunctionValidator& f, ParseNode* call, Type* type) if (CallArgListLength(call) != 1) return f.fail(call, "Math.clz32 must be passed 1 argument"); - if (!f.encoder().writeExpr(Expr::I32Clz)) - return false; - ParseNode* arg = CallArgList(call); Type argType; @@ -3954,7 +3961,7 @@ CheckMathClz32(FunctionValidator& f, ParseNode* call, Type* type) return f.failf(arg, "%s is not a subtype of intish", argType.toChars()); *type = Type::Fixnum; - return true; + return f.encoder().writeExpr(Expr::I32Clz); } static bool @@ -3965,30 +3972,23 @@ CheckMathAbs(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* arg = CallArgList(call); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type argType; if (!CheckExpr(f, arg, &argType)) return false; if (argType.isSigned()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Abs); *type = Type::Unsigned; - return true; + return f.encoder().writeExpr(Expr::I32Abs); } if (argType.isMaybeDouble()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Abs); *type = Type::Double; - return true; + return f.encoder().writeExpr(Expr::F64Abs); } if (argType.isMaybeFloat()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Abs); *type = Type::Floatish; - return true; + return f.encoder().writeExpr(Expr::F32Abs); } return f.failf(call, "%s is not a subtype of signed, float? or double?", argType.toChars()); @@ -4002,24 +4002,18 @@ CheckMathSqrt(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* arg = CallArgList(call); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type argType; if (!CheckExpr(f, arg, &argType)) return false; if (argType.isMaybeDouble()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Sqrt); *type = Type::Double; - return true; + return f.encoder().writeExpr(Expr::F64Sqrt); } if (argType.isMaybeFloat()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Sqrt); *type = Type::Floatish; - return true; + return f.encoder().writeExpr(Expr::F32Sqrt); } return f.failf(call, "%s is neither a subtype of double? nor float?", argType.toChars()); @@ -4031,10 +4025,6 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ if (CallArgListLength(callNode) < 2) return f.fail(callNode, "Math.min/max must be passed at least 2 arguments"); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - ParseNode* firstArg = CallArgList(callNode); Type firstType; if (!CheckExpr(f, firstArg, &firstType)) @@ -4057,19 +4047,18 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ return f.failf(firstArg, "%s is not a subtype of double?, float? or signed", firstType.toChars()); } - f.encoder().patchOneByteExpr(opcodeAt, expr); unsigned numArgs = CallArgListLength(callNode); ParseNode* nextArg = NextNode(firstArg); for (unsigned i = 1; i < numArgs; i++, nextArg = NextNode(nextArg)) { - if (i != numArgs - 1 && !f.encoder().writeExpr(expr)) - return false; - Type nextType; if (!CheckExpr(f, nextArg, &nextType)) return false; if (!(nextType <= firstType)) return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(), firstType.toChars()); + + if (!f.encoder().writeExpr(expr)) + return false; } return true; @@ -4105,10 +4094,10 @@ CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNod } static bool -WriteAtomicOperator(FunctionValidator& f, Expr opcode, size_t* viewTypeAt) +WriteAtomicOperator(FunctionValidator& f, Expr opcode, Scalar::Type viewType) { return f.encoder().writeExpr(opcode) && - f.encoder().writePatchableFixedU8(viewTypeAt); + f.encoder().writeFixedU8(viewType); } static bool @@ -4120,15 +4109,15 @@ CheckAtomicsLoad(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* arrayArg = CallArgList(call); ParseNode* indexArg = NextNode(arrayArg); - size_t viewTypeAt; - if (!WriteAtomicOperator(f, Expr::I32AtomicsLoad, &viewTypeAt)) - return false; - Scalar::Type viewType; if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) return false; - f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType)); + if (!WriteAtomicOperator(f, Expr::I32AtomicsLoad, viewType)) + return false; + + if (!WriteArrayAccessFlags(f, viewType)) + return false; *type = Type::Int; return true; @@ -4144,14 +4133,6 @@ CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* indexArg = NextNode(arrayArg); ParseNode* valueArg = NextNode(indexArg); - size_t viewTypeAt; - if (!WriteAtomicOperator(f, Expr::I32AtomicsStore, &viewTypeAt)) - return false; - - Scalar::Type viewType; - if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) - return false; - Type rhsType; if (!CheckExpr(f, valueArg, &rhsType)) return false; @@ -4159,7 +4140,15 @@ CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type) if (!rhsType.isIntish()) return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars()); - f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType)); + Scalar::Type viewType; + if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) + return false; + + if (!WriteAtomicOperator(f, Expr::I32AtomicsStore, viewType)) + return false; + + if (!WriteArrayAccessFlags(f, viewType)) + return false; *type = rhsType; return true; @@ -4175,16 +4164,6 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op ParseNode* indexArg = NextNode(arrayArg); ParseNode* valueArg = NextNode(indexArg); - size_t viewTypeAt; - if (!WriteAtomicOperator(f, Expr::I32AtomicsBinOp, &viewTypeAt)) - return false; - if (!f.encoder().writeFixedU8(uint8_t(op))) - return false; - - Scalar::Type viewType; - if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) - return false; - Type valueArgType; if (!CheckExpr(f, valueArg, &valueArgType)) return false; @@ -4192,7 +4171,17 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op if (!valueArgType.isIntish()) return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars()); - f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType)); + Scalar::Type viewType; + if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) + return false; + + if (!WriteAtomicOperator(f, Expr::I32AtomicsBinOp, viewType)) + return false; + if (!f.encoder().writeFixedU8(uint8_t(op))) + return false; + + if (!WriteArrayAccessFlags(f, viewType)) + return false; *type = Type::Int; return true; @@ -4225,14 +4214,6 @@ CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* oldValueArg = NextNode(indexArg); ParseNode* newValueArg = NextNode(oldValueArg); - size_t viewTypeAt; - if (!WriteAtomicOperator(f, Expr::I32AtomicsCompareExchange, &viewTypeAt)) - return false; - - Scalar::Type viewType; - if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) - return false; - Type oldValueArgType; if (!CheckExpr(f, oldValueArg, &oldValueArgType)) return false; @@ -4247,7 +4228,15 @@ CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type) if (!newValueArgType.isIntish()) return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars()); - f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType)); + Scalar::Type viewType; + if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) + return false; + + if (!WriteAtomicOperator(f, Expr::I32AtomicsCompareExchange, viewType)) + return false; + + if (!WriteArrayAccessFlags(f, viewType)) + return false; *type = Type::Int; return true; @@ -4263,14 +4252,6 @@ CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* indexArg = NextNode(arrayArg); ParseNode* valueArg = NextNode(indexArg); - size_t viewTypeAt; - if (!WriteAtomicOperator(f, Expr::I32AtomicsExchange, &viewTypeAt)) - return false; - - Scalar::Type viewType; - if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) - return false; - Type valueArgType; if (!CheckExpr(f, valueArg, &valueArgType)) return false; @@ -4278,7 +4259,15 @@ CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type) if (!valueArgType.isIntish()) return f.failf(arrayArg, "%s is not a subtype of intish", valueArgType.toChars()); - f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType)); + Scalar::Type viewType; + if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType)) + return false; + + if (!WriteAtomicOperator(f, Expr::I32AtomicsExchange, viewType)) + return false; + + if (!WriteArrayAccessFlags(f, viewType)) + return false; *type = Type::Int; return true; @@ -4394,14 +4383,6 @@ CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calle { MOZ_ASSERT(ret.isCanonical()); - if (!f.writeCall(callNode, Expr::Call)) - return false; - - // Function's index, to find out the function's entry - size_t funcIndexAt; - if (!f.encoder().writePatchableVarU32(&funcIndexAt)) - return false; - ValTypeVector args; if (!CheckCallArgs(f, callNode, &args)) return false; @@ -4411,7 +4392,13 @@ CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calle if (!CheckFunctionSignature(f.m(), callNode, Move(sig), calleeName, &callee)) return false; - f.encoder().patchVarU32(funcIndexAt, callee->index()); + if (!f.writeCall(callNode, Expr::Call)) + return false; + + // Function's index, to find out the function's entry + if (!f.encoder().writeVarU32(callee->index())) + return false; + *type = Type::ret(ret); return true; } @@ -4472,15 +4459,6 @@ CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, Type ret, Type* type if (!IsLiteralInt(f.m(), maskNode, &mask) || mask == UINT32_MAX || !IsPowerOfTwo(mask + 1)) return f.fail(maskNode, "function-pointer table index mask value must be a power of two minus 1"); - // Opcode - if (!f.writeCall(callNode, Expr::CallIndirect)) - return false; - - // Call signature - size_t sigIndexAt; - if (!f.encoder().writePatchableVarU32(&sigIndexAt)) - return false; - Type indexType; if (!CheckExpr(f, indexNode, &indexType)) return false; @@ -4498,7 +4476,12 @@ CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, Type ret, Type* type if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, Move(sig), mask, &tableIndex)) return false; - f.encoder().patchVarU32(sigIndexAt, f.m().funcPtrTable(tableIndex).sigIndex()); + if (!f.writeCall(callNode, Expr::CallIndirect)) + return false; + + // Call signature + if (!f.encoder().writeVarU32(f.m().funcPtrTable(tableIndex).sigIndex())) + return false; *type = Type::ret(ret); return true; @@ -4524,15 +4507,6 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, Type if (ret.isSimd()) return f.fail(callNode, "FFI calls can't return SIMD values"); - // Opcode - if (!f.writeCall(callNode, Expr::CallImport)) - return false; - - // Import index - size_t importIndexAt; - if (!f.encoder().writePatchableVarU32(&importIndexAt)) - return false; - ValTypeVector args; if (!CheckCallArgs(f, callNode, &args)) return false; @@ -4542,32 +4516,28 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, Type if (!f.m().declareImport(calleeName, Move(sig), ffiIndex, &importIndex)) return false; - f.encoder().patchVarU32(importIndexAt, importIndex); + if (!f.writeCall(callNode, Expr::CallImport)) + return false; + + // Import index + if (!f.encoder().writeVarU32(importIndex)) + return false; *type = Type::ret(ret); return true; } static bool -CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType, - size_t opcodeAt) +CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType) { - if (inputType.isMaybeDouble()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32DemoteF64); + if (inputType.isMaybeDouble()) + return f.encoder().writeExpr(Expr::F32DemoteF64); + if (inputType.isSigned()) + return f.encoder().writeExpr(Expr::F32ConvertSI32); + if (inputType.isUnsigned()) + return f.encoder().writeExpr(Expr::F32ConvertUI32); + if (inputType.isFloatish()) return true; - } - if (inputType.isSigned()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32ConvertSI32); - return true; - } - if (inputType.isUnsigned()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32ConvertUI32); - return true; - } - if (inputType.isFloatish()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::Id); - return true; - } return f.failf(inputNode, "%s is not a subtype of signed, unsigned, double? or floatish", inputType.toChars()); @@ -4584,21 +4554,16 @@ CheckCoercionArg(FunctionValidator& f, ParseNode* arg, Type expected, Type* type if (arg->isKind(PNK_CALL)) return CheckCoercedCall(f, arg, expected, type); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type argType; if (!CheckExpr(f, arg, &argType)) return false; if (expected.isFloat()) { - if (!CheckFloatCoercionArg(f, arg, argType, opcodeAt)) + if (!CheckFloatCoercionArg(f, arg, argType)) return false; } else if (expected.isSimd()) { if (!(argType <= expected)) return f.fail(arg, "argument to SIMD coercion isn't from the correct SIMD type"); - f.encoder().patchOneByteExpr(opcodeAt, Expr::Id); } else { MOZ_CRASH("not call coercions"); } @@ -4657,8 +4622,7 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin if (actualArity != arity) return f.failf(callNode, "call passed %u arguments, expected %u", actualArity, arity); - size_t opcodeAt; - if (!f.patchableCall(callNode, &opcodeAt)) + if (!f.prepareCall(callNode)) return false; Type firstType; @@ -4673,11 +4637,6 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin if (!opIsDouble && f32 == Expr::Unreachable) return f.fail(callNode, "math builtin cannot be used as float"); - if (opIsDouble) - f.encoder().patchOneByteExpr(opcodeAt, f64); - else - f.encoder().patchOneByteExpr(opcodeAt, f32); - if (arity == 2) { Type secondType; argNode = NextNode(argNode); @@ -4690,6 +4649,14 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin return f.fail(argNode, "both arguments to math builtin call should be the same type"); } + if (opIsDouble) { + if (!f.encoder().writeExpr(f64)) + return false; + } else { + if (!f.encoder().writeExpr(f32)) + return false; + } + *type = opIsDouble ? Type::Double : Type::Floatish; return true; } @@ -4719,31 +4686,6 @@ CheckSimdCallArgs(FunctionValidator& f, ParseNode* call, unsigned expectedArity, return true; } -template -static bool -CheckSimdCallArgsPatchable(FunctionValidator& f, ParseNode* call, unsigned expectedArity, - const CheckArgOp& checkArg) -{ - unsigned numArgs = CallArgListLength(call); - if (numArgs != expectedArity) - return f.failf(call, "expected %u arguments to SIMD call, got %u", expectedArity, numArgs); - - ParseNode* arg = CallArgList(call); - for (size_t i = 0; i < numArgs; i++, arg = NextNode(arg)) { - MOZ_ASSERT(!!arg); - Type argType; - size_t patchAt; - if (!f.encoder().writePatchableOneByteExpr(&patchAt)) - return false; - if (!CheckExpr(f, arg, &argType)) - return false; - if (!checkArg(f, arg, i, argType, patchAt)) - return false; - } - - return true; -} - class CheckArgIsSubtypeOf { @@ -4788,8 +4730,7 @@ class CheckSimdScalarArgs : simdType_(simdType), formalType_(SimdToCoercedScalarType(simdType)) {} - bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType, - size_t patchAt) const + bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const { if (!(actualType <= formalType_)) { // As a special case, accept doublelit arguments to float32x4 ops by @@ -4801,15 +4742,9 @@ class CheckSimdScalarArgs } // We emitted a double literal and actually want a float32. - MOZ_ASSERT(patchAt != size_t(-1)); - f.encoder().patchOneByteExpr(patchAt, Expr::F32DemoteF64); - return true; + return f.encoder().writeExpr(Expr::F32DemoteF64); } - if (patchAt == size_t(-1)) - return true; - - f.encoder().patchOneByteExpr(patchAt, Expr::Id); return true; } }; @@ -4845,36 +4780,6 @@ class CheckSimdVectorScalarArgs public: explicit CheckSimdVectorScalarArgs(SimdType t) : formalSimdType_(t) {} - bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType, - size_t patchAt = -1) const - { - MOZ_ASSERT(argIndex < 2); - if (argIndex == 0) { - // First argument is the vector - if (!(actualType <= Type(formalSimdType_))) { - return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(), - Type(formalSimdType_).toChars()); - } - - if (patchAt == size_t(-1)) - return true; - - f.encoder().patchOneByteExpr(patchAt, Expr::Id); - return true; - } - - // Second argument is the scalar - return CheckSimdScalarArgs(formalSimdType_)(f, arg, argIndex, actualType, patchAt); - } -}; - -class CheckSimdExtractLaneArgs -{ - SimdType formalSimdType_; - - public: - explicit CheckSimdExtractLaneArgs(SimdType t) : formalSimdType_(t) {} - bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const { MOZ_ASSERT(argIndex < 2); @@ -4884,53 +4789,12 @@ class CheckSimdExtractLaneArgs return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(), Type(formalSimdType_).toChars()); } + return true; } - uint32_t laneIndex; - // Second argument is the lane < vector length - if (!IsLiteralOrConstInt(f, arg, &laneIndex)) - return f.failf(arg, "lane selector should be a constant integer literal"); - if (laneIndex >= GetSimdLanes(formalSimdType_)) - return f.failf(arg, "lane selector should be in bounds"); - return true; - } -}; - -class CheckSimdReplaceLaneArgs -{ - SimdType formalSimdType_; - - public: - explicit CheckSimdReplaceLaneArgs(SimdType t) : formalSimdType_(t) {} - - bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType, - size_t patchAt) const - { - MOZ_ASSERT(argIndex < 3); - uint32_t u32; - switch (argIndex) { - case 0: - // First argument is the vector - if (!(actualType <= Type(formalSimdType_))) { - return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(), - Type(formalSimdType_).toChars()); - } - f.encoder().patchOneByteExpr(patchAt, Expr::Id); - return true; - case 1: - // Second argument is the lane (< vector length). - if (!IsLiteralOrConstInt(f, arg, &u32)) - return f.failf(arg, "lane selector should be a constant integer literal"); - if (u32 >= GetSimdLanes(formalSimdType_)) - return f.failf(arg, "lane selector should be in bounds"); - f.encoder().patchOneByteExpr(patchAt, Expr::Id); - return true; - case 2: - // Third argument is the scalar - return CheckSimdScalarArgs(formalSimdType_)(f, arg, argIndex, actualType, patchAt); - } - return false; + // Second argument is the scalar + return CheckSimdScalarArgs(formalSimdType_)(f, arg, argIndex, actualType); } }; @@ -4940,10 +4804,10 @@ static bool CheckSimdUnary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op, Type* type) { - if (!f.writeSimdOp(opType, op)) - return false; if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType))) return false; + if (!f.writeSimdOp(opType, op)) + return false; *type = opType; return true; } @@ -4952,10 +4816,10 @@ static bool CheckSimdBinaryShift(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op, Type *type) { - if (!f.writeSimdOp(opType, op)) - return false; if (!CheckSimdCallArgs(f, call, 2, CheckSimdVectorScalarArgs(opType))) return false; + if (!f.writeSimdOp(opType, op)) + return false; *type = opType; return true; } @@ -4964,10 +4828,10 @@ static bool CheckSimdBinaryComp(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op, Type *type) { - if (!f.writeSimdOp(opType, op)) - return false; if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType))) return false; + if (!f.writeSimdOp(opType, op)) + return false; *type = GetBooleanSimdType(opType); return true; } @@ -4976,10 +4840,10 @@ static bool CheckSimdBinary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op, Type* type) { - if (!f.writeSimdOp(opType, op)) - return false; if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType))) return false; + if (!f.writeSimdOp(opType, op)) + return false; *type = opType; return true; } @@ -4987,8 +4851,6 @@ CheckSimdBinary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOper static bool CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type) { - if (!f.writeSimdOp(opType, SimdOperation::Fn_extractLane)) - return false; switch (opType) { case SimdType::Int32x4: *type = Type::Signed; break; case SimdType::Uint32x4: *type = Type::Unsigned; break; @@ -4996,15 +4858,84 @@ CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, SimdType opType, Typ case SimdType::Bool32x4: *type = Type::Int; break; default: MOZ_CRASH("unhandled simd type"); } - return CheckSimdCallArgs(f, call, 2, CheckSimdExtractLaneArgs(opType)); + + unsigned numArgs = CallArgListLength(call); + if (numArgs != 2) + return f.failf(call, "expected 2 arguments to SIMD extract, got %u", numArgs); + + ParseNode* arg = CallArgList(call); + + // First argument is the vector + Type vecType; + if (!CheckExpr(f, arg, &vecType)) + return false; + if (!(vecType <= Type(opType))) { + return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(), + Type(opType).toChars()); + } + + arg = NextNode(arg); + + // Second argument is the lane < vector length + uint32_t lane; + if (!IsLiteralOrConstInt(f, arg, &lane)) + return f.failf(arg, "lane selector should be a constant integer literal"); + if (lane >= GetSimdLanes(opType)) + return f.failf(arg, "lane selector should be in bounds"); + + if (!f.writeSimdOp(opType, SimdOperation::Fn_extractLane)) + return false; + if (!f.encoder().writeVarU32(lane)) + return false; + return true; } static bool CheckSimdReplaceLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type) { + unsigned numArgs = CallArgListLength(call); + if (numArgs != 3) + return f.failf(call, "expected 2 arguments to SIMD replace, got %u", numArgs); + + ParseNode* arg = CallArgList(call); + + // First argument is the vector + Type vecType; + if (!CheckExpr(f, arg, &vecType)) + return false; + if (!(vecType <= Type(opType))) { + return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(), + Type(opType).toChars()); + } + + arg = NextNode(arg); + + // Second argument is the lane < vector length + uint32_t lane; + if (!IsLiteralOrConstInt(f, arg, &lane)) + return f.failf(arg, "lane selector should be a constant integer literal"); + if (lane >= GetSimdLanes(opType)) + return f.failf(arg, "lane selector should be in bounds"); + + arg = NextNode(arg); + + // Third argument is the scalar + Type scalarType; + if (!CheckExpr(f, arg, &scalarType)) + return false; + if (!(scalarType <= SimdToCoercedScalarType(opType))) { + if (opType == SimdType::Float32x4 && scalarType.isDoubleLit()) { + if (!f.encoder().writeExpr(Expr::F32DemoteF64)) + return false; + } else { + return f.failf(arg, "%s is not the correct type to replace an element of %s", + scalarType.toChars(), vecType.toChars()); + } + } + if (!f.writeSimdOp(opType, SimdOperation::Fn_replaceLane)) return false; - if (!CheckSimdCallArgsPatchable(f, call, 3, CheckSimdReplaceLaneArgs(opType))) + if (!f.encoder().writeVarU32(lane)) return false; *type = opType; return true; @@ -5019,10 +4950,10 @@ static bool CheckSimdCast(FunctionValidator& f, ParseNode* call, SimdType fromType, SimdType toType, SimdOperation op, Type* type) { - if (!f.writeSimdOp(toType, op)) - return false; if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(fromType))) return false; + if (!f.writeSimdOp(toType, op)) + return false; *type = toType; return true; } @@ -5050,9 +4981,6 @@ CheckSimdSwizzle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t if (numArgs != 5) return f.failf(call, "expected 5 arguments to SIMD swizzle, got %u", numArgs); - if (!f.writeSimdOp(opType, SimdOperation::Fn_swizzle)) - return false; - Type retType = opType; ParseNode* vec = CallArgList(call); Type vecType; @@ -5061,6 +4989,9 @@ CheckSimdSwizzle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t if (!(vecType <= retType)) return f.failf(vec, "%s is not a subtype of %s", vecType.toChars(), retType.toChars()); + if (!f.writeSimdOp(opType, SimdOperation::Fn_swizzle)) + return false; + int32_t lanes[4]; if (!CheckSimdShuffleSelectors(f, NextNode(vec), lanes, 4)) return false; @@ -5081,9 +5012,6 @@ CheckSimdShuffle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t if (numArgs != 6) return f.failf(call, "expected 6 arguments to SIMD shuffle, got %u", numArgs); - if (!f.writeSimdOp(opType, SimdOperation::Fn_shuffle)) - return false; - Type retType = opType; ParseNode* arg = CallArgList(call); for (unsigned i = 0; i < 2; i++, arg = NextNode(arg)) { @@ -5094,6 +5022,9 @@ CheckSimdShuffle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t return f.failf(arg, "%s is not a subtype of %s", type.toChars(), retType.toChars()); } + if (!f.writeSimdOp(opType, SimdOperation::Fn_shuffle)) + return false; + int32_t lanes[4]; if (!CheckSimdShuffleSelectors(f, arg, lanes, 8)) return false; @@ -5108,7 +5039,7 @@ CheckSimdShuffle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t } static bool -CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call) +CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call, Scalar::Type* viewType) { ParseNode* view = CallArgList(call); if (!view->isKind(PNK_NAME)) @@ -5116,11 +5047,10 @@ CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call) ParseNode* indexExpr = NextNode(view); - Scalar::Type viewType; - if (!CheckAndPrepareArrayAccess(f, view, indexExpr, YesSimd, &viewType)) + if (!CheckAndPrepareArrayAccess(f, view, indexExpr, YesSimd, viewType)) return false; - if (viewType != Scalar::Uint8) + if (*viewType != Scalar::Uint8) return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument"); return true; @@ -5134,10 +5064,14 @@ CheckSimdLoad(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperat if (numArgs != 2) return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs); + Scalar::Type viewType; + if (!CheckSimdLoadStoreArgs(f, call, &viewType)) + return false; + if (!f.writeSimdOp(opType, op)) return false; - if (!CheckSimdLoadStoreArgs(f, call)) + if (!WriteArrayAccessFlags(f, viewType)) return false; *type = opType; @@ -5152,10 +5086,8 @@ CheckSimdStore(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOpera if (numArgs != 3) return f.failf(call, "expected 3 arguments to SIMD store, got %u", numArgs); - if (!f.writeSimdOp(opType, op)) - return false; - - if (!CheckSimdLoadStoreArgs(f, call)) + Scalar::Type viewType; + if (!CheckSimdLoadStoreArgs(f, call, &viewType)) return false; Type retType = opType; @@ -5164,6 +5096,12 @@ CheckSimdStore(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOpera if (!CheckExpr(f, vecExpr, &vecType)) return false; + if (!f.writeSimdOp(opType, op)) + return false; + + if (!WriteArrayAccessFlags(f, viewType)) + return false; + if (!(vecType <= retType)) return f.failf(vecExpr, "%s is not a subtype of %s", vecType.toChars(), retType.toChars()); @@ -5174,10 +5112,10 @@ CheckSimdStore(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOpera static bool CheckSimdSelect(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type) { - if (!f.writeSimdOp(opType, SimdOperation::Fn_select)) - return false; if (!CheckSimdCallArgs(f, call, 3, CheckSimdSelectArgs(opType))) return false; + if (!f.writeSimdOp(opType, SimdOperation::Fn_select)) + return false; *type = opType; return true; } @@ -5185,10 +5123,10 @@ CheckSimdSelect(FunctionValidator& f, ParseNode* call, SimdType opType, Type* ty static bool CheckSimdAllTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type) { - if (!f.writeSimdOp(opType, SimdOperation::Fn_allTrue)) - return false; if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType))) return false; + if (!f.writeSimdOp(opType, SimdOperation::Fn_allTrue)) + return false; *type = Type::Int; return true; } @@ -5196,10 +5134,10 @@ CheckSimdAllTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t static bool CheckSimdAnyTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type) { - if (!f.writeSimdOp(opType, SimdOperation::Fn_anyTrue)) - return false; if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType))) return false; + if (!f.writeSimdOp(opType, SimdOperation::Fn_anyTrue)) + return false; *type = Type::Int; return true; } @@ -5217,9 +5155,9 @@ CheckSimdCheck(FunctionValidator& f, ParseNode* call, SimdType opType, Type* typ static bool CheckSimdSplat(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type) { - if (!f.writeSimdOp(opType, SimdOperation::Fn_splat)) + if (!CheckSimdCallArgs(f, call, 1, CheckSimdScalarArgs(opType))) return false; - if (!CheckSimdCallArgsPatchable(f, call, 1, CheckSimdScalarArgs(opType))) + if (!f.writeSimdOp(opType, SimdOperation::Fn_splat)) return false; *type = opType; return true; @@ -5319,11 +5257,11 @@ CheckSimdCtorCall(FunctionValidator& f, ParseNode* call, const ModuleValidator:: MOZ_ASSERT(call->isKind(PNK_CALL)); SimdType simdType = global->simdCtorType(); - if (!f.writeSimdOp(simdType, SimdOperation::Constructor)) + unsigned length = GetSimdLanes(simdType); + if (!CheckSimdCallArgs(f, call, length, CheckSimdScalarArgs(simdType))) return false; - unsigned length = GetSimdLanes(simdType); - if (!CheckSimdCallArgsPatchable(f, call, length, CheckSimdScalarArgs(simdType))) + if (!f.writeSimdOp(simdType, SimdOperation::Constructor)) return false; *type = simdType; @@ -5354,43 +5292,44 @@ CheckUncoercedCall(FunctionValidator& f, ParseNode* expr, Type* type) } static bool -CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual, size_t patchAt, +CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual, Type* type) { MOZ_ASSERT(expected.isCanonical()); // At this point, the bytecode resembles this: - // | patchAt | the thing we wanted to coerce | current position |> + // | the thing we wanted to coerce | current position |> switch (expected.which()) { case Type::Void: - f.encoder().patchOneByteExpr(patchAt, Expr::Id); break; case Type::Int: if (!actual.isIntish()) return f.failf(expr, "%s is not a subtype of intish", actual.toChars()); - f.encoder().patchOneByteExpr(patchAt, Expr::Id); break; case Type::Float: - if (!CheckFloatCoercionArg(f, expr, actual, patchAt)) + if (!CheckFloatCoercionArg(f, expr, actual)) return false; break; case Type::Double: - if (actual.isMaybeDouble()) - f.encoder().patchOneByteExpr(patchAt, Expr::Id); - else if (actual.isMaybeFloat()) - f.encoder().patchOneByteExpr(patchAt, Expr::F64PromoteF32); - else if (actual.isSigned()) - f.encoder().patchOneByteExpr(patchAt, Expr::F64ConvertSI32); - else if (actual.isUnsigned()) - f.encoder().patchOneByteExpr(patchAt, Expr::F64ConvertUI32); - else + if (actual.isMaybeDouble()) { + // No conversion necessary. + } else if (actual.isMaybeFloat()) { + if (!f.encoder().writeExpr(Expr::F64PromoteF32)) + return false; + } else if (actual.isSigned()) { + if (!f.encoder().writeExpr(Expr::F64ConvertSI32)) + return false; + } else if (actual.isUnsigned()) { + if (!f.encoder().writeExpr(Expr::F64ConvertUI32)) + return false; + } else { return f.failf(expr, "%s is not a subtype of double?, float?, signed or unsigned", actual.toChars()); + } break; default: MOZ_ASSERT(expected.isSimd(), "Incomplete switch"); if (actual != expected) return f.failf(expr, "got type %s, expected %s", actual.toChars(), expected.toChars()); - f.encoder().patchOneByteExpr(patchAt, Expr::Id); break; } @@ -5402,13 +5341,10 @@ static bool CheckCoercedMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltinFunction func, Type ret, Type* type) { - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; Type actual; if (!CheckMathBuiltinCall(f, callNode, func, &actual)) return false; - return CoerceResult(f, callNode, ret, actual, opcodeAt, type); + return CoerceResult(f, callNode, ret, actual, type); } static bool @@ -5417,10 +5353,6 @@ CheckCoercedSimdCall(FunctionValidator& f, ParseNode* call, const ModuleValidato { MOZ_ASSERT(ret.isCanonical()); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type actual; if (global->isSimdCtor()) { if (!CheckSimdCtorCall(f, call, global, &actual)) @@ -5432,7 +5364,7 @@ CheckCoercedSimdCall(FunctionValidator& f, ParseNode* call, const ModuleValidato return false; } - return CoerceResult(f, call, ret, actual, opcodeAt, type); + return CoerceResult(f, call, ret, actual, type); } static bool @@ -5441,13 +5373,10 @@ CheckCoercedAtomicsBuiltinCall(FunctionValidator& f, ParseNode* callNode, { MOZ_ASSERT(ret.isCanonical()); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; Type actual; if (!CheckAtomicsBuiltinCall(f, callNode, func, &actual)) return false; - return CoerceResult(f, callNode, ret, actual, opcodeAt, type); + return CoerceResult(f, callNode, ret, actual, type); } static bool @@ -5458,13 +5387,10 @@ CheckCoercedCall(FunctionValidator& f, ParseNode* call, Type ret, Type* type) JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed()); if (IsNumericLiteral(f.m(), call)) { - size_t coerceOp; - if (!f.encoder().writePatchableOneByteExpr(&coerceOp)) - return false; NumLit lit = ExtractNumericLiteral(f.m(), call); if (!f.writeConstExpr(lit)) return false; - return CoerceResult(f, call, ret, Type::lit(lit), coerceOp, type); + return CoerceResult(f, call, ret, Type::lit(lit), type); } ParseNode* callee = CallCallee(call); @@ -5512,15 +5438,11 @@ CheckPos(FunctionValidator& f, ParseNode* pos, Type* type) if (operand->isKind(PNK_CALL)) return CheckCoercedCall(f, operand, Type::Double, type); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type actual; if (!CheckExpr(f, operand, &actual)) return false; - return CoerceResult(f, operand, Type::Double, actual, opcodeAt, type); + return CoerceResult(f, operand, Type::Double, actual, type); } static bool @@ -5529,9 +5451,6 @@ CheckNot(FunctionValidator& f, ParseNode* expr, Type* type) MOZ_ASSERT(expr->isKind(PNK_NOT)); ParseNode* operand = UnaryKid(expr); - if (!f.encoder().writeExpr(Expr::I32Eqz)) - return false; - Type operandType; if (!CheckExpr(f, operand, &operandType)) return false; @@ -5540,7 +5459,7 @@ CheckNot(FunctionValidator& f, ParseNode* expr, Type* type) return f.failf(operand, "%s is not a subtype of int", operandType.toChars()); *type = Type::Int; - return true; + return f.encoder().writeExpr(Expr::I32Eqz); } static bool @@ -5549,30 +5468,23 @@ CheckNeg(FunctionValidator& f, ParseNode* expr, Type* type) MOZ_ASSERT(expr->isKind(PNK_NEG)); ParseNode* operand = UnaryKid(expr); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type operandType; if (!CheckExpr(f, operand, &operandType)) return false; if (operandType.isInt()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Neg); *type = Type::Intish; - return true; + return f.encoder().writeExpr(Expr::I32Neg); } if (operandType.isMaybeDouble()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Neg); *type = Type::Double; - return true; + return f.encoder().writeExpr(Expr::F64Neg); } if (operandType.isMaybeFloat()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Neg); *type = Type::Floatish; - return true; + return f.encoder().writeExpr(Expr::F32Neg); } return f.failf(operand, "%s is not a subtype of int, float? or double?", operandType.toChars()); @@ -5584,25 +5496,19 @@ CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type) MOZ_ASSERT(expr->isKind(PNK_BITNOT)); ParseNode* operand = UnaryKid(expr); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type operandType; if (!CheckExpr(f, operand, &operandType)) return false; if (operandType.isMaybeDouble() || operandType.isMaybeFloat()) { - Expr opcode = operandType.isMaybeDouble() ? Expr::I32TruncSF64 : Expr::I32TruncSF32; - f.encoder().patchOneByteExpr(opcodeAt, opcode); *type = Type::Signed; - return true; + Expr opcode = operandType.isMaybeDouble() ? Expr::I32TruncSF64 : Expr::I32TruncSF32; + return f.encoder().writeExpr(opcode); } if (!operandType.isIntish()) return f.failf(operand, "%s is not a subtype of double?, float? or intish", operandType.toChars()); - f.encoder().patchOneByteExpr(opcodeAt, Expr::Id); *type = Type::Signed; return true; } @@ -5616,9 +5522,6 @@ CheckBitNot(FunctionValidator& f, ParseNode* neg, Type* type) if (operand->isKind(PNK_BITNOT)) return CheckCoerceToInt(f, operand, type); - if (!f.encoder().writeExpr(Expr::I32BitNot)) - return false; - Type operandType; if (!CheckExpr(f, operand, &operandType)) return false; @@ -5626,6 +5529,9 @@ CheckBitNot(FunctionValidator& f, ParseNode* neg, Type* type) if (!operandType.isIntish()) return f.failf(operand, "%s is not a subtype of intish", operandType.toChars()); + if (!f.encoder().writeExpr(Expr::I32BitNot)) + return false; + *type = Type::Signed; return true; } @@ -5643,8 +5549,6 @@ CheckComma(FunctionValidator& f, ParseNode* comma, Type* type) // contain breaks and continues and nested control flow structures. if (!f.encoder().writeExpr(Expr::Block)) return false; - if (!f.encoder().writeVarU32(ListLength(comma))) - return false; ParseNode* pn = operands; for (; NextNode(pn); pn = NextNode(pn)) { @@ -5652,7 +5556,10 @@ CheckComma(FunctionValidator& f, ParseNode* comma, Type* type) return false; } - return CheckExpr(f, pn, type); + if (!CheckExpr(f, pn, type)) + return false; + + return f.encoder().writeExpr(Expr::End); } static bool @@ -5660,9 +5567,6 @@ CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type) { MOZ_ASSERT(ternary->isKind(PNK_CONDITIONAL)); - if (!f.encoder().writeExpr(Expr::IfElse)) - return false; - ParseNode* cond = TernaryKid1(ternary); ParseNode* thenExpr = TernaryKid2(ternary); ParseNode* elseExpr = TernaryKid3(ternary); @@ -5674,10 +5578,16 @@ CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type) if (!condType.isInt()) return f.failf(cond, "%s is not a subtype of int", condType.toChars()); + if (!f.pushIf()) + return false; + Type thenType; if (!CheckExpr(f, thenExpr, &thenType)) return false; + if (!f.switchToElse()) + return false; + Type elseType; if (!CheckExpr(f, elseExpr, &elseType)) return false; @@ -5696,6 +5606,9 @@ CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type) thenType.toChars(), elseType.toChars()); } + if (!f.popIf()) + return false; + return true; } @@ -5733,10 +5646,6 @@ CheckMultiply(FunctionValidator& f, ParseNode* star, Type* type) ParseNode* lhs = MultiplyLeft(star); ParseNode* rhs = MultiplyRight(star); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - Type lhsType; if (!CheckExpr(f, lhs, &lhsType)) return false; @@ -5748,21 +5657,18 @@ CheckMultiply(FunctionValidator& f, ParseNode* star, Type* type) if (lhsType.isInt() && rhsType.isInt()) { if (!IsValidIntMultiplyConstant(f.m(), lhs) && !IsValidIntMultiplyConstant(f.m(), rhs)) return f.fail(star, "one arg to int multiply must be a small (-2^20, 2^20) int literal"); - f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Mul); *type = Type::Intish; - return true; + return f.encoder().writeExpr(Expr::I32Mul); } if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Mul); *type = Type::Double; - return true; + return f.encoder().writeExpr(Expr::F64Mul); } if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Mul); *type = Type::Floatish; - return true; + return f.encoder().writeExpr(Expr::F32Mul); } return f.fail(star, "multiply operands must be both int, both double? or both float?"); @@ -5780,10 +5686,6 @@ CheckAddOrSub(FunctionValidator& f, ParseNode* expr, Type* type, unsigned* numAd Type lhsType, rhsType; unsigned lhsNumAddOrSub, rhsNumAddOrSub; - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - if (lhs->isKind(PNK_ADD) || lhs->isKind(PNK_SUB)) { if (!CheckAddOrSub(f, lhs, &lhsType, &lhsNumAddOrSub)) return false; @@ -5811,13 +5713,16 @@ CheckAddOrSub(FunctionValidator& f, ParseNode* expr, Type* type, unsigned* numAd return f.fail(expr, "too many + or - without intervening coercion"); if (lhsType.isInt() && rhsType.isInt()) { - f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::I32Add : Expr::I32Sub); + if (!f.encoder().writeExpr(expr->isKind(PNK_ADD) ? Expr::I32Add : Expr::I32Sub)) + return false; *type = Type::Intish; } else if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { - f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::F64Add : Expr::F64Sub); + if (!f.encoder().writeExpr(expr->isKind(PNK_ADD) ? Expr::F64Add : Expr::F64Sub)) + return false; *type = Type::Double; } else if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { - f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::F32Add : Expr::F32Sub); + if (!f.encoder().writeExpr(expr->isKind(PNK_ADD) ? Expr::F32Add : Expr::F32Sub)) + return false; *type = Type::Floatish; } else { return f.failf(expr, "operands to + or - must both be int, float? or double?, got %s and %s", @@ -5834,10 +5739,6 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type) { MOZ_ASSERT(expr->isKind(PNK_DIV) || expr->isKind(PNK_MOD)); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - ParseNode* lhs = DivOrModLeft(expr); ParseNode* rhs = DivOrModRight(expr); @@ -5848,30 +5749,26 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type) return false; if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { - f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::F64Div : Expr::F64Mod); *type = Type::Double; - return true; + return f.encoder().writeExpr(expr->isKind(PNK_DIV) ? Expr::F64Div : Expr::F64Mod); } if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { + *type = Type::Floatish; if (expr->isKind(PNK_DIV)) - f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Div); + return f.encoder().writeExpr(Expr::F32Div); else return f.fail(expr, "modulo cannot receive float arguments"); - *type = Type::Floatish; - return true; } if (lhsType.isSigned() && rhsType.isSigned()) { - f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::I32DivS : Expr::I32RemS); *type = Type::Intish; - return true; + return f.encoder().writeExpr(expr->isKind(PNK_DIV) ? Expr::I32DivS : Expr::I32RemS); } if (lhsType.isUnsigned() && rhsType.isUnsigned()) { - f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::I32DivU : Expr::I32RemU); *type = Type::Intish; - return true; + return f.encoder().writeExpr(expr->isKind(PNK_DIV) ? Expr::I32DivU : Expr::I32RemU); } return f.failf(expr, "arguments to / or %% must both be double?, float?, signed, or unsigned; " @@ -5884,10 +5781,6 @@ CheckComparison(FunctionValidator& f, ParseNode* comp, Type* type) MOZ_ASSERT(comp->isKind(PNK_LT) || comp->isKind(PNK_LE) || comp->isKind(PNK_GT) || comp->isKind(PNK_GE) || comp->isKind(PNK_EQ) || comp->isKind(PNK_NE)); - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; - ParseNode* lhs = ComparisonLeft(comp); ParseNode* rhs = ComparisonRight(comp); @@ -5951,9 +5844,8 @@ CheckComparison(FunctionValidator& f, ParseNode* comp, Type* type) MOZ_CRASH("unexpected type"); } - f.encoder().patchOneByteExpr(opcodeAt, stmt); *type = Type::Int; - return true; + return f.encoder().writeExpr(stmt); } static bool @@ -5996,16 +5888,6 @@ CheckBitwise(FunctionValidator& f, ParseNode* bitwise, Type* type) return true; } - switch (bitwise->getKind()) { - case PNK_BITOR: if (!f.encoder().writeExpr(Expr::I32Or)) return false; break; - case PNK_BITAND: if (!f.encoder().writeExpr(Expr::I32And)) return false; break; - case PNK_BITXOR: if (!f.encoder().writeExpr(Expr::I32Xor)) return false; break; - case PNK_LSH: if (!f.encoder().writeExpr(Expr::I32Shl)) return false; break; - case PNK_RSH: if (!f.encoder().writeExpr(Expr::I32ShrS)) return false; break; - case PNK_URSH: if (!f.encoder().writeExpr(Expr::I32ShrU)) return false; break; - default: MOZ_CRASH("not a bitwise op"); - } - Type lhsType; if (!CheckExpr(f, lhs, &lhsType)) return false; @@ -6019,6 +5901,16 @@ CheckBitwise(FunctionValidator& f, ParseNode* bitwise, Type* type) if (!rhsType.isIntish()) return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars()); + switch (bitwise->getKind()) { + case PNK_BITOR: if (!f.encoder().writeExpr(Expr::I32Or)) return false; break; + case PNK_BITAND: if (!f.encoder().writeExpr(Expr::I32And)) return false; break; + case PNK_BITXOR: if (!f.encoder().writeExpr(Expr::I32Xor)) return false; break; + case PNK_LSH: if (!f.encoder().writeExpr(Expr::I32Shl)) return false; break; + case PNK_RSH: if (!f.encoder().writeExpr(Expr::I32ShrS)) return false; break; + case PNK_URSH: if (!f.encoder().writeExpr(Expr::I32ShrU)) return false; break; + default: MOZ_CRASH("not a bitwise op"); + } + return true; } @@ -6087,7 +5979,7 @@ CheckExprStatement(FunctionValidator& f, ParseNode* exprStmt) MOZ_ASSERT(exprStmt->isKind(PNK_SEMI)); ParseNode* expr = UnaryKid(exprStmt); if (!expr) - return f.encoder().writeExpr(Expr::Nop); + return true; return CheckAsExprStatement(f, expr); } @@ -6095,21 +5987,10 @@ static bool CheckLoopConditionOnEntry(FunctionValidator& f, ParseNode* cond) { uint32_t maybeLit; - - // TODO: will not need to generate nop when blocks switch from - // number-of-statements immediate to end marker. if (IsLiteralInt(f.m(), cond, &maybeLit) && maybeLit) - return f.encoder().writeExpr(Expr::Nop); + return true; - // brIf (i32.eq 0 $f) $out - if (!f.writeBreakIf()) - return false; - - // TODO change this to i32.eqz - // i32.eq 0 $f - if (!f.encoder().writeExpr(Expr::I32Eq)) - return false; - if (!f.writeInt32Lit(0)) + if (!f.encoder().writeExpr(Expr::Nop)) return false; Type condType; @@ -6118,6 +5999,17 @@ CheckLoopConditionOnEntry(FunctionValidator& f, ParseNode* cond) if (!condType.isInt()) return f.failf(cond, "%s is not a subtype of int", condType.toChars()); + // TODO change this to i32.eqz + // i32.eq 0 $f + if (!f.writeInt32Lit(0)) + return false; + if (!f.encoder().writeExpr(Expr::I32Eq)) + return false; + + // brIf (i32.eq 0 $f) $out + if (!f.writeBreakIf()) + return false; + return true; } @@ -6137,7 +6029,7 @@ CheckWhile(FunctionValidator& f, ParseNode* whileStmt, const NameVector* labels if (labels && !f.addLabels(*labels, 0, 1)) return false; - if (!f.pushLoop(/* numStmts = */ 3)) + if (!f.pushLoop()) return false; if (!CheckLoopConditionOnEntry(f, cond)) @@ -6147,7 +6039,8 @@ CheckWhile(FunctionValidator& f, ParseNode* whileStmt, const NameVector* labels if (!f.writeContinue()) return false; - f.popLoop(); + if (!f.popLoop()) + return false; if (labels) f.removeLabels(*labels); return true; @@ -6182,14 +6075,14 @@ CheckFor(FunctionValidator& f, ParseNode* forStmt, const NameVector* labels = nu if (labels && !f.addLabels(*labels, 1, 3)) return false; - if (!f.pushUnbreakableBlock(/* numStmts = */ 1 + !!maybeInit)) + if (!f.pushUnbreakableBlock()) return false; if (maybeInit && !CheckAsExprStatement(f, maybeInit)) return false; { - if (!f.pushLoop(/* numStmts = */ 2 + !!maybeCond + !!maybeInc)) + if (!f.pushLoop()) return false; if (maybeCond && !CheckLoopConditionOnEntry(f, maybeCond)) @@ -6197,11 +6090,12 @@ CheckFor(FunctionValidator& f, ParseNode* forStmt, const NameVector* labels = nu { // Continuing in the body should just break out to the increment. - if (!f.pushContinuableBlock(1)) + if (!f.pushContinuableBlock()) return false; if (!CheckStatement(f, body)) return false; - f.popContinuableBlock(); + if (!f.popContinuableBlock()) + return false; } if (maybeInc && !CheckAsExprStatement(f, maybeInc)) @@ -6209,10 +6103,12 @@ CheckFor(FunctionValidator& f, ParseNode* forStmt, const NameVector* labels = nu if (!f.writeContinue()) return false; - f.popLoop(); + if (!f.popLoop()) + return false; } - f.popUnbreakableBlock(); + if (!f.popUnbreakableBlock()) + return false; if (labels) f.removeLabels(*labels); @@ -6237,19 +6133,20 @@ CheckDoWhile(FunctionValidator& f, ParseNode* whileStmt, const NameVector* label if (labels && !f.addLabels(*labels, 0, 2)) return false; - if (!f.pushLoop(2 /* numStmts = #body + br_if */)) + if (!f.pushLoop()) return false; { // An unlabeled continue in the body should break out to the condition. - if (!f.pushContinuableBlock(1)) + if (!f.pushContinuableBlock()) return false; if (!CheckStatement(f, body)) return false; - f.popContinuableBlock(); + if (!f.popContinuableBlock()) + return false; } - if (!f.writeContinueIf()) + if (!f.encoder().writeExpr(Expr::Nop)) return false; Type condType; @@ -6258,7 +6155,11 @@ CheckDoWhile(FunctionValidator& f, ParseNode* whileStmt, const NameVector* label if (!condType.isInt()) return f.failf(cond, "%s is not a subtype of int", condType.toChars()); - f.popLoop(); + if (!f.writeContinueIf()) + return false; + + if (!f.popLoop()) + return false; if (labels) f.removeLabels(*labels); return true; @@ -6292,24 +6193,23 @@ CheckLabel(FunctionValidator& f, ParseNode* labeledStmt) break; } - if (!f.pushUnbreakableBlock(1, &labels)) + if (!f.pushUnbreakableBlock(&labels)) return false; if (!CheckStatement(f, innermost)) return false; - f.popUnbreakableBlock(&labels); + if (!f.popUnbreakableBlock(&labels)) + return false; return true; } static bool CheckIf(FunctionValidator& f, ParseNode* ifStmt) { - recurse: - size_t opcodeAt; - if (!f.encoder().writePatchableOneByteExpr(&opcodeAt)) - return false; + uint32_t numIfEnd = 1; + recurse: MOZ_ASSERT(ifStmt->isKind(PNK_IF)); ParseNode* cond = TernaryKid1(ifStmt); ParseNode* thenStmt = TernaryKid2(ifStmt); @@ -6321,16 +6221,21 @@ CheckIf(FunctionValidator& f, ParseNode* ifStmt) if (!condType.isInt()) return f.failf(cond, "%s is not a subtype of int", condType.toChars()); + if (!f.pushIf()) + return false; + if (!CheckStatement(f, thenStmt)) return false; - if (!elseStmt) { - f.encoder().patchOneByteExpr(opcodeAt, Expr::If); - } else { - f.encoder().patchOneByteExpr(opcodeAt, Expr::IfElse); + if (elseStmt) { + if (!f.switchToElse()) + return false; if (elseStmt->isKind(PNK_IF)) { ifStmt = elseStmt; + ++numIfEnd; + if (numIfEnd == 0) + return false; goto recurse; } @@ -6338,6 +6243,11 @@ CheckIf(FunctionValidator& f, ParseNode* ifStmt) return false; } + for (uint32_t i = 0; i != numIfEnd; ++i) { + if (!f.popIf()) + return false; + } + return true; } @@ -6480,26 +6390,40 @@ CheckSwitch(FunctionValidator& f, ParseNode* switchStmt) } // Open the wrapping breakable default block. - if (!f.pushBreakableBlock(2)) + if (!f.pushBreakableBlock()) return false; // Open all the case blocks. for (uint32_t i = 0; i < numCases; i++) { - if (!f.pushUnbreakableBlock(2)) + if (!f.pushUnbreakableBlock()) return false; } // Open the br_table block. - if (!f.pushUnbreakableBlock(1)) + if (!f.pushUnbreakableBlock()) return false; // The default block is the last one. uint32_t defaultDepth = numCases; + // Subtract lowest case value, so that all the cases start from 0. + if (low) { + if (!CheckSwitchExpr(f, switchExpr)) + return false; + if (!f.writeInt32Lit(low)) + return false; + if (!f.encoder().writeExpr(Expr::I32Sub)) + return false; + } else { + if (!CheckSwitchExpr(f, switchExpr)) + return false; + } + // Start the br_table block. if (!f.encoder().writeExpr(Expr::BrTable)) return false; + // Write the number of cases (tableLength - 1 + 1 (default)). // Write the number of cases (tableLength - 1 + 1 (default)). if (!f.encoder().writeVarU32(tableLength)) return false; @@ -6516,40 +6440,26 @@ CheckSwitch(FunctionValidator& f, ParseNode* switchStmt) if (!f.encoder().writeFixedU32(defaultDepth)) return false; - // Subtract lowest case value, so that all the cases start from 0. - if (low) { - if (!f.encoder().writeExpr(Expr::I32Sub)) - return false; - if (!CheckSwitchExpr(f, switchExpr)) - return false; - if (!f.writeInt32Lit(low)) - return false; - } else { - if (!CheckSwitchExpr(f, switchExpr)) - return false; - } - // Our br_table is done. Close its block, write the cases down in order. - f.popUnbreakableBlock(); + if (!f.popUnbreakableBlock()) + return false; for (; stmt && !IsDefaultCase(stmt); stmt = NextNode(stmt)) { if (!CheckStatement(f, CaseBody(stmt))) return false; - f.popUnbreakableBlock(); + if (!f.popUnbreakableBlock()) + return false; } // Write the default block. if (stmt && IsDefaultCase(stmt)) { if (!CheckStatement(f, CaseBody(stmt))) return false; - } else { - // TODO no need to write this nop once we go postorder. - if (!f.encoder().writeExpr(Expr::Nop)) - return false; } // Close the wrapping block. - f.popBreakableBlock(); + if (!f.popBreakableBlock()) + return false; return true; } @@ -6574,20 +6484,28 @@ CheckReturn(FunctionValidator& f, ParseNode* returnStmt) { ParseNode* expr = ReturnExpr(returnStmt); + if (!expr) { + if (!f.encoder().writeExpr(Expr::Nop)) + return false; + + if (!CheckReturnType(f, returnStmt, Type::Void)) + return false; + } else { + Type type; + if (!CheckExpr(f, expr, &type)) + return false; + + if (!type.isReturnType()) + return f.failf(expr, "%s is not a valid return type", type.toChars()); + + if (!CheckReturnType(f, expr, Type::canonicalize(type))) + return false; + } + if (!f.encoder().writeExpr(Expr::Return)) return false; - if (!expr) - return CheckReturnType(f, returnStmt, Type::Void); - - Type type; - if (!CheckExpr(f, expr, &type)) - return false; - - if (!type.isReturnType()) - return f.failf(expr, "%s is not a valid return type", type.toChars()); - - return CheckReturnType(f, expr, Type::canonicalize(type)); + return true; } static bool @@ -6595,8 +6513,7 @@ CheckStatementList(FunctionValidator& f, ParseNode* stmtList, const NameVector* { MOZ_ASSERT(stmtList->isKind(PNK_STATEMENTLIST)); - uint32_t numStmts = ListLength(stmtList); - if (!f.pushUnbreakableBlock(numStmts, labels)) + if (!f.pushUnbreakableBlock(labels)) return false; for (ParseNode* stmt = ListHead(stmtList); stmt; stmt = NextNode(stmt)) { @@ -6604,7 +6521,8 @@ CheckStatementList(FunctionValidator& f, ParseNode* stmtList, const NameVector* return false; } - f.popUnbreakableBlock(labels); + if (!f.popUnbreakableBlock(labels)) + return false; return true; } diff --git a/js/src/asmjs/Wasm.cpp b/js/src/asmjs/Wasm.cpp index b11d2e5fe994..94347c2f109c 100644 --- a/js/src/asmjs/Wasm.cpp +++ b/js/src/asmjs/Wasm.cpp @@ -22,6 +22,7 @@ #include "jsprf.h" +#include "asmjs/WasmBinaryIterator.h" #include "asmjs/WasmGenerator.h" #include "vm/ArrayBufferObject.h" #include "vm/Debugger.h" @@ -61,105 +62,64 @@ Fail(JSContext* cx, Decoder& d, const char* str) return false; } -/*****************************************************************************/ -// wasm validation type lattice - -// ExprType::Limit is an out-of-band value and has no wasm-semantic meaning. For -// the purpose of recursive validation, we use this value to represent the type -// of branch/return instructions that don't actually return to the parent -// expression and can thus be used in any context. -static const ExprType AnyType = ExprType::Limit; - -static ExprType -Unify(ExprType one, ExprType two) -{ - if (one == AnyType) - return two; - if (two == AnyType) - return one; - if (one == two) - return one; - return ExprType::Void; -} - static bool IsI64Implemented() { #ifdef JS_CPU_X64 - return true; + return true; #else - return false; + return false; #endif } -class FunctionDecoder +namespace { + +class ValidatingPolicy : public ExprIterPolicy { JSContext* cx_; - Decoder& d_; - ModuleGenerator& mg_; - FunctionGenerator& fg_; - uint32_t funcIndex_; - const ValTypeVector& locals_; - Vector blocks_; public: - FunctionDecoder(JSContext* cx, Decoder& d, ModuleGenerator& mg, FunctionGenerator& fg, - uint32_t funcIndex, const ValTypeVector& locals) - : cx_(cx), d_(d), mg_(mg), fg_(fg), funcIndex_(funcIndex), locals_(locals), blocks_(cx) - {} - JSContext* cx() const { return cx_; } - Decoder& d() const { return d_; } - ModuleGenerator& mg() const { return mg_; } - FunctionGenerator& fg() const { return fg_; } - uint32_t funcIndex() const { return funcIndex_; } - const ValTypeVector& locals() const { return locals_; } - const DeclaredSig& sig() const { return mg_.funcSig(funcIndex_); } + // Validation is what we're all about here. + static const bool Validate = true; - bool fail(const char* str) { - return Fail(cx_, d_, str); + // Fail by printing a message, using the contains JSContext. + bool fail(const char* str, Decoder& d) { + return Fail(cx_, d, str); } + + explicit ValidatingPolicy(JSContext* cx) : cx_(cx) {} +}; + +typedef ExprIter ValidatingExprIter; + +class FunctionDecoder +{ + const ModuleGenerator& mg_; + ValidatingExprIter iter_; + const ValTypeVector& locals_; + const DeclaredSig& sig_; + + public: + FunctionDecoder(JSContext* cx, const ModuleGenerator& mg, Decoder& d, + uint32_t funcIndex, const ValTypeVector& locals) + : mg_(mg), + iter_(ValidatingPolicy(cx), d), + locals_(locals), + sig_(mg.funcSig(funcIndex)) + {} + const ModuleGenerator& mg() const { return mg_; } + ValidatingExprIter& iter() { return iter_; } + const ValTypeVector& locals() const { return locals_; } + const DeclaredSig& sig() const { return sig_; } + bool checkI64Support() { if (!IsI64Implemented()) - return fail("i64 NYI on this platform"); - return true; - } - - MOZ_WARN_UNUSED_RESULT bool pushBlock() { - return blocks_.append(AnyType); - } - ExprType popBlock() { - return blocks_.popCopy(); - } - MOZ_WARN_UNUSED_RESULT bool branchWithType(uint32_t depth, ExprType type) { - if (depth >= blocks_.length()) - return false; - uint32_t absolute = blocks_.length() - 1 - depth; - blocks_[absolute] = Unify(blocks_[absolute], type); + return iter().notYetImplemented("i64 NYI on this platform"); return true; } }; -static bool -CheckType(FunctionDecoder& f, ExprType actual, ValType expected) -{ - if (actual == AnyType || actual == ToExprType(expected)) - return true; - - UniqueChars error(JS_smprintf("type mismatch: expression has type %s but expected %s", - ToCString(actual), ToCString(expected))); - if (!error) - return false; - - return f.fail(error.get()); -} - -static bool -CheckType(FunctionDecoder& f, ExprType actual, ExprType expected) -{ - MOZ_ASSERT(expected != AnyType); - return expected == ExprType::Void || - CheckType(f, actual, NonVoidToValType(expected)); -} +} // end anonymous namespace static bool CheckValType(JSContext* cx, Decoder& d, ValType type) @@ -191,530 +151,169 @@ CheckExprType(JSContext* cx, Decoder& d, ExprType type) } static bool -DecodeExpr(FunctionDecoder& f, ExprType* type); - -static bool -DecodeNop(FunctionDecoder& f, ExprType* type) +DecodeCallArgs(FunctionDecoder& f, const Sig& sig) { - *type = ExprType::Void; - return true; -} - -static bool -DecodeUnreachable(FunctionDecoder& f, ExprType* type) -{ - *type = AnyType; - return true; -} - -static bool -DecodeCallWithSig(FunctionDecoder& f, const Sig& sig, ExprType* type) -{ - for (ValType argType : sig.args()) { - ExprType exprType; - if (!DecodeExpr(f, &exprType)) - return false; - - if (!CheckType(f, exprType, argType)) + Nothing arg; + const ValTypeVector& args = sig.args(); + uint32_t numArgs = args.length(); + for (size_t i = 0; i < numArgs; ++i) { + ValType argType = args[i]; + if (!f.iter().readCallArg(argType, numArgs, i, &arg)) return false; } - *type = sig.ret(); - return true; + return f.iter().readCallArgsEnd(numArgs); } static bool -DecodeCall(FunctionDecoder& f, ExprType* type) +DecodeCallReturn(FunctionDecoder& f, const Sig& sig) { - uint32_t funcIndex; - if (!f.d().readVarU32(&funcIndex)) - return f.fail("unable to read import index"); - - if (funcIndex >= f.mg().numFuncSigs()) - return f.fail("callee index out of range"); - - return DecodeCallWithSig(f, f.mg().funcSig(funcIndex), type); + return f.iter().readCallReturn(sig.ret()); } static bool -DecodeCallImport(FunctionDecoder& f, ExprType* type) +DecodeCall(FunctionDecoder& f) { - uint32_t importIndex; - if (!f.d().readVarU32(&importIndex)) - return f.fail("unable to read import index"); - - if (importIndex >= f.mg().numImports()) - return f.fail("import index out of range"); - - return DecodeCallWithSig(f, *f.mg().import(importIndex).sig, type); -} - -static bool -DecodeCallIndirect(FunctionDecoder& f, ExprType* type) -{ - uint32_t sigIndex; - if (!f.d().readVarU32(&sigIndex)) - return f.fail("unable to read indirect call signature index"); - - if (sigIndex >= f.mg().numSigs()) - return f.fail("signature index out of range"); - - ExprType indexType; - if (!DecodeExpr(f, &indexType)) + if (!f.iter().readCall()) return false; - if (!CheckType(f, indexType, ValType::I32)) + const CallRecord& call = f.iter().call(); + if (call.callee >= f.mg().numFuncSigs()) + return f.iter().fail("callee index out of range"); + + const Sig& sig = f.mg().funcSig(call.callee); + return DecodeCallArgs(f, sig) && + DecodeCallReturn(f, sig); +} + +static bool +DecodeCallIndirect(FunctionDecoder& f) +{ + if (!f.iter().readCallIndirect()) return false; - return DecodeCallWithSig(f, f.mg().sig(sigIndex), type); -} + const CallIndirectRecord& callIndirect = f.iter().callIndirect(); + if (callIndirect.sigIndex >= f.mg().numSigs()) + return f.iter().fail("signature index out of range"); -static bool -DecodeConstI32(FunctionDecoder& f, ExprType* type) -{ - int32_t _; - if (!f.d().readVarS32(&_)) - return f.fail("unable to read i32.const immediate"); - - *type = ExprType::I32; - return true; -} - -static bool -DecodeConstI64(FunctionDecoder& f, ExprType* type) -{ - int64_t _; - if (!f.d().readVarS64(&_)) - return f.fail("unable to read i64.const immediate"); - - *type = ExprType::I64; - return true; -} - -static bool -DecodeConstF32(FunctionDecoder& f, ExprType* type) -{ - float value; - if (!f.d().readFixedF32(&value)) - return f.fail("unable to read f32.const immediate"); - - if (IsNaN(value)) { - const float jsNaN = (float)JS::GenericNaN(); - if (memcmp(&value, &jsNaN, sizeof(value)) != 0) - return f.fail("NYI: NaN literals with custom payloads"); - } - - *type = ExprType::F32; - return true; -} - -static bool -DecodeConstF64(FunctionDecoder& f, ExprType* type) -{ - double value; - if (!f.d().readFixedF64(&value)) - return f.fail("unable to read f64.const immediate"); - - if (IsNaN(value)) { - const double jsNaN = JS::GenericNaN(); - if (memcmp(&value, &jsNaN, sizeof(value)) != 0) - return f.fail("NYI: NaN literals with custom payloads"); - } - - *type = ExprType::F64; - return true; -} - -static bool -DecodeGetLocal(FunctionDecoder& f, ExprType* type) -{ - uint32_t localIndex; - if (!f.d().readVarU32(&localIndex)) - return f.fail("unable to read get_local index"); - - if (localIndex >= f.locals().length()) - return f.fail("get_local index out of range"); - - *type = ToExprType(f.locals()[localIndex]); - return true; -} - -static bool -DecodeSetLocal(FunctionDecoder& f, ExprType* type) -{ - uint32_t localIndex; - if (!f.d().readVarU32(&localIndex)) - return f.fail("unable to read set_local index"); - - if (localIndex >= f.locals().length()) - return f.fail("set_local index out of range"); - - *type = ToExprType(f.locals()[localIndex]); - - ExprType rhsType; - if (!DecodeExpr(f, &rhsType)) + const Sig& sig = f.mg().sig(callIndirect.sigIndex); + if (!DecodeCallArgs(f, sig)) return false; - return CheckType(f, rhsType, *type); + Nothing callee; + if (!f.iter().readCallIndirectCallee(&callee)) + return false; + + return DecodeCallReturn(f, sig); } static bool -DecodeBlock(FunctionDecoder& f, bool isLoop, ExprType* type) +DecodeCallImport(FunctionDecoder& f) { - if (!f.pushBlock()) - return f.fail("nesting overflow"); + if (!f.iter().readCallImport()) + return false; - if (isLoop) { - if (!f.pushBlock()) - return f.fail("nesting overflow"); - } + const CallImportRecord& callImport = f.iter().callImport(); + if (callImport.callee >= f.mg().numImports()) + return f.iter().fail("import index out of range"); - uint32_t numExprs; - if (!f.d().readVarU32(&numExprs)) - return f.fail("unable to read block's number of expressions"); + const Sig& sig = *f.mg().import(callImport.callee).sig; + return DecodeCallArgs(f, sig) && + DecodeCallReturn(f, sig); +} - ExprType exprType = ExprType::Void; +static bool +DecodeBrTable(FunctionDecoder& f) +{ + if (!f.iter().readBrTable()) + return false; - for (uint32_t i = 0; i < numExprs; i++) { - if (!DecodeExpr(f, &exprType)) + ExprType type = f.iter().brTable().type; + if (!IsVoid(type)) + return f.iter().notYetImplemented("non-void br_table"); + + uint32_t depth; + for (size_t i = 0, e = f.iter().brTable().tableLength; i < e; ++i) { + if (!f.iter().readBrTableEntry(type, &depth)) return false; } - if (isLoop) - f.popBlock(); - - ExprType branchType = f.popBlock(); - *type = Unify(branchType, exprType); - return true; + // Read the default label. + return f.iter().readBrTableEntry(type, &depth); } static bool -DecodeUnaryOperator(FunctionDecoder& f, ValType argType, ExprType *type) +DecodeExpr(FunctionDecoder& f) { - ExprType actual; - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, argType)) - return false; - - *type = ToExprType(argType); - return true; -} - -static bool -DecodeBinaryOperator(FunctionDecoder& f, ValType argType, ExprType* type) -{ - ExprType actual; - - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, argType)) - return false; - - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, argType)) - return false; - - *type = ToExprType(argType); - return true; -} - -static bool -DecodeComparisonOperator(FunctionDecoder& f, ValType argType, ExprType* type) -{ - ExprType actual; - - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, argType)) - return false; - - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, argType)) - return false; - - *type = ExprType::I32; - return true; -} - -static bool -DecodeConversionOperator(FunctionDecoder& f, ValType to, ValType argType, ExprType* type) -{ - ExprType actual; - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, argType)) - return false; - - *type = ToExprType(to); - return true; -} - -static bool -DecodeSelect(FunctionDecoder& f, ExprType* type) -{ - ExprType trueType; - if (!DecodeExpr(f, &trueType)) - return false; - - if (trueType == ExprType::I64 && !f.checkI64Support()) - return false; - - ExprType falseType; - if (!DecodeExpr(f, &falseType)) - return false; - - ExprType condType; - if (!DecodeExpr(f, &condType)) - return false; - - if (!CheckType(f, condType, ValType::I32)) - return false; - - *type = Unify(trueType, falseType); - return true; -} - -static bool -DecodeIfElse(FunctionDecoder& f, bool hasElse, ExprType* type) -{ - ExprType condType; - if (!DecodeExpr(f, &condType)) - return false; - - if (!CheckType(f, condType, ValType::I32)) - return false; - - ExprType thenType; - if (!DecodeExpr(f, &thenType)) - return false; - - if (hasElse) { - ExprType elseType; - if (!DecodeExpr(f, &elseType)) - return false; - - *type = Unify(thenType, elseType); - } else { - *type = ExprType::Void; - } - - return true; -} - -static bool -DecodeLoadStoreAddress(FunctionDecoder &f, unsigned width) -{ - uint32_t flags; - if (!f.d().readVarU32(&flags)) - return f.fail("expected memory access flags"); - - uint32_t alignLog2 = flags; - if (alignLog2 >= 32 || (1u << alignLog2) > width) - return f.fail("greater than natural alignment"); - - uint32_t offset; - if (!f.d().readVarU32(&offset)) - return f.fail("expected memory access offset"); - - ExprType baseType; - if (!DecodeExpr(f, &baseType)) - return false; - - return CheckType(f, baseType, ExprType::I32); -} - -static bool -DecodeLoad(FunctionDecoder& f, unsigned width, ValType loadType, ExprType* type) -{ - if (!DecodeLoadStoreAddress(f, width)) - return false; - - *type = ToExprType(loadType); - return true; -} - -static bool -DecodeStore(FunctionDecoder& f, unsigned width, ValType storeType, ExprType* type) -{ - if (!DecodeLoadStoreAddress(f, width)) - return false; - - ExprType actual; - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, storeType)) - return false; - - *type = ToExprType(storeType); - return true; -} - -static bool -DecodeBranch(FunctionDecoder& f, Expr expr, ExprType* type) -{ - MOZ_ASSERT(expr == Expr::Br || expr == Expr::BrIf); - - uint32_t relativeDepth; - if (!f.d().readVarU32(&relativeDepth)) - return f.fail("expected relative depth"); - - ExprType brType; - if (!DecodeExpr(f, &brType)) - return f.fail("expected branch value"); - - if (!f.branchWithType(relativeDepth, brType)) - return f.fail("branch depth exceeds current nesting level"); - - if (expr == Expr::BrIf) { - ExprType actual; - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, ValType::I32)) - return false; - - *type = ExprType::Void; - } else { - *type = AnyType; - } - - return true; -} - -static bool -DecodeBrTable(FunctionDecoder& f, ExprType* type) -{ - uint32_t tableLength; - if (!f.d().readVarU32(&tableLength)) - return false; - - if (tableLength > MaxBrTableElems) - return f.fail("too many br_table entries"); - - for (uint32_t i = 0; i < tableLength; i++) { - uint32_t depth; - if (!f.d().readFixedU32(&depth)) - return f.fail("missing br_table entry"); - - if (!f.branchWithType(depth, ExprType::Void)) - return f.fail("branch depth exceeds current nesting level"); - } - - uint32_t defaultDepth; - if (!f.d().readFixedU32(&defaultDepth)) - return f.fail("expected default relative depth"); - - if (!f.branchWithType(defaultDepth, ExprType::Void)) - return f.fail("branch depth exceeds current nesting level"); - - ExprType actual; - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, ExprType::I32)) - return false; - - *type = AnyType; - return true; -} - -static bool -DecodeReturn(FunctionDecoder& f, ExprType* type) -{ - if (f.sig().ret() != ExprType::Void) { - ExprType actual; - if (!DecodeExpr(f, &actual)) - return false; - - if (!CheckType(f, actual, f.sig().ret())) - return false; - } - - *type = AnyType; - return true; -} - -static bool -DecodeExpr(FunctionDecoder& f, ExprType* type) -{ - JS_CHECK_RECURSION(f.cx(), return false); - Expr expr; - if (!f.d().readExpr(&expr)) - return f.fail("unable to read expression"); + if (!f.iter().readExpr(&expr)) + return false; switch (expr) { case Expr::Nop: - return DecodeNop(f, type); + return f.iter().readTrivial(); case Expr::Call: - return DecodeCall(f, type); - case Expr::CallImport: - return DecodeCallImport(f, type); + return DecodeCall(f); case Expr::CallIndirect: - return DecodeCallIndirect(f, type); + return DecodeCallIndirect(f); + case Expr::CallImport: + return DecodeCallImport(f); case Expr::I32Const: - return DecodeConstI32(f, type); + return f.iter().readI32Const(); case Expr::I64Const: - return f.checkI64Support() && DecodeConstI64(f, type); + return f.checkI64Support() && + f.iter().readI64Const(); case Expr::F32Const: - return DecodeConstF32(f, type); + return f.iter().readF32Const(); case Expr::F64Const: - return DecodeConstF64(f, type); + return f.iter().readF64Const(); case Expr::GetLocal: - return DecodeGetLocal(f, type); + return f.iter().readGetLocal(f.locals()); case Expr::SetLocal: - return DecodeSetLocal(f, type); + return f.iter().readSetLocal(f.locals()); case Expr::Select: - return DecodeSelect(f, type); + return f.iter().readSelect(); case Expr::Block: - return DecodeBlock(f, /* isLoop */ false, type); + return f.iter().readBlock(); case Expr::Loop: - return DecodeBlock(f, /* isLoop */ true, type); + return f.iter().readLoop(); case Expr::If: - return DecodeIfElse(f, /* hasElse */ false, type); - case Expr::IfElse: - return DecodeIfElse(f, /* hasElse */ true, type); + return f.iter().readIf(); + case Expr::Else: + return f.iter().readElse(); + case Expr::End: + return f.iter().readEnd(); case Expr::I32Clz: case Expr::I32Ctz: case Expr::I32Popcnt: case Expr::I32Eqz: - return DecodeUnaryOperator(f, ValType::I32, type); + return f.iter().readUnary(ValType::I32); case Expr::I64Clz: case Expr::I64Ctz: case Expr::I64Popcnt: case Expr::I64Eqz: - return f.fail("NYI: i64") && - DecodeUnaryOperator(f, ValType::I64, type); + return f.iter().notYetImplemented("i64") && + f.iter().readUnary(ValType::I64); case Expr::F32Abs: case Expr::F32Neg: case Expr::F32Ceil: case Expr::F32Floor: case Expr::F32Sqrt: - return DecodeUnaryOperator(f, ValType::F32, type); + return f.iter().readUnary(ValType::F32); case Expr::F32Trunc: - return f.fail("NYI: trunc"); + return f.iter().notYetImplemented("trunc"); case Expr::F32Nearest: - return f.fail("NYI: nearest"); + return f.iter().notYetImplemented("nearest"); case Expr::F64Abs: case Expr::F64Neg: case Expr::F64Ceil: case Expr::F64Floor: case Expr::F64Sqrt: - return DecodeUnaryOperator(f, ValType::F64, type); + return f.iter().readUnary(ValType::F64); case Expr::F64Trunc: - return f.fail("NYI: trunc"); + return f.iter().notYetImplemented("trunc"); case Expr::F64Nearest: - return f.fail("NYI: nearest"); + return f.iter().notYetImplemented("nearest"); case Expr::I32Add: case Expr::I32Sub: case Expr::I32Mul: @@ -728,10 +327,10 @@ DecodeExpr(FunctionDecoder& f, ExprType* type) case Expr::I32Shl: case Expr::I32ShrS: case Expr::I32ShrU: - return DecodeBinaryOperator(f, ValType::I32, type); + return f.iter().readBinary(ValType::I32); case Expr::I32Rotl: case Expr::I32Rotr: - return f.fail("NYI: rotate"); + return f.iter().notYetImplemented("rotate"); case Expr::I64Add: case Expr::I64Sub: case Expr::I64Mul: @@ -745,28 +344,29 @@ DecodeExpr(FunctionDecoder& f, ExprType* type) case Expr::I64Shl: case Expr::I64ShrS: case Expr::I64ShrU: - return f.checkI64Support() && DecodeBinaryOperator(f, ValType::I64, type); + return f.checkI64Support() && + f.iter().readBinary(ValType::I64); case Expr::I64Rotl: case Expr::I64Rotr: - return f.fail("NYI: rotate"); + return f.iter().notYetImplemented("rotate"); case Expr::F32Add: case Expr::F32Sub: case Expr::F32Mul: case Expr::F32Div: case Expr::F32Min: case Expr::F32Max: - return DecodeBinaryOperator(f, ValType::F32, type); + return f.iter().readBinary(ValType::F32); case Expr::F32CopySign: - return f.fail("NYI: copysign"); + return f.iter().notYetImplemented("copysign"); case Expr::F64Add: case Expr::F64Sub: case Expr::F64Mul: case Expr::F64Div: case Expr::F64Min: case Expr::F64Max: - return DecodeBinaryOperator(f, ValType::F64, type); + return f.iter().readBinary(ValType::F64); case Expr::F64CopySign: - return f.fail("NYI: copysign"); + return f.iter().notYetImplemented("copysign"); case Expr::I32Eq: case Expr::I32Ne: case Expr::I32LtS: @@ -777,7 +377,7 @@ DecodeExpr(FunctionDecoder& f, ExprType* type) case Expr::I32GtU: case Expr::I32GeS: case Expr::I32GeU: - return DecodeComparisonOperator(f, ValType::I32, type); + return f.iter().readComparison(ValType::I32); case Expr::I64Eq: case Expr::I64Ne: case Expr::I64LtS: @@ -788,118 +388,129 @@ DecodeExpr(FunctionDecoder& f, ExprType* type) case Expr::I64GtU: case Expr::I64GeS: case Expr::I64GeU: - return f.checkI64Support() && DecodeComparisonOperator(f, ValType::I64, type); + return f.checkI64Support() && + f.iter().readComparison(ValType::I64); case Expr::F32Eq: case Expr::F32Ne: case Expr::F32Lt: case Expr::F32Le: case Expr::F32Gt: case Expr::F32Ge: - return DecodeComparisonOperator(f, ValType::F32, type); + return f.iter().readComparison(ValType::F32); case Expr::F64Eq: case Expr::F64Ne: case Expr::F64Lt: case Expr::F64Le: case Expr::F64Gt: case Expr::F64Ge: - return DecodeComparisonOperator(f, ValType::F64, type); + return f.iter().readComparison(ValType::F64); case Expr::I32WrapI64: return f.checkI64Support() && - DecodeConversionOperator(f, ValType::I32, ValType::I64, type); + f.iter().readConversion(ValType::I64, ValType::I32); case Expr::I32TruncSF32: case Expr::I32TruncUF32: case Expr::I32ReinterpretF32: - return DecodeConversionOperator(f, ValType::I32, ValType::F32, type); + return f.iter().readConversion(ValType::F32, ValType::I32); case Expr::I32TruncSF64: case Expr::I32TruncUF64: - return DecodeConversionOperator(f, ValType::I32, ValType::F64, type); + return f.iter().readConversion(ValType::F64, ValType::I32); case Expr::I64ExtendSI32: case Expr::I64ExtendUI32: return f.checkI64Support() && - DecodeConversionOperator(f, ValType::I64, ValType::I32, type); + f.iter().readConversion(ValType::I32, ValType::I64); case Expr::I64TruncSF32: case Expr::I64TruncUF32: return f.checkI64Support() && - DecodeConversionOperator(f, ValType::I64, ValType::F32, type); + f.iter().readConversion(ValType::F32, ValType::I64); case Expr::I64TruncSF64: case Expr::I64TruncUF64: case Expr::I64ReinterpretF64: return f.checkI64Support() && - DecodeConversionOperator(f, ValType::I64, ValType::F64, type); + f.iter().readConversion(ValType::F64, ValType::I64); case Expr::F32ConvertSI32: case Expr::F32ConvertUI32: case Expr::F32ReinterpretI32: - return DecodeConversionOperator(f, ValType::F32, ValType::I32, type); + return f.iter().readConversion(ValType::I32, ValType::F32); case Expr::F32ConvertSI64: case Expr::F32ConvertUI64: return f.checkI64Support() && - DecodeConversionOperator(f, ValType::F32, ValType::I64, type); + f.iter().readConversion(ValType::I64, ValType::F32); case Expr::F32DemoteF64: - return DecodeConversionOperator(f, ValType::F32, ValType::F64, type); + return f.iter().readConversion(ValType::F64, ValType::F32); case Expr::F64ConvertSI32: case Expr::F64ConvertUI32: - return DecodeConversionOperator(f, ValType::F64, ValType::I32, type); + return f.iter().readConversion(ValType::I32, ValType::F64); case Expr::F64ConvertSI64: case Expr::F64ConvertUI64: case Expr::F64ReinterpretI64: return f.checkI64Support() && - DecodeConversionOperator(f, ValType::F64, ValType::I64, type); + f.iter().readConversion(ValType::I64, ValType::F64); case Expr::F64PromoteF32: - return DecodeConversionOperator(f, ValType::F64, ValType::F32, type); + return f.iter().readConversion(ValType::F32, ValType::F64); case Expr::I32Load8S: case Expr::I32Load8U: - return DecodeLoad(f, 1, ValType::I32, type); + return f.iter().readLoad(ValType::I32, 1); case Expr::I32Load16S: case Expr::I32Load16U: - return DecodeLoad(f, 2, ValType::I32, type); + return f.iter().readLoad(ValType::I32, 2); case Expr::I32Load: - return DecodeLoad(f, 4, ValType::I32, type); - case Expr::I64Load: + return f.iter().readLoad(ValType::I32, 4); case Expr::I64Load8S: case Expr::I64Load8U: + return f.iter().notYetImplemented("i64") && + f.iter().readLoad(ValType::I64, 1); case Expr::I64Load16S: case Expr::I64Load16U: + return f.iter().notYetImplemented("i64") && + f.iter().readLoad(ValType::I64, 2); case Expr::I64Load32S: case Expr::I64Load32U: - return f.fail("NYI: i64") && - DecodeLoad(f, 0, ValType::I64, type); + return f.iter().notYetImplemented("i64") && + f.iter().readLoad(ValType::I64, 4); + case Expr::I64Load: + return f.iter().notYetImplemented("i64"); case Expr::F32Load: - return DecodeLoad(f, 4, ValType::F32, type); + return f.iter().readLoad(ValType::F32, 4); case Expr::F64Load: - return DecodeLoad(f, 8, ValType::F64, type); + return f.iter().readLoad(ValType::F64, 8); case Expr::I32Store8: - return DecodeStore(f, 1, ValType::I32, type); + return f.iter().readStore(ValType::I32, 1); case Expr::I32Store16: - return DecodeStore(f, 2, ValType::I32, type); + return f.iter().readStore(ValType::I32, 2); case Expr::I32Store: - return DecodeStore(f, 4, ValType::I32, type); - case Expr::I64Store: + return f.iter().readStore(ValType::I32, 4); case Expr::I64Store8: + return f.iter().notYetImplemented("i64") && + f.iter().readStore(ValType::I64, 1); case Expr::I64Store16: + return f.iter().notYetImplemented("i64") && + f.iter().readStore(ValType::I64, 2); case Expr::I64Store32: - return f.fail("NYI: i64") && - DecodeStore(f, 0, ValType::I64, type); + return f.iter().notYetImplemented("i64") && + f.iter().readStore(ValType::I64, 4); + case Expr::I64Store: + return f.iter().notYetImplemented("i64"); case Expr::F32Store: - return DecodeStore(f, 4, ValType::F32, type); + return f.iter().readStore(ValType::F32, 4); case Expr::F64Store: - return DecodeStore(f, 8, ValType::F64, type); + return f.iter().readStore(ValType::F64, 8); case Expr::Br: - return DecodeBranch(f, expr, type); + return f.iter().readBr(); case Expr::BrIf: - return DecodeBranch(f, expr, type); + return f.iter().readBrIf(); case Expr::BrTable: - return DecodeBrTable(f, type); + return DecodeBrTable(f); case Expr::Return: - return DecodeReturn(f, type); + return f.iter().readReturn(); case Expr::Unreachable: - return DecodeUnreachable(f, type); + return f.iter().readUnreachable(); default: // Note: it's important not to remove this default since readExpr() // can return Expr values for which there is no enumerator. break; } - return f.fail("bad expression code"); + return f.iter().unrecognizedOpcode(expr); } /*****************************************************************************/ @@ -1334,16 +945,17 @@ DecodeFunctionBody(JSContext* cx, Decoder& d, ModuleGenerator& mg, uint32_t func return false; } - FunctionDecoder f(cx, d, mg, fg, funcIndex, locals); + FunctionDecoder f(cx, mg, d, funcIndex, locals); - ExprType type = ExprType::Void; + if (!f.iter().readFunctionStart()) + return false; while (d.currentPosition() < bodyEnd) { - if (!DecodeExpr(f, &type)) + if (!DecodeExpr(f)) return false; } - if (!CheckType(f, type, f.sig().ret())) + if (!f.iter().readFunctionEnd(f.sig().ret())) return false; if (d.currentPosition() != bodyEnd) @@ -1750,4 +1362,3 @@ js::InitWasmClass(JSContext* cx, HandleObject global) global->as().setConstructor(JSProto_Wasm, ObjectValue(*Wasm)); return Wasm; } - diff --git a/js/src/asmjs/WasmBinary.h b/js/src/asmjs/WasmBinary.h index b5a5416fdb6f..2651daa9feb8 100644 --- a/js/src/asmjs/WasmBinary.h +++ b/js/src/asmjs/WasmBinary.h @@ -25,7 +25,7 @@ namespace js { namespace wasm { static const uint32_t MagicNumber = 0x6d736100; // "\0asm" -static const uint32_t EncodingVersion = 0xa; +static const uint32_t EncodingVersion = 0x0b; static const char SignaturesId[] = "signatures"; static const char ImportTableId[] = "import_table"; @@ -36,7 +36,7 @@ static const char ExportTableId[] = "export_table"; static const char FunctionBodiesId[] = "function_bodies"; static const char DataSegmentsId[] = "data_segments"; -enum class ValType +enum class ValType : uint8_t { // 0x00 is reserved for ExprType::Void in the binary encoding. See comment // below about ExprType going away. @@ -64,24 +64,25 @@ enum class Expr Block = 0x01, Loop = 0x02, If = 0x03, - IfElse = 0x04, + Else = 0x04, Select = 0x05, Br = 0x06, BrIf = 0x07, BrTable = 0x08, - Return = 0x14, - Unreachable = 0x15, + Return = 0x09, + Unreachable = 0x0a, + End = 0x0f, // Basic operators - I32Const = 0x0a, - I64Const = 0x0b, - F64Const = 0x0c, - F32Const = 0x0d, - GetLocal = 0x0e, - SetLocal = 0x0f, - Call = 0x12, - CallIndirect = 0x13, - CallImport = 0x1f, + I32Const = 0x10, + I64Const = 0x11, + F64Const = 0x12, + F32Const = 0x13, + GetLocal = 0x14, + SetLocal = 0x15, + Call = 0x16, + CallIndirect = 0x17, + CallImport = 0x18, // Memory-related operators I32Load8S = 0x20, @@ -107,7 +108,7 @@ enum class Expr I64Store = 0x34, F32Store = 0x35, F64Store = 0x36, - MemorySize = 0x3b, + CurrentMemory = 0x3b, GrowMemory = 0x39, // i32 operators @@ -252,8 +253,7 @@ enum class Expr // compiling asm.js and are rejected by wasm validation. // asm.js-specific operators - Id = 0xc0, - LoadGlobal, + LoadGlobal = 0xc0, StoreGlobal, I32Min, I32Max, @@ -316,7 +316,7 @@ enum class Expr // generalized to a list of ValType and this enum will go away, replaced, // wherever it is used, by a varU32 + list of ValType. -enum class ExprType +enum class ExprType : uint8_t { Void = 0x00, I32 = uint8_t(ValType::I32), @@ -464,15 +464,6 @@ class Encoder // Variable-length encodings that allow back-patching. - MOZ_WARN_UNUSED_RESULT bool writePatchableFixedU8(size_t* offset) { - *offset = bytes_.length(); - return bytes_.append(0xff); - } - void patchFixedU8(size_t offset, uint8_t i) { - MOZ_ASSERT(bytes_[offset] == 0xff); - bytes_[offset] = i; - } - MOZ_WARN_UNUSED_RESULT bool writePatchableVarU32(size_t* offset) { *offset = bytes_.length(); return writeVarU32(UINT32_MAX); @@ -481,16 +472,6 @@ class Encoder return patchVarU32(offset, patchBits, UINT32_MAX); } - MOZ_WARN_UNUSED_RESULT bool writePatchableOneByteExpr(size_t* offset) { - *offset = bytes_.length(); - return writeFixedU8(0xff); - } - void patchOneByteExpr(size_t offset, Expr expr) { - MOZ_ASSERT(size_t(expr) < UINT8_MAX); - MOZ_ASSERT(bytes_[offset] == 0xff); - bytes_[offset] = uint8_t(expr); - } - // Byte ranges start with an LEB128 length followed by an arbitrary sequence // of bytes. When used for strings, bytes are to be interpreted as utf8. @@ -834,12 +815,15 @@ class Decoder ? Expr(u8) : Expr(uncheckedReadFixedU8() + UINT8_MAX); } - Expr uncheckedPeekExpr() { - static_assert(size_t(Expr::Limit) <= ExprLimit, "fits"); - uint8_t u8 = cur_[0]; - return u8 != UINT8_MAX - ? Expr(u8) - : Expr(cur_[1] + UINT8_MAX); + void uncheckedReadFixedI32x4(I32x4* i32x4) { + struct T { I32x4 v; }; + T t = uncheckedRead(); + memcpy(i32x4, &t, sizeof(t)); + } + void uncheckedReadFixedF32x4(F32x4* f32x4) { + struct T { F32x4 v; }; + T t = uncheckedRead(); + memcpy(f32x4, &t, sizeof(t)); } }; diff --git a/js/src/asmjs/WasmBinaryIterator.h b/js/src/asmjs/WasmBinaryIterator.h new file mode 100644 index 000000000000..2c5a3cb45e91 --- /dev/null +++ b/js/src/asmjs/WasmBinaryIterator.h @@ -0,0 +1,2153 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * + * Copyright 2016 Mozilla Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef wasm_iterator_h +#define wasm_iterator_h + +#include "mozilla/Poison.h" + +#include "jsprf.h" + +#include "asmjs/WasmTypes.h" +#include "jit/AtomicOp.h" + +namespace js { +namespace wasm { + +// The kind of a control-flow stack item. +enum class LabelKind : uint8_t { Block, Loop, Then, Else }; + +// A description of a unary operator's parameters. +template +struct UnaryRecord +{ + Value op; + + explicit UnaryRecord(Value op) : op(op) {} +}; + +// A description of a binary operator's parameters. +template +struct BinaryRecord +{ + Value lhs; + Value rhs; + + BinaryRecord(Value lhs, Value rhs) : lhs(lhs), rhs(rhs) {} +}; + +// A description of a select operator's parameters. +template +struct SelectRecord +{ + ExprType type; + Value trueValue; + Value falseValue; + Value condition; + + SelectRecord(ExprType type, Value trueValue, Value falseValue, Value condition) + : type(type), trueValue(trueValue), falseValue(falseValue), condition(condition) + {} +}; + +// Common fields for linear memory access. +template +struct LinearMemoryAddress +{ + Value base; + uint32_t offset; + uint32_t align; + + LinearMemoryAddress(Value base, uint32_t offset, uint32_t align) + : base(base), offset(offset), align(align) + {} +}; + +// A description of a load operator's parameters. +template +struct LoadRecord +{ + LinearMemoryAddress addr; + + LoadRecord(Value base, uint32_t offset, uint32_t align) + : addr(base, offset, align) + {} +}; + +// A description of a store operator's parameters. +template +struct StoreRecord +{ + LinearMemoryAddress addr; + Value value; + + StoreRecord(Value base, uint32_t offset, uint32_t align, Value value) + : addr(base, offset, align), value(value) + {} +}; + +// A description of an if operator's parameters. +template +struct IfRecord +{ + Value condition; + + explicit IfRecord(Value condition) : condition(condition) {} +}; + +// A description of an else operator's parameters. +template +struct ElseRecord +{ + ExprType type; + Value thenValue; + + ElseRecord(ExprType type, Value thenValue) : type(type), thenValue(thenValue) {} +}; + +// A description of a block, loop, if, or else operator's parameters. +template +struct EndRecord +{ + LabelKind kind; + ExprType type; + Value value; + + explicit EndRecord(LabelKind kind, ExprType type, Value value) + : kind(kind), type(type), value(value) + {} +}; + +// A description of a br operator's parameters. +template +struct BrRecord +{ + uint32_t relativeDepth; + ExprType type; + Value value; + + BrRecord(uint32_t relativeDepth, ExprType type, Value value) + : relativeDepth(relativeDepth), type(type), value(value) {} +}; + +// A description of a br_if operator's parameters. +template +struct BrIfRecord +{ + uint32_t relativeDepth; + ExprType type; + Value value; + Value condition; + + BrIfRecord(uint32_t relativeDepth, ExprType type, Value value, Value condition) + : relativeDepth(relativeDepth), type(type), value(value), condition(condition) + {} +}; + +// A description of a br_table operator's parameters. +template +struct BrTableRecord +{ + uint32_t tableLength; + ExprType type; + Value index; + + BrTableRecord(uint32_t tableLength, ExprType type, Value index) + : tableLength(tableLength), type(type), index(index) + {} +}; + +// A description of a get_local or get_global operator's parameters. +struct GetVarRecord +{ + uint32_t id; + + explicit GetVarRecord(uint32_t id) : id(id) {} +}; + +// A description of a set_local or set_global operator's parameters. +template +struct SetVarRecord +{ + uint32_t id; + Value value; + + SetVarRecord(uint32_t id, Value value) : id(id), value(value) {} +}; + +// A description of a call operator's parameters, not counting the call arguments. +struct CallRecord +{ + uint32_t callee; + + explicit CallRecord(uint32_t callee) + : callee(callee) + {} +}; + +// A description of a call_indirect operator's parameters, not counting the call arguments. +template +struct CallIndirectRecord +{ + uint32_t sigIndex; + + explicit CallIndirectRecord(uint32_t sigIndex) + : sigIndex(sigIndex) + {} +}; + +// A description of a call_import operator's parameters, not counting the call arguments. +struct CallImportRecord +{ + uint32_t callee; + + explicit CallImportRecord(uint32_t callee) + : callee(callee) + {} +}; + +// A description of a return operator's parameters. +template +struct ReturnRecord +{ + Value value; + + explicit ReturnRecord(Value value) : value(value) {} +}; + +template +struct AtomicLoadRecord +{ + LinearMemoryAddress addr; + Scalar::Type viewType; +}; + +template +struct AtomicStoreRecord +{ + LinearMemoryAddress addr; + Scalar::Type viewType; + Value value; +}; + +template +struct AtomicBinOpRecord +{ + LinearMemoryAddress addr; + Scalar::Type viewType; + jit::AtomicOp op; + Value value; +}; + +template +struct AtomicCompareExchangeRecord +{ + LinearMemoryAddress addr; + Scalar::Type viewType; + Value oldValue; + Value newValue; +}; + +template +struct AtomicExchangeRecord +{ + LinearMemoryAddress addr; + Scalar::Type viewType; + Value value; +}; + +template +struct ExtractLaneRecord +{ + jit::SimdLane lane; + Value vector; + + ExtractLaneRecord(jit::SimdLane lane, Value vector) : lane(lane), vector(vector) {} +}; + +template +struct ReplaceLaneRecord +{ + jit::SimdLane lane; + Value vector; + Value scalar; + + ReplaceLaneRecord(jit::SimdLane lane, Value vector, Value scalar) + : lane(lane), vector(vector), scalar(scalar) + {} +}; + +template +struct SwizzleRecord +{ + uint8_t lanes[4]; + Value vector; + + SwizzleRecord(uint8_t lanes[4], Value vector) + : vector(vector) + { + memcpy(this->lanes, lanes, sizeof(this->lanes)); + } +}; + +template +struct ShuffleRecord +{ + uint8_t lanes[4]; + Value lhs; + Value rhs; + + ShuffleRecord(uint8_t lanes[4], Value lhs, Value rhs) + : lhs(lhs), rhs(rhs) + { + memcpy(this->lanes, lanes, sizeof(this->lanes)); + } +}; + +template +struct SimdSelectRecord +{ + Value trueValue; + Value falseValue; + Value condition; + + SimdSelectRecord(Value trueValue, Value falseValue, Value condition) + : trueValue(trueValue), falseValue(falseValue), condition(condition) {} +}; + +struct Nothing {}; + +template +class ControlStackEntry +{ + LabelKind kind_; + ExprType type_; + size_t valueStackStart_; + ControlItem controlItem_; + + public: + ControlStackEntry(LabelKind kind, size_t valueStackStart) + : kind_(kind), type_(AnyType), valueStackStart_(valueStackStart) + {} + + LabelKind kind() const { return kind_; } + size_t valueStackStart() const { return valueStackStart_; } + const ExprType& type() const { return type_; } + ExprType& type() { return type_; } + ControlItem& controlItem() { return controlItem_; } +}; + +// Specialization for when there is no additional data needed. +template <> +class ControlStackEntry +{ + LabelKind kind_; + ExprType type_; + size_t valueStackStart_; + + public: + ControlStackEntry(LabelKind kind, size_t valueStackStart) + : kind_(kind), type_(AnyType), valueStackStart_(valueStackStart) + {} + + LabelKind kind() const { return kind_; } + size_t valueStackStart() const { return valueStackStart_; } + const ExprType& type() const { return type_; } + ExprType& type() { return type_; } + Nothing controlItem() { return Nothing(); } +}; + +template +class TypeAndValue +{ + ExprType type_; + Value value_; + + public: + TypeAndValue() = default; + explicit TypeAndValue(ExprType type) + : type_(type) + {} + TypeAndValue(ExprType type, Value value) + : type_(type), value_(value) + {} + ExprType type() const { + return type_; + } + Value value() const { + return value_; + } + void setValue(Value value) { + value_ = value; + } +}; + +// Specialization for when there is no additional data needed. +template <> +struct TypeAndValue +{ + ExprType type_; + + public: + TypeAndValue() {} + explicit TypeAndValue(ExprType type) : type_(type) {} + + TypeAndValue(ExprType type, Nothing value) + : type_(type) + {} + + ExprType type() const { return type_; } + Nothing value() const { return Nothing(); } + void setValue(Nothing value) {} +}; + +// A policy class for configuring ExprIter. Clients can use this as a +// base class, and override the behavior as needed. +struct ExprIterPolicy +{ + // Should the iterator perform validation, such as type checking and + // validity checking? + static const bool Validate = false; + + // This function is called to report failures. + static bool fail(const char*, Decoder&) { + MOZ_CRASH("unexpected validation failure"); + return false; + } + + // These members allow clients to add additional information to the value + // and control stacks, respectively. Using Nothing means that no additional + // field is added. + typedef Nothing Value; + typedef Nothing ControlItem; +}; + +// An iterator over the bytes of a function body. It performs validation +// (if Policy::Validate is true) and unpacks the data into a usable form. +template +class ExprIter : private Policy +{ + static const bool Validate = Policy::Validate; + typedef typename Policy::Value Value; + typedef typename Policy::ControlItem ControlItem; + + // A union containing all the expression description types. + union ExprRecord { + ExprRecord() { +#ifdef DEBUG + mozWritePoison(this, sizeof(*this)); +#endif + } + + int32_t i32; + int64_t i64; + float f32; + double f64; + I32x4 i32x4; + F32x4 f32x4; + BrRecord br; + BrIfRecord brIf; + BrTableRecord brTable; + UnaryRecord unary; + BinaryRecord binary; + LoadRecord load; + StoreRecord store; + SelectRecord select; + GetVarRecord getVar; + SetVarRecord setVar; + CallRecord call; + CallIndirectRecord callIndirect; + CallImportRecord callImport; + ReturnRecord return_; + IfRecord if_; + ElseRecord else_; + EndRecord end; + AtomicLoadRecord atomicLoad; + AtomicStoreRecord atomicStore; + AtomicBinOpRecord atomicBinOp; + AtomicCompareExchangeRecord atomicCompareExchange; + AtomicExchangeRecord atomicExchange; + ExtractLaneRecord extractLane; + ReplaceLaneRecord replaceLane; + SwizzleRecord swizzle; + ShuffleRecord shuffle; + SimdSelectRecord simdSelect; + }; + + Decoder& d_; + + Vector, 0, SystemAllocPolicy> valueStack_; + Vector, 0, SystemAllocPolicy> controlStack_; + + ExprRecord u_; + +#ifdef DEBUG + Expr expr_; + bool isInitialized_; +#endif + + MOZ_WARN_UNUSED_RESULT bool readFixedU8(uint8_t* out) { + if (Validate) + return d_.readFixedU8(out); + *out = d_.uncheckedReadFixedU8(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readFixedU32(uint32_t* out) { + if (Validate) + return d_.readFixedU32(out); + *out = d_.uncheckedReadFixedU32(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readVarS32(int32_t* out) { + if (Validate) + return d_.readVarS32(out); + *out = d_.uncheckedReadVarS32(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readVarU32(uint32_t* out) { + if (Validate) + return d_.readVarU32(out); + *out = d_.uncheckedReadVarU32(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readVarS64(int64_t* out) { + if (Validate) + return d_.readVarS64(out); + *out = d_.uncheckedReadVarS64(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readVarU64(uint64_t* out) { + if (Validate) + return d_.readVarU64(out); + *out = d_.uncheckedReadVarU64(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readFixedF32(float* out) { + if (Validate) + return d_.readFixedF32(out); + *out = d_.uncheckedReadFixedF32(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readFixedF64(double* out) { + if (Validate) + return d_.readFixedF64(out); + *out = d_.uncheckedReadFixedF64(); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readFixedI32x4(I32x4* out) { + if (Validate) + return d_.readFixedI32x4(out); + d_.uncheckedReadFixedI32x4(out); + return true; + } + MOZ_WARN_UNUSED_RESULT bool readFixedF32x4(F32x4* out) { + if (Validate) + return d_.readFixedF32x4(out); + d_.uncheckedReadFixedF32x4(out); + return true; + } + + MOZ_WARN_UNUSED_RESULT bool readAtomicViewType(Scalar::Type* viewType) { + uint8_t x; + if (!readFixedU8(&x)) + return false; + if (Validate && x >= Scalar::MaxTypedArrayViewType) + return fail("invalid atomic view type"); + *viewType = Scalar::Type(x); + return true; + } + + MOZ_WARN_UNUSED_RESULT bool readAtomicBinOpOp(jit::AtomicOp* op) { + uint8_t x; + if (!readFixedU8(&x)) + return false; + if (Validate) { + switch (x) { + case jit::AtomicFetchAddOp: + case jit::AtomicFetchSubOp: + case jit::AtomicFetchAndOp: + case jit::AtomicFetchOrOp: + case jit::AtomicFetchXorOp: + break; + default: + return fail("unrecognized atomic binop"); + } + } + *op = jit::AtomicOp(x); + return true; + } + +#ifdef DEBUG + bool isI32() const { return isInitialized_ && expr_ == Expr::I32Const; } + bool isI64() const { return isInitialized_ && expr_ == Expr::I64Const; } + bool isF32() const { return isInitialized_ && expr_ == Expr::F32Const; } + bool isF64() const { return isInitialized_ && expr_ == Expr::F64Const; } + bool isI32x4() const { return isInitialized_ && (expr_ == Expr::I32x4Const || + expr_ == Expr::B32x4Const); } + bool isF32x4() const { return isInitialized_ && expr_ == Expr::F32x4Const; } + bool isBr() const { return isInitialized_ && expr_ == Expr::Br; } + bool isBrIf() const { return isInitialized_ && expr_ == Expr::BrIf; } + bool isBrTable() const { return isInitialized_ && expr_ == Expr::BrTable; } + bool isUnary() const { + return isInitialized_ && + (expr_ == Expr::I32Clz || expr_ == Expr::I32Ctz || + expr_ == Expr::I32Popcnt || expr_ == Expr::I32Eqz || + expr_ == Expr::I64Clz || expr_ == Expr::I64Ctz || + expr_ == Expr::I64Popcnt || expr_ == Expr::I64Eqz || + expr_ == Expr::F32Abs || expr_ == Expr::F32Neg || + expr_ == Expr::F32Ceil || expr_ == Expr::F32Floor || + expr_ == Expr::F32Sqrt || expr_ == Expr::F64Abs || + expr_ == Expr::F64Neg || expr_ == Expr::F64Ceil || + expr_ == Expr::F64Floor || expr_ == Expr::F64Sqrt || + expr_ == Expr::I32WrapI64 || expr_ == Expr::I32TruncSF32 || + expr_ == Expr::I32TruncUF32 || expr_ == Expr::I32ReinterpretF32 || + expr_ == Expr::I32TruncSF64 || expr_ == Expr::I32TruncUF64 || + expr_ == Expr::I64ExtendSI32 || expr_ == Expr::I64ExtendUI32 || + expr_ == Expr::I64TruncSF32 || expr_ == Expr::I64TruncUF32 || + expr_ == Expr::I64TruncSF64 || expr_ == Expr::I64TruncUF64 || + expr_ == Expr::I64ReinterpretF64 || expr_ == Expr::F32ConvertSI32 || + expr_ == Expr::F32ConvertUI32 || expr_ == Expr::F32ReinterpretI32 || + expr_ == Expr::F32ConvertSI64 || expr_ == Expr::F32ConvertUI64 || + expr_ == Expr::F32DemoteF64 || expr_ == Expr::F64ConvertSI32 || + expr_ == Expr::F64ConvertUI32 || expr_ == Expr::F64ConvertSI64 || + expr_ == Expr::F64ConvertUI64 || expr_ == Expr::F64ReinterpretI64 || + expr_ == Expr::F64PromoteF32 || + expr_ == Expr::I32BitNot || expr_ == Expr::I32Abs || + expr_ == Expr::F64Sin || expr_ == Expr::F64Cos || + expr_ == Expr::F64Tan || expr_ == Expr::F64Asin || + expr_ == Expr::F64Acos || expr_ == Expr::F64Atan || + expr_ == Expr::F64Exp || expr_ == Expr::F64Log || + expr_ == Expr::I32Neg || + expr_ == Expr::I32x4splat || expr_ == Expr::F32x4splat || + expr_ == Expr::B32x4splat || + expr_ == Expr::I32x4check || expr_ == Expr::F32x4check || + expr_ == Expr::B32x4check || + expr_ == Expr::I32x4fromFloat32x4 || + expr_ == Expr::I32x4fromFloat32x4U || + expr_ == Expr::F32x4fromInt32x4 || + expr_ == Expr::F32x4fromUint32x4 || + expr_ == Expr::I32x4fromFloat32x4Bits || + expr_ == Expr::F32x4fromInt32x4Bits || + expr_ == Expr::F32x4fromUint32x4Bits || + expr_ == Expr::I32x4neg || expr_ == Expr::I32x4not || + expr_ == Expr::F32x4neg || expr_ == Expr::F32x4sqrt || + expr_ == Expr::F32x4abs || + expr_ == Expr::F32x4reciprocalApproximation || + expr_ == Expr::F32x4reciprocalSqrtApproximation || + expr_ == Expr::B32x4not || + expr_ == Expr::B32x4anyTrue || + expr_ == Expr::B32x4allTrue); + } + bool isBinary() const { + return isInitialized_ && + (expr_ == Expr::I32Add || expr_ == Expr::I32Sub || + expr_ == Expr::I32Mul || expr_ == Expr::I32DivS || + expr_ == Expr::I32DivU || expr_ == Expr::I32RemS || + expr_ == Expr::I32RemU || expr_ == Expr::I32And || + expr_ == Expr::I32Or || expr_ == Expr::I32Xor || + expr_ == Expr::I32Shl || expr_ == Expr::I32ShrS || + expr_ == Expr::I32ShrU || expr_ == Expr::I32Rotl || + expr_ == Expr::I32Rotr || expr_ == Expr::I64Add || + expr_ == Expr::I64Sub || expr_ == Expr::I64Mul || + expr_ == Expr::I64DivS || expr_ == Expr::I64DivU || + expr_ == Expr::I64RemS || expr_ == Expr::I64RemU || + expr_ == Expr::I64And || expr_ == Expr::I64Or || + expr_ == Expr::I64Xor || expr_ == Expr::I64Shl || + expr_ == Expr::I64ShrS || expr_ == Expr::I64ShrU || + expr_ == Expr::I64Rotl || expr_ == Expr::I64Rotr || + expr_ == Expr::F32Add || expr_ == Expr::F32Sub || + expr_ == Expr::F32Mul || expr_ == Expr::F32Div || + expr_ == Expr::F32Min || expr_ == Expr::F32Max || + expr_ == Expr::F32CopySign || expr_ == Expr::F64Add || + expr_ == Expr::F64Sub || expr_ == Expr::F64Mul || + expr_ == Expr::F64Div || expr_ == Expr::F64Min || + expr_ == Expr::F64Max || expr_ == Expr::F64CopySign || + expr_ == Expr::I32Eq || expr_ == Expr::I32Ne || + expr_ == Expr::I32LtS || expr_ == Expr::I32LtU || + expr_ == Expr::I32LeS || expr_ == Expr::I32LeU || + expr_ == Expr::I32GtS || expr_ == Expr::I32GtU || + expr_ == Expr::I32GeS || expr_ == Expr::I32GeU || + expr_ == Expr::I64Eq || expr_ == Expr::I64Ne || + expr_ == Expr::I64LtS || expr_ == Expr::I64LtU || + expr_ == Expr::I64LeS || expr_ == Expr::I64LeU || + expr_ == Expr::I64GtS || expr_ == Expr::I64GtU || + expr_ == Expr::I64GeS || expr_ == Expr::I64GeU || + expr_ == Expr::F32Eq || expr_ == Expr::F32Ne || + expr_ == Expr::F32Lt || expr_ == Expr::F32Le || + expr_ == Expr::F32Gt || expr_ == Expr::F32Ge || + expr_ == Expr::F64Eq || expr_ == Expr::F64Ne || + expr_ == Expr::F64Lt || expr_ == Expr::F64Le || + expr_ == Expr::F64Gt || expr_ == Expr::F64Ge || + expr_ == Expr::I32Min || expr_ == Expr::I32Max || + expr_ == Expr::F64Mod || expr_ == Expr::F64Pow || + expr_ == Expr::F64Atan2 || + expr_ == Expr::I32x4add || expr_ == Expr::I32x4sub || + expr_ == Expr::I32x4mul || + expr_ == Expr::I32x4and || expr_ == Expr::I32x4or || + expr_ == Expr::I32x4xor || + expr_ == Expr::I32x4shiftLeftByScalar || + expr_ == Expr::I32x4shiftRightByScalar || + expr_ == Expr::I32x4shiftRightByScalarU || + expr_ == Expr::I32x4equal || expr_ == Expr::I32x4notEqual || + expr_ == Expr::I32x4greaterThan || + expr_ == Expr::I32x4greaterThanOrEqual || + expr_ == Expr::I32x4lessThan || + expr_ == Expr::I32x4lessThanOrEqual || + expr_ == Expr::I32x4greaterThanU || + expr_ == Expr::I32x4greaterThanOrEqualU || + expr_ == Expr::I32x4lessThanU || + expr_ == Expr::I32x4lessThanOrEqualU || + expr_ == Expr::F32x4add || expr_ == Expr::F32x4sub || + expr_ == Expr::F32x4mul || expr_ == Expr::F32x4div || + expr_ == Expr::F32x4min || expr_ == Expr::F32x4max || + expr_ == Expr::F32x4minNum || expr_ == Expr::F32x4maxNum || + expr_ == Expr::F32x4equal || + expr_ == Expr::F32x4notEqual || + expr_ == Expr::F32x4greaterThan || + expr_ == Expr::F32x4greaterThanOrEqual || + expr_ == Expr::F32x4lessThan || + expr_ == Expr::F32x4lessThanOrEqual || + expr_ == Expr::B32x4and || expr_ == Expr::B32x4or || + expr_ == Expr::B32x4xor); + } + bool isLoad() const { + return isInitialized_ && + (expr_ == Expr::I32Load8S || expr_ == Expr::I32Load8U || + expr_ == Expr::I32Load16S || expr_ == Expr::I32Load16U || + expr_ == Expr::I64Load8S || expr_ == Expr::I64Load8U || + expr_ == Expr::I64Load16S || expr_ == Expr::I64Load16U || + expr_ == Expr::I64Load32S || expr_ == Expr::I64Load32U || + expr_ == Expr::I32Load || expr_ == Expr::I64Load || + expr_ == Expr::F32Load || expr_ == Expr::F64Load || + expr_ == Expr::I32x4load || expr_ == Expr::I32x4load1 || + expr_ == Expr::I32x4load2 || expr_ == Expr::I32x4load3 || + expr_ == Expr::F32x4load || expr_ == Expr::F32x4load1 || + expr_ == Expr::F32x4load2 || expr_ == Expr::F32x4load3); + } + bool isStore() const { + return isInitialized_ && + (expr_ == Expr::I32Store8 || expr_ == Expr::I32Store16 || + expr_ == Expr::I64Store8 || expr_ == Expr::I64Store16 || + expr_ == Expr::I64Store32 || expr_ == Expr::I32Store || + expr_ == Expr::I64Store || expr_ == Expr::F32Store || + expr_ == Expr::F64Store || + expr_ == Expr::F32StoreF64 || expr_ == Expr::F64StoreF32 || + expr_ == Expr::I32x4store || expr_ == Expr::I32x4store1 || + expr_ == Expr::I32x4store2 || expr_ == Expr::I32x4store3 || + expr_ == Expr::F32x4store || expr_ == Expr::F32x4store1 || + expr_ == Expr::F32x4store2 || expr_ == Expr::F32x4store3); + } + bool isSelect() const { return isInitialized_ && expr_ == Expr::Select; } + bool isGetVar() const { return isInitialized_ && + (expr_ == Expr::GetLocal || + expr_ == Expr::LoadGlobal); } + bool isSetVar() const { return isInitialized_ && + (expr_ == Expr::SetLocal || + expr_ == Expr::StoreGlobal); } + bool isCall() const { return isInitialized_ && expr_ == Expr::Call; } + bool isCallIndirect() const { return isInitialized_ && expr_ == Expr::CallIndirect; } + bool isCallImport() const { return isInitialized_ && expr_ == Expr::CallImport; } + bool isReturn() const { + // Accept Limit, for use in decoding the end of a function after the body. + return isInitialized_ && + (expr_ == Expr::Return || + expr_ == Expr::Limit); + } + bool isIf() const { return isInitialized_ && expr_ == Expr::If; } + bool isElse() const { return isInitialized_ && expr_ == Expr::Else; } + bool isEnd() const { return isInitialized_ && expr_ == Expr::End; } + bool isAtomicLoad() const { return isInitialized_ && expr_ == Expr::I32AtomicsLoad; } + bool isAtomicStore() const { return isInitialized_ && expr_ == Expr::I32AtomicsStore; } + bool isAtomicBinOp() const { return isInitialized_ && expr_ == Expr::I32AtomicsBinOp; } + bool isAtomicCompareExchange() const { + return isInitialized_ && expr_ == Expr::I32AtomicsCompareExchange; + } + bool isAtomicExchange() const { + return isInitialized_ && expr_ == Expr::I32AtomicsExchange; + } + bool isExtractLane() const { + return isInitialized_ && + (expr_ == Expr::I32x4extractLane || + expr_ == Expr::F32x4extractLane || + expr_ == Expr::B32x4extractLane); + } + bool isReplaceLane() const { + return isInitialized_ && + (expr_ == Expr::I32x4replaceLane || + expr_ == Expr::F32x4replaceLane || + expr_ == Expr::B32x4replaceLane); + } + bool isSwizzle() const { + return isInitialized_ && + (expr_ == Expr::I32x4swizzle || + expr_ == Expr::F32x4swizzle); + } + bool isShuffle() const { + return isInitialized_ && + (expr_ == Expr::I32x4shuffle || + expr_ == Expr::F32x4shuffle); + } + bool isSimdSelect() const { + return isInitialized_ && + (expr_ == Expr::I32x4select || + expr_ == Expr::F32x4select); + } +#endif + +#ifdef DEBUG + bool setInitialized() { + isInitialized_ = true; + return true; + } +#else + bool setInitialized() { return true; } +#endif + + MOZ_WARN_UNUSED_RESULT bool typeMismatch(ExprType actual, ExprType expected) MOZ_COLD; + MOZ_WARN_UNUSED_RESULT bool checkType(ExprType actual, ExprType expected); + MOZ_WARN_UNUSED_RESULT bool readFunctionReturnValue(ExprType ret); + MOZ_WARN_UNUSED_RESULT bool checkBranch(uint32_t relativeDepth, ExprType type); + MOZ_WARN_UNUSED_RESULT bool pushControl(LabelKind kind); + MOZ_WARN_UNUSED_RESULT bool popControl(LabelKind* kind, ExprType* type, Value* value); + MOZ_WARN_UNUSED_RESULT bool popControlAfterCheck(LabelKind* kind, ExprType* type, Value* value); + MOZ_WARN_UNUSED_RESULT bool push(ExprType t) { return valueStack_.emplaceBack(t); } + MOZ_WARN_UNUSED_RESULT bool push(TypeAndValue tv) { return valueStack_.append(tv); } + MOZ_WARN_UNUSED_RESULT bool readLinearMemoryAddress(uint32_t byteSize, LinearMemoryAddress* addr); + + void infallibleCheckSuccessor(ControlStackEntry& controlItem, ExprType type); + void infalliblePush(ExprType t) { valueStack_.infallibleEmplaceBack(t); } + void infalliblePush(TypeAndValue tv) { valueStack_.infallibleAppend(tv); } + + // Test whether reading the top of the value stack is currently valid. + MOZ_WARN_UNUSED_RESULT bool checkTop() { + if (Validate && valueStack_.length() <= controlStack_.back().valueStackStart()) + return fail("popping value from outside block"); + return true; + } + + // Pop the top of the value stack. + MOZ_WARN_UNUSED_RESULT bool pop(TypeAndValue* tv) { + if (!checkTop()) + return false; + *tv = valueStack_.popCopy(); + return true; + } + + // Pop the top of the value stack and check that it has the given type. + MOZ_WARN_UNUSED_RESULT bool popWithType(ExprType expectedType, Value* value) { + if (!checkTop()) + return false; + TypeAndValue tv = valueStack_.popCopy(); + if (!checkType(tv.type(), expectedType)) + return false; + *value = tv.value(); + return true; + } + + // Read the top of the value stack (without popping it). + MOZ_WARN_UNUSED_RESULT bool top(TypeAndValue* tv) { + if (!checkTop()) + return false; + *tv = valueStack_.back(); + return true; + } + + // Read the top of the value stack (without popping it) and check that it + // has the given type. + MOZ_WARN_UNUSED_RESULT bool topWithType(ExprType expectedType, Value* value) { + if (!checkTop()) + return false; + TypeAndValue& tv = valueStack_.back(); + if (!checkType(tv.type(), expectedType)) + return false; + *value = tv.value(); + return true; + } + + // Read the value stack entry at depth |index|. + bool peek(uint32_t index, TypeAndValue* tv) { + if (Validate && valueStack_.length() - controlStack_.back().valueStackStart() <= index) + return fail("peeking at value from outside block"); + *tv = valueStack_[valueStack_.length() - index]; + return true; + } + + public: + ExprIter(Policy policy, Decoder& decoder) + : Policy(policy), d_(decoder) + { +#ifdef DEBUG + expr_ = Expr::Limit; + isInitialized_ = false; +#endif + } + + // Return the decoding byte offset. + uint32_t currentOffset() const { return d_.currentOffset(); } + + // Test whether the iterator has reached the end of the buffer. + bool done() const { return d_.done(); } + + // Report a general failure. + MOZ_WARN_UNUSED_RESULT bool fail(const char* msg) MOZ_COLD; + + // Report an unimplemented feature. + MOZ_WARN_UNUSED_RESULT + bool notYetImplemented(const char* what) MOZ_COLD; + + // Report an unrecognized opcode. + MOZ_WARN_UNUSED_RESULT + bool unrecognizedOpcode(Expr expr) MOZ_COLD; + + // ------------------------------------------------------------------------ + // Decoding and validation interface. + + MOZ_WARN_UNUSED_RESULT bool readExpr(Expr* expr); + MOZ_WARN_UNUSED_RESULT bool readFunctionStart(); + MOZ_WARN_UNUSED_RESULT bool readFunctionEnd(ExprType ret); + MOZ_WARN_UNUSED_RESULT bool readReturn(); + MOZ_WARN_UNUSED_RESULT bool readBlock(); + MOZ_WARN_UNUSED_RESULT bool readLoop(); + MOZ_WARN_UNUSED_RESULT bool readIf(); + MOZ_WARN_UNUSED_RESULT bool readElse(); + MOZ_WARN_UNUSED_RESULT bool readEnd(); + MOZ_WARN_UNUSED_RESULT bool readBr(); + MOZ_WARN_UNUSED_RESULT bool readBrIf(); + MOZ_WARN_UNUSED_RESULT bool readBrTable(); + MOZ_WARN_UNUSED_RESULT bool readBrTableEntry(ExprType type, uint32_t* depth); + MOZ_WARN_UNUSED_RESULT bool readUnreachable(); + MOZ_WARN_UNUSED_RESULT bool readUnary(ValType operandType); + MOZ_WARN_UNUSED_RESULT bool readConversion(ValType operandType, ValType resultType); + MOZ_WARN_UNUSED_RESULT bool readBinary(ValType operandType); + MOZ_WARN_UNUSED_RESULT bool readComparison(ValType operandType); + MOZ_WARN_UNUSED_RESULT bool readLoad(ValType resultType, uint32_t byteSize); + MOZ_WARN_UNUSED_RESULT bool readStore(ValType resultType, uint32_t byteSize); + MOZ_WARN_UNUSED_RESULT bool readTrivial(); + MOZ_WARN_UNUSED_RESULT bool readSelect(); + MOZ_WARN_UNUSED_RESULT bool readGetLocal(const ValTypeVector& locals); + MOZ_WARN_UNUSED_RESULT bool readSetLocal(const ValTypeVector& locals); + MOZ_WARN_UNUSED_RESULT bool readGetGlobal(const GlobalDescVector& globals); + MOZ_WARN_UNUSED_RESULT bool readSetGlobal(const GlobalDescVector& globals); + MOZ_WARN_UNUSED_RESULT bool readI32Const(); + MOZ_WARN_UNUSED_RESULT bool readI64Const(); + MOZ_WARN_UNUSED_RESULT bool readF32Const(); + MOZ_WARN_UNUSED_RESULT bool readF64Const(); + MOZ_WARN_UNUSED_RESULT bool readI32x4Const(); + MOZ_WARN_UNUSED_RESULT bool readF32x4Const(); + MOZ_WARN_UNUSED_RESULT bool readB32x4Const(); + MOZ_WARN_UNUSED_RESULT bool readCall(); + MOZ_WARN_UNUSED_RESULT bool readCallIndirect(); + MOZ_WARN_UNUSED_RESULT bool readCallImport(); + MOZ_WARN_UNUSED_RESULT bool readCallArg(ValType type, uint32_t numArgs, uint32_t argIndex, Value* arg); + MOZ_WARN_UNUSED_RESULT bool readCallArgsEnd(uint32_t numArgs); + MOZ_WARN_UNUSED_RESULT bool readCallIndirectCallee(Value* callee); + MOZ_WARN_UNUSED_RESULT bool readCallReturn(ExprType ret); + MOZ_WARN_UNUSED_RESULT bool readAtomicLoad(); + MOZ_WARN_UNUSED_RESULT bool readAtomicStore(); + MOZ_WARN_UNUSED_RESULT bool readAtomicBinOp(); + MOZ_WARN_UNUSED_RESULT bool readAtomicCompareExchange(); + MOZ_WARN_UNUSED_RESULT bool readAtomicExchange(); + MOZ_WARN_UNUSED_RESULT bool readSimdComparison(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readSimdShiftByScalar(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readSimdBooleanReduction(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readExtractLane(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readReplaceLane(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readSplat(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readSwizzle(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readShuffle(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readSimdSelect(ValType simdType); + MOZ_WARN_UNUSED_RESULT bool readSimdCtor(); + MOZ_WARN_UNUSED_RESULT bool readSimdCtorArg(ValType elementType, uint32_t numElements, uint32_t argIndex, Value* arg); + MOZ_WARN_UNUSED_RESULT bool readSimdCtorArgsEnd(uint32_t numElements); + MOZ_WARN_UNUSED_RESULT bool readSimdCtorReturn(ValType simdType); + + // ------------------------------------------------------------------------ + // Translation interface. These methods provide the information obtained + // through decoding. + + int32_t i32() const { MOZ_ASSERT(isI32()); return u_.i32; } + int64_t i64() const { MOZ_ASSERT(isI64()); return u_.i64; } + float f32() const { MOZ_ASSERT(isF32()); return u_.f32; } + double f64() const { MOZ_ASSERT(isF64()); return u_.f64; } + const I32x4& i32x4() const { MOZ_ASSERT(isI32x4()); return u_.i32x4; } + const F32x4& f32x4() const { MOZ_ASSERT(isF32x4()); return u_.f32x4; } + const BrRecord& br() const { MOZ_ASSERT(isBr()); return u_.br; } + const BrIfRecord& brIf() const { MOZ_ASSERT(isBrIf()); return u_.brIf; } + const BrTableRecord& brTable() const { MOZ_ASSERT(isBrTable()); return u_.brTable; } + const UnaryRecord& unary() const { MOZ_ASSERT(isUnary()); return u_.unary; } + const BinaryRecord& binary() const { MOZ_ASSERT(isBinary()); return u_.binary; } + const LoadRecord& load() const { MOZ_ASSERT(isLoad()); return u_.load; } + const StoreRecord& store() const { MOZ_ASSERT(isStore()); return u_.store; } + const SelectRecord& select() const { MOZ_ASSERT(isSelect()); return u_.select; } + const GetVarRecord& getVar() const { MOZ_ASSERT(isGetVar()); return u_.getVar; } + const SetVarRecord& setVar() const { MOZ_ASSERT(isSetVar()); return u_.setVar; } + const CallRecord& call() const { MOZ_ASSERT(isCall()); return u_.call; } + const CallIndirectRecord& callIndirect() const + { MOZ_ASSERT(isCallIndirect()); return u_.callIndirect; } + const CallImportRecord& callImport() const + { MOZ_ASSERT(isCallImport()); return u_.callImport; } + const ReturnRecord& return_() const { MOZ_ASSERT(isReturn()); return u_.return_; } + const IfRecord& if_() const { MOZ_ASSERT(isIf()); return u_.if_; } + const ElseRecord& else_() const { MOZ_ASSERT(isElse()); return u_.else_; } + const EndRecord& end() const { MOZ_ASSERT(isEnd()); return u_.end; } + const AtomicLoadRecord& atomicLoad() const + { MOZ_ASSERT(isAtomicLoad()); return u_.atomicLoad; } + const AtomicStoreRecord& atomicStore() const + { MOZ_ASSERT(isAtomicStore()); return u_.atomicStore; } + const AtomicBinOpRecord& atomicBinOp() const + { MOZ_ASSERT(isAtomicBinOp()); return u_.atomicBinOp; } + const AtomicCompareExchangeRecord& atomicCompareExchange() const + { MOZ_ASSERT(isAtomicCompareExchange()); return u_.atomicCompareExchange; } + const AtomicExchangeRecord& atomicExchange() const + { MOZ_ASSERT(isAtomicExchange()); return u_.atomicExchange; } + const ExtractLaneRecord& extractLane() const + { MOZ_ASSERT(isExtractLane()); return u_.extractLane; } + const ReplaceLaneRecord& replaceLane() const + { MOZ_ASSERT(isReplaceLane()); return u_.replaceLane; } + const SwizzleRecord& swizzle() const + { MOZ_ASSERT(isSwizzle()); return u_.swizzle; } + const ShuffleRecord& shuffle() const + { MOZ_ASSERT(isShuffle()); return u_.shuffle; } + const SimdSelectRecord& simdSelect() const + { MOZ_ASSERT(isSimdSelect()); return u_.simdSelect; } + + // ------------------------------------------------------------------------ + // Stack management. + + // Set the result value of the current top-of-value-stack expression. + void setResult(Value value) { + valueStack_.back().setValue(value); + } + + // Return the result value of the current top-of-value-stack expression. + Value getResult() { + return valueStack_.back().value(); + } + + // Return a reference to the top of the control stack. + ControlItem& controlItem() { + return controlStack_.back().controlItem(); + } +}; + +template +inline bool +ExprIter::typeMismatch(ExprType actual, ExprType expected) +{ + UniqueChars error(JS_smprintf("type mismatch: expression has type %s but expected %s", + ToCString(actual), ToCString(expected))); + if (!error) + return false; + + return fail(error.get()); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::checkType(ExprType actual, ExprType expected) +{ + if (!Validate) { + MOZ_ASSERT(actual == AnyType || actual == expected, "type mismatch"); + return true; + } + + if (MOZ_LIKELY(actual == AnyType || actual == expected)) + return true; + + return typeMismatch(actual, expected); +} + +template +inline bool +ExprIter::notYetImplemented(const char* what) +{ + UniqueChars error(JS_smprintf("not yet implemented: %s", what)); + if (!error) + return false; + + return fail(error.get()); +} + +template +inline bool +ExprIter::unrecognizedOpcode(Expr expr) +{ + UniqueChars error(JS_smprintf("unrecognized opcode: %x", uint32_t(expr))); + if (!error) + return false; + + return fail(error.get()); +} + +template +inline bool +ExprIter::fail(const char* msg) { + return Policy::fail(msg, d_); +} + +template +inline bool +ExprIter::readExpr(Expr* expr) +{ + if (Validate) { + if (MOZ_UNLIKELY(!d_.readExpr(expr))) + return fail("unable to read opcode"); + } else { + *expr = d_.uncheckedReadExpr(); + } + +#ifdef DEBUG + expr_ = *expr; + mozWritePoison(&u_, sizeof(u_)); + isInitialized_ = false; +#endif + + return true; +} + +template +inline bool +ExprIter::readFunctionStart() +{ + MOZ_ASSERT(valueStack_.empty()); + MOZ_ASSERT(controlStack_.empty()); + MOZ_ASSERT(expr_ == Expr::Limit); + + return pushControl(LabelKind::Block); +} + +template +inline bool +ExprIter::readFunctionEnd(ExprType ret) +{ +#ifdef DEBUG + expr_ = Expr::Limit; + mozWritePoison(&u_, sizeof(u_)); +#endif + + if (Validate) { + MOZ_ASSERT(controlStack_.length() > 0); + if (controlStack_.length() != 1) + return fail("unbalanced function body control flow"); + } else { + MOZ_ASSERT(controlStack_.length() == 1); + } + + ExprType type; + Value value; + LabelKind kind; + if (!popControlAfterCheck(&kind, &type, &value)) + return false; + + MOZ_ASSERT(kind == LabelKind::Block); + MOZ_ASSERT(valueStack_.length() == 1); + + if (!IsVoid(ret)) { + if (!checkType(type, ret)) + return false; + } + + u_.return_ = ReturnRecord(value); + return setInitialized(); +} + +template +inline bool +ExprIter::readReturn() +{ + ControlStackEntry& controlItem = controlStack_[0]; + MOZ_ASSERT(controlItem.kind() == LabelKind::Block); + + TypeAndValue tv; + if (!pop(&tv)) + return false; + + infallibleCheckSuccessor(controlItem, tv.type()); + + infalliblePush(AnyType); + + u_.return_ = ReturnRecord(tv.value()); + return setInitialized(); +} + +template +inline void +ExprIter::infallibleCheckSuccessor(ControlStackEntry& controlItem, + ExprType type) +{ + controlItem.type() = Unify(controlItem.type(), type); +} + +template +inline bool +ExprIter::checkBranch(uint32_t relativeDepth, ExprType type) +{ + // FIXME: Don't allow branching to the function-body block for now. + if (Validate && relativeDepth >= controlStack_.length() - 1) + return fail("branch depth exceeds current nesting level"); + + ControlStackEntry& controlItem = + controlStack_[controlStack_.length() - 1 - relativeDepth]; + + if (controlItem.kind() != LabelKind::Loop) + infallibleCheckSuccessor(controlItem, type); + + return true; +} + +template +inline bool +ExprIter::pushControl(LabelKind kind) +{ + size_t length = valueStack_.length(); + + // Push a void value at the start of every control region, in case the + // region is empty. + if (!push(ExprType::Void)) + return false; + + return controlStack_.emplaceBack(kind, length); +} + +template +inline bool +ExprIter::popControl(LabelKind* kind, ExprType* type, Value* value) +{ + MOZ_ASSERT(controlStack_.length() > 0); + if (controlStack_.length() <= 1) + return fail("unbalanced function body control flow"); + + return popControlAfterCheck(kind, type, value); +} + +template +inline bool +ExprIter::popControlAfterCheck(LabelKind* kind, ExprType* type, Value* value) +{ + TypeAndValue tv; + if (!pop(&tv)) + return false; + *value = tv.value(); + + ControlStackEntry controlItem = controlStack_.popCopy(); + *kind = controlItem.kind(); + + infallibleCheckSuccessor(controlItem, tv.type()); + + *type = controlItem.type(); + + // Clear out the value stack up to the start of the block/loop. + valueStack_.shrinkTo(controlItem.valueStackStart()); + + infalliblePush(controlItem.type()); + return true; +} + +template +inline bool +ExprIter::readBlock() +{ + return pushControl(LabelKind::Block); +} + +template +inline bool +ExprIter::readLoop() +{ + return pushControl(LabelKind::Block) && + pushControl(LabelKind::Loop); +} + +template +inline bool +ExprIter::readIf() +{ + Value condition; + if (!popWithType(ExprType::I32, &condition)) + return false; + + u_.if_ = IfRecord(condition); + + return setInitialized() && + pushControl(LabelKind::Then); +} + +template +inline bool +ExprIter::readElse() +{ + ExprType thenType; + Value thenValue; + LabelKind kind; + if (!popControl(&kind, &thenType, &thenValue)) + return false; + + if (Validate && kind != LabelKind::Then) + return fail("else can only be used within an if"); + + // Pop and discard the old then value for now. + TypeAndValue tv; + if (!pop(&tv)) + return false; + + if (!pushControl(LabelKind::Else)) + return false; + + // Initialize the else block's type with the then block's type, so that + // the two get unified. + ControlStackEntry& controlItem = controlStack_.back(); + controlItem.type() = thenType; + + u_.else_ = ElseRecord(thenType, thenValue); + return setInitialized(); +} + +template +inline bool +ExprIter::readEnd() +{ + ExprType type; + Value value; + LabelKind kind; + if (!popControl(&kind, &type, &value)) + return false; + + switch (kind) { + case LabelKind::Block: + break; + case LabelKind::Loop: { + // Note: Propose a spec change: loops don't implicitly have an end label. + + setResult(value); + + LabelKind blockKind; + if (!popControl(&blockKind, &type, &value)) + return false; + + MOZ_ASSERT(blockKind == LabelKind::Block); + break; + } + case LabelKind::Then: + valueStack_.back() = TypeAndValue(ExprType::Void); + type = ExprType::Void; + break; + case LabelKind::Else: + break; + } + + u_.end = EndRecord(kind, type, value); + return setInitialized(); +} + +template +inline bool +ExprIter::readBr() +{ + uint32_t relativeDepth; + if (!readVarU32(&relativeDepth)) + return fail("unable to read br depth"); + + TypeAndValue tv; + if (!pop(&tv)) + return false; + + if (!checkBranch(relativeDepth, tv.type())) + return false; + + if (!push(AnyType)) + return false; + + u_.br = BrRecord(relativeDepth, tv.type(), tv.value()); + return setInitialized(); +} + +template +inline bool +ExprIter::readBrIf() +{ + uint32_t relativeDepth; + if (!readVarU32(&relativeDepth)) + return fail("unable to read br_if depth"); + + Value condition; + if (!popWithType(ExprType::I32, &condition)) + return false; + + // Leave the operand(s) in place; they become our result(s). + TypeAndValue tv; + if (!top(&tv)) + return false; + + if (!checkBranch(relativeDepth, tv.type())) + return false; + + u_.brIf = BrIfRecord(relativeDepth, tv.type(), tv.value(), condition); + return setInitialized(); +} + +template +inline bool +ExprIter::readBrTable() +{ + Value index; + if (!popWithType(ExprType::I32, &index)) + return false; + + uint32_t tableLength; + if (!readVarU32(&tableLength)) + return fail("unable to read br_table table length"); + + TypeAndValue tv; + if (!top(&tv)) + return false; + + u_.brTable = BrTableRecord(tableLength, tv.type(), index); + return setInitialized(); +} + +template +inline bool +ExprIter::readBrTableEntry(ExprType type, uint32_t* depth) +{ + return readFixedU32(depth) && + checkBranch(*depth, type); +} + +template +inline bool +ExprIter::readUnreachable() +{ + return push(AnyType); +} + +template +inline bool +ExprIter::readUnary(ValType operandType) +{ + Value op; + if (!popWithType(ToExprType(operandType), &op)) + return false; + + infalliblePush(ToExprType(operandType)); + + u_.unary = UnaryRecord(op); + return setInitialized(); +} + +template +inline bool +ExprIter::readConversion(ValType operandType, ValType resultType) +{ + Value op; + if (!popWithType(ToExprType(operandType), &op)) + return false; + + infalliblePush(ToExprType(resultType)); + + u_.unary = UnaryRecord(op); + return setInitialized(); +} + +template +inline bool +ExprIter::readBinary(ValType operandType) +{ + Value rhs; + if (!popWithType(ToExprType(operandType), &rhs)) + return false; + + Value lhs; + if (!popWithType(ToExprType(operandType), &lhs)) + return false; + + infalliblePush(ToExprType(operandType)); + + u_.binary = BinaryRecord(lhs, rhs); + return setInitialized(); +} + +template +inline bool +ExprIter::readComparison(ValType operandType) +{ + Value rhs; + if (!popWithType(ToExprType(operandType), &rhs)) + return false; + + Value lhs; + if (!popWithType(ToExprType(operandType), &lhs)) + return false; + + infalliblePush(ExprType::I32); + + u_.binary = BinaryRecord(lhs, rhs); + return setInitialized(); +} + +template +inline bool +ExprIter::readLinearMemoryAddress(uint32_t byteSize, LinearMemoryAddress* addr) +{ + Value base; + if (!popWithType(ExprType::I32, &base)) + return false; + + uint8_t alignLog2; + if (!readFixedU8(&alignLog2)) + return fail("unable to read load alignment"); + if (Validate && (alignLog2 >= 32 || (uint32_t(1) << alignLog2) > byteSize)) + return fail("greater than natural alignment"); + uint32_t align = uint32_t(1) << alignLog2; + + uint32_t offset; + if (!readVarU32(&offset)) + return fail("unable to read load offset"); + + *addr = LinearMemoryAddress(base, offset, align); + return true; +} + +template +inline bool +ExprIter::readLoad(ValType resultType, uint32_t byteSize) +{ + if (!readLinearMemoryAddress(byteSize, &u_.load.addr)) + return false; + + infalliblePush(ToExprType(resultType)); + return setInitialized(); +} + +template +inline bool +ExprIter::readStore(ValType resultType, uint32_t byteSize) +{ + Value value; + if (!popWithType(ToExprType(resultType), &value)) + return false; + + if (!readLinearMemoryAddress(byteSize, &u_.store.addr)) + return false; + + infalliblePush(TypeAndValue(ToExprType(resultType), value)); + + u_.store.value = value; + return setInitialized(); +} + +template +inline bool +ExprIter::readTrivial() +{ + return push(ExprType::Void); +} + +template +inline bool +ExprIter::readSelect() +{ + Value condition; + if (!popWithType(ExprType::I32, &condition)) + return false; + + TypeAndValue falseValue; + if (!pop(&falseValue)) + return false; + + TypeAndValue trueValue; + if (!pop(&trueValue)) + return false; + + ExprType resultType = Unify(trueValue.type(), falseValue.type()); + infalliblePush(resultType); + + u_.select = SelectRecord(resultType, trueValue.value(), falseValue.value(), + condition); + return setInitialized(); +} + +template +inline bool +ExprIter::readGetLocal(const ValTypeVector& locals) +{ + uint32_t id; + if (!readVarU32(&id)) + return false; + + if (Validate && id >= locals.length()) + return fail("get_local index out of range"); + + if (!push(ToExprType(locals[id]))) + return false; + + u_.getVar = GetVarRecord(id); + return setInitialized(); +} + +template +inline bool +ExprIter::readSetLocal(const ValTypeVector& locals) +{ + uint32_t id; + if (!readVarU32(&id)) + return false; + + if (Validate && id >= locals.length()) + return fail("set_local index out of range"); + + Value value; + if (!topWithType(ToExprType(locals[id]), &value)) + return false; + + u_.setVar = SetVarRecord(id, value); + return setInitialized(); +} + +template +inline bool +ExprIter::readGetGlobal(const GlobalDescVector& globals) +{ + uint32_t id; + if (!readVarU32(&id)) + return false; + + if (!push(ToExprType(globals[id].type))) + return false; + + u_.getVar = GetVarRecord(id); + return setInitialized(); +} + +template +inline bool +ExprIter::readSetGlobal(const GlobalDescVector& globals) +{ + uint32_t id; + if (!readVarU32(&id)) + return false; + + Value value; + if (!topWithType(ToExprType(globals[id].type), &value)) + return false; + + u_.setVar = SetVarRecord(id, value); + return setInitialized(); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::readI32Const() +{ + return readVarS32(&u_.i32) && + setInitialized() && + push(ExprType::I32); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::readI64Const() +{ + return readVarS64(&u_.i64) && + setInitialized() && + push(ExprType::I64); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::readF32Const() +{ + if (!readFixedF32(&u_.f32)) + return false; + + if (Validate && mozilla::IsNaN(u_.f32)) { + const float jsNaN = (float)JS::GenericNaN(); + if (memcmp(&u_.f32, &jsNaN, sizeof(u_.f32)) != 0) + return notYetImplemented("NaN literals with custom payloads"); + } + + return setInitialized() && + push(ExprType::F32); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::readF64Const() +{ + if (!readFixedF64(&u_.f64)) + return false; + + if (Validate && mozilla::IsNaN(u_.f64)) { + const double jsNaN = JS::GenericNaN(); + if (memcmp(&u_.f64, &jsNaN, sizeof(u_.f64)) != 0) + return notYetImplemented("NaN literals with custom payloads"); + } + + return setInitialized() && + push(ExprType::F64); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::readI32x4Const() +{ + return readFixedI32x4(&u_.i32x4) && + setInitialized() && + push(ExprType::I32x4); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::readF32x4Const() +{ + return readFixedF32x4(&u_.f32x4) && + setInitialized() && + push(ExprType::F32x4); +} + +template +inline MOZ_WARN_UNUSED_RESULT bool +ExprIter::readB32x4Const() +{ + return readFixedI32x4(&u_.i32x4) && + setInitialized() && + push(ExprType::B32x4); +} + +template +inline bool +ExprIter::readCall() +{ + uint32_t funcIndex; + if (!readVarU32(&funcIndex)) + return fail("unable to read call function index"); + + u_.call = CallRecord(funcIndex); + return setInitialized(); +} + +template +inline bool +ExprIter::readCallIndirect() +{ + uint32_t sigIndex; + if (!readVarU32(&sigIndex)) + return fail("unable to read call_indirect signature index"); + + u_.callIndirect = CallIndirectRecord(sigIndex); + return setInitialized(); +} + +template +inline bool +ExprIter::readCallImport() +{ + uint32_t importIndex; + if (!readVarU32(&importIndex)) + return fail("unable to read call_import import index"); + + u_.callImport = CallImportRecord(importIndex); + return setInitialized(); +} + +template +inline bool +ExprIter::readCallArg(ValType type, uint32_t numArgs, uint32_t argIndex, Value* arg) +{ + TypeAndValue tv; + if (!peek(numArgs - argIndex, &tv)) + return false; + if (!checkType(tv.type(), ToExprType(type))) + return false; + + *arg = tv.value(); + return true; +} + +template +inline bool +ExprIter::readCallArgsEnd(uint32_t numArgs) +{ + MOZ_ASSERT(numArgs <= valueStack_.length()); + valueStack_.shrinkBy(numArgs); + return true; +} + +template +inline bool +ExprIter::readCallIndirectCallee(Value* callee) +{ + return popWithType(ExprType::I32, callee); +} + +template +inline bool +ExprIter::readCallReturn(ExprType ret) +{ + return push(ret); +} + +template +inline bool +ExprIter::readAtomicLoad() +{ + Scalar::Type viewType; + if (!readAtomicViewType(&viewType)) + return false; + + uint32_t byteSize = Scalar::byteSize(viewType); + if (!readLinearMemoryAddress(byteSize, &u_.atomicLoad.addr)) + return false; + + infalliblePush(ExprType::I32); + + u_.atomicLoad.viewType = viewType; + return setInitialized(); +} + +template +inline bool +ExprIter::readAtomicStore() +{ + Scalar::Type viewType; + if (!readAtomicViewType(&viewType)) + return false; + + uint32_t byteSize = Scalar::byteSize(viewType); + if (!readLinearMemoryAddress(byteSize, &u_.atomicStore.addr)) + return false; + + Value value; + if (!popWithType(ExprType::I32, &value)) + return false; + + infalliblePush(ExprType::I32); + + u_.atomicStore.viewType = viewType; + u_.atomicStore.value = value; + return setInitialized(); +} + +template +inline bool +ExprIter::readAtomicBinOp() +{ + Scalar::Type viewType; + if (!readAtomicViewType(&viewType)) + return false; + + jit::AtomicOp op; + if (!readAtomicBinOpOp(&op)) + return false; + + uint32_t byteSize = Scalar::byteSize(viewType); + if (!readLinearMemoryAddress(byteSize, &u_.atomicStore.addr)) + return false; + + Value value; + if (!popWithType(ExprType::I32, &value)) + return false; + + infalliblePush(ExprType::I32); + + u_.atomicBinOp.viewType = viewType; + u_.atomicBinOp.op = op; + u_.atomicBinOp.value = value; + return setInitialized(); +} + +template +inline bool +ExprIter::readAtomicCompareExchange() +{ + Scalar::Type viewType; + if (!readAtomicViewType(&viewType)) + return false; + + uint32_t byteSize = Scalar::byteSize(viewType); + if (!readLinearMemoryAddress(byteSize, &u_.atomicStore.addr)) + return false; + + Value new_; + if (!popWithType(ExprType::I32, &new_)) + return false; + + Value old; + if (!popWithType(ExprType::I32, &old)) + return false; + + infalliblePush(ExprType::I32); + + u_.atomicCompareExchange.viewType = viewType; + u_.atomicCompareExchange.oldValue = old; + u_.atomicCompareExchange.newValue = new_; + return setInitialized(); +} + +template +inline bool +ExprIter::readAtomicExchange() +{ + Scalar::Type viewType; + if (!readAtomicViewType(&viewType)) + return false; + + uint32_t byteSize = Scalar::byteSize(viewType); + if (!readLinearMemoryAddress(byteSize, &u_.atomicStore.addr)) + return false; + + Value value; + if (!popWithType(ExprType::I32, &value)) + return false; + + infalliblePush(ExprType::I32); + + u_.atomicExchange.viewType = viewType; + u_.atomicExchange.value = value; + return setInitialized(); +} + +template +inline bool +ExprIter::readSimdComparison(ValType simdType) +{ + Value rhs; + if (!popWithType(ToExprType(simdType), &rhs)) + return false; + + Value lhs; + if (!popWithType(ToExprType(simdType), &lhs)) + return false; + + infalliblePush(ToExprType(SimdBoolType(simdType))); + + u_.binary = BinaryRecord(lhs, rhs); + return setInitialized(); +} + +template +inline bool +ExprIter::readSimdShiftByScalar(ValType simdType) +{ + Value count; + if (!popWithType(ExprType::I32, &count)) + return false; + + Value vector; + if (!popWithType(ToExprType(simdType), &vector)) + return false; + + infalliblePush(ToExprType(simdType)); + + u_.binary = BinaryRecord(vector, count); + return setInitialized(); +} + +template +inline bool +ExprIter::readSimdBooleanReduction(ValType simdType) +{ + Value op; + if (!popWithType(ToExprType(simdType), &op)) + return false; + + infalliblePush(ExprType::I32); + + u_.unary = UnaryRecord(op); + return setInitialized(); +} + +template +inline bool +ExprIter::readExtractLane(ValType simdType) +{ + uint32_t lane; + if (!readVarU32(&lane)) + return false; + if (Validate && lane >= NumSimdElements(simdType)) + return fail("simd lane out of bounds for simd type"); + + Value value; + if (!popWithType(ToExprType(simdType), &value)) + return false; + + infalliblePush(ToExprType(SimdElementType(simdType))); + + u_.extractLane = ExtractLaneRecord(jit::SimdLane(lane), value); + return setInitialized(); +} + +template +inline bool +ExprIter::readReplaceLane(ValType simdType) +{ + uint32_t lane; + if (!readVarU32(&lane)) + return false; + if (Validate && lane >= NumSimdElements(simdType)) + return fail("simd lane out of bounds for simd type"); + + Value scalar; + if (!popWithType(ToExprType(SimdElementType(simdType)), &scalar)) + return false; + + Value vector; + if (!popWithType(ToExprType(simdType), &vector)) + return false; + + infalliblePush(ToExprType(simdType)); + + u_.replaceLane = ReplaceLaneRecord(jit::SimdLane(lane), vector, scalar); + return setInitialized(); +} + +template +inline bool +ExprIter::readSplat(ValType simdType) +{ + Value op; + if (!popWithType(ToExprType(SimdElementType(simdType)), &op)) + return false; + + infalliblePush(ToExprType(simdType)); + + u_.unary = UnaryRecord(op); + return setInitialized(); +} + +template +inline bool +ExprIter::readSwizzle(ValType simdType) +{ + uint8_t lanes[4]; + uint32_t numSimdLanes = NumSimdElements(simdType); + MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(lanes)); + for (uint32_t i = 0; i < numSimdLanes; ++i) { + if (!readFixedU8(&lanes[i])) + return false; + if (Validate && lanes[i] >= numSimdLanes) + return fail("swizzle index out of bounds"); + } + + Value vector; + if (!popWithType(ToExprType(simdType), &vector)) + return false; + + infalliblePush(ToExprType(simdType)); + + u_.swizzle = SwizzleRecord(lanes, vector); + return setInitialized(); +} + +template +inline bool +ExprIter::readShuffle(ValType simdType) +{ + uint8_t lanes[4]; + uint32_t numSimdLanes = NumSimdElements(simdType); + MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(lanes)); + for (uint32_t i = 0; i < numSimdLanes; ++i) { + if (!readFixedU8(&lanes[i])) + return false; + if (Validate && lanes[i] >= numSimdLanes * 2) + return fail("shuffle index out of bounds"); + } + + Value rhs; + if (!popWithType(ToExprType(simdType), &rhs)) + return false; + + Value lhs; + if (!popWithType(ToExprType(simdType), &lhs)) + return false; + + infalliblePush(ToExprType(simdType)); + + u_.shuffle = ShuffleRecord(lanes, lhs, rhs); + return setInitialized(); +} + +template +inline bool +ExprIter::readSimdSelect(ValType simdType) +{ + Value falseValue; + if (!popWithType(ToExprType(simdType), &falseValue)) + return false; + Value trueValue; + if (!popWithType(ToExprType(simdType), &trueValue)) + return false; + Value condition; + if (!popWithType(ToExprType(SimdBoolType(simdType)), &condition)) + return false; + + infalliblePush(ToExprType(simdType)); + + u_.simdSelect = SimdSelectRecord(trueValue, falseValue, condition); + return setInitialized(); +} + +template +inline bool +ExprIter::readSimdCtor() +{ + return setInitialized(); +} + +template +inline bool +ExprIter::readSimdCtorArg(ValType elementType, uint32_t numElements, uint32_t index, + Value* arg) +{ + TypeAndValue tv; + if (!peek(numElements - index, &tv)) + return false; + if (!checkType(tv.type(), ToExprType(elementType))) + return false; + + *arg = tv.value(); + return true; +} + +template +inline bool +ExprIter::readSimdCtorArgsEnd(uint32_t numElements) +{ + MOZ_ASSERT(numElements <= valueStack_.length()); + valueStack_.shrinkBy(numElements); + return true; +} + +template +inline bool +ExprIter::readSimdCtorReturn(ValType simdType) +{ + return push(ToExprType(simdType)); +} + +} // namespace wasm +} // namespace js + +namespace mozilla { + +// Specialize IsPod for the Nothing specializations. +template<> struct IsPod> : TrueType {}; +template<> struct IsPod> : TrueType {}; + +} // namespace mozilla + +#endif // wasm_iterator_h diff --git a/js/src/asmjs/WasmBinaryToText.cpp b/js/src/asmjs/WasmBinaryToText.cpp index 379a98816089..b28b54d29d89 100644 --- a/js/src/asmjs/WasmBinaryToText.cpp +++ b/js/src/asmjs/WasmBinaryToText.cpp @@ -1000,7 +1000,7 @@ RenderExpr(WasmRenderContext& c) return RenderLoop(c); case Expr::If: return RenderIfElse(c, false); - case Expr::IfElse: + case Expr::Else: return RenderIfElse(c, true); case Expr::I32Clz: case Expr::I32Ctz: @@ -1791,7 +1791,11 @@ wasm::BinaryToText(JSContext* cx, const uint8_t* bytes, size_t length, StringBuf WasmRenderContext c(cx, d, buffer); - if (!RenderModule(c)) { + if (!c.buffer.append("Binary-to-text is temporarily unavailable\n")) + return false; + + // FIXME: Implement binary-to-text and re-enable this. + if (0 && !RenderModule(c)) { if (!cx->isExceptionPending()) ReportOutOfMemory(cx); return false; diff --git a/js/src/asmjs/WasmGenerator.cpp b/js/src/asmjs/WasmGenerator.cpp index 855456835bfa..b2cf87fec949 100644 --- a/js/src/asmjs/WasmGenerator.cpp +++ b/js/src/asmjs/WasmGenerator.cpp @@ -20,6 +20,7 @@ #include "mozilla/EnumeratedRange.h" +#include "asmjs/WasmIonCompile.h" #include "asmjs/WasmStubs.h" #include "jit/MacroAssembler-inl.h" @@ -54,7 +55,8 @@ ModuleGenerator::ModuleGenerator(ExclusiveContext* cx) tasks_(cx), freeTasks_(cx), activeFunc_(nullptr), - finishedFuncs_(false) + startedFuncDefs_(false), + finishedFuncDefs_(false) { MOZ_ASSERT(IsCompilingAsmJS()); } @@ -563,9 +565,9 @@ ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* g } bool -ModuleGenerator::allocateGlobalVar(ValType type, bool isConst, uint32_t* index) +ModuleGenerator::allocateGlobal(ValType type, bool isConst, uint32_t* index) { - MOZ_ASSERT(!startedFuncDefs()); + MOZ_ASSERT(!startedFuncDefs_); unsigned width = 0; switch (type) { case ValType::I32: @@ -591,7 +593,7 @@ ModuleGenerator::allocateGlobalVar(ValType type, bool isConst, uint32_t* index) return false; *index = shared_->globals.length(); - return shared_->globals.append(AsmJSGlobalVariable(ToExprType(type), offset, isConst)); + return shared_->globals.append(GlobalDesc(type, offset, isConst)); } void @@ -731,10 +733,8 @@ ModuleGenerator::addMemoryExport(UniqueChars fieldName) bool ModuleGenerator::startFuncDefs() { - MOZ_ASSERT(!startedFuncDefs()); - threadView_ = MakeUnique(*shared_); - if (!threadView_) - return false; + MOZ_ASSERT(!startedFuncDefs_); + MOZ_ASSERT(!finishedFuncDefs_); uint32_t numTasks; if (ParallelCompilationEnabled(cx_) && @@ -759,23 +759,24 @@ ModuleGenerator::startFuncDefs() return false; JSRuntime* rt = cx_->compartment()->runtimeFromAnyThread(); for (size_t i = 0; i < numTasks; i++) - tasks_.infallibleEmplaceBack(rt, *threadView_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE); + tasks_.infallibleEmplaceBack(rt, *shared_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE); if (!freeTasks_.reserve(numTasks)) return false; for (size_t i = 0; i < numTasks; i++) freeTasks_.infallibleAppend(&tasks_[i]); - MOZ_ASSERT(startedFuncDefs()); + startedFuncDefs_ = true; + MOZ_ASSERT(!finishedFuncDefs_); return true; } bool ModuleGenerator::startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg) { - MOZ_ASSERT(startedFuncDefs()); + MOZ_ASSERT(startedFuncDefs_); MOZ_ASSERT(!activeFunc_); - MOZ_ASSERT(!finishedFuncs_); + MOZ_ASSERT(!finishedFuncDefs_); if (freeTasks_.empty() && !finishOutstandingTask()) return false; @@ -827,9 +828,9 @@ ModuleGenerator::finishFuncDef(uint32_t funcIndex, unsigned generateTime, Functi bool ModuleGenerator::finishFuncDefs() { - MOZ_ASSERT(startedFuncDefs()); + MOZ_ASSERT(startedFuncDefs_); MOZ_ASSERT(!activeFunc_); - MOZ_ASSERT(!finishedFuncs_); + MOZ_ASSERT(!finishedFuncDefs_); while (outstanding_ > 0) { if (!finishOutstandingTask()) @@ -840,7 +841,7 @@ ModuleGenerator::finishFuncDefs() MOZ_ASSERT(funcIsDefined(funcIndex)); module_->functionBytes = masm_.size(); - finishedFuncs_ = true; + finishedFuncDefs_ = true; return true; } @@ -883,7 +884,7 @@ ModuleGenerator::finish(CacheableCharsVector&& prettyFuncNames, SlowFunctionVector* slowFuncs) { MOZ_ASSERT(!activeFunc_); - MOZ_ASSERT(finishedFuncs_); + MOZ_ASSERT(finishedFuncDefs_); UniqueStaticLinkData link = MakeUnique(); if (!link) diff --git a/js/src/asmjs/WasmGenerator.h b/js/src/asmjs/WasmGenerator.h index 31cb12f9a3a6..dc83723b5514 100644 --- a/js/src/asmjs/WasmGenerator.h +++ b/js/src/asmjs/WasmGenerator.h @@ -20,7 +20,6 @@ #define wasm_generator_h #include "asmjs/WasmBinary.h" -#include "asmjs/WasmIonCompile.h" #include "asmjs/WasmModule.h" #include "jit/MacroAssembler.h" @@ -47,12 +46,11 @@ struct SlowFunction typedef Vector SlowFunctionVector; // The ModuleGeneratorData holds all the state shared between the -// ModuleGenerator and ModuleGeneratorThreadView. The ModuleGeneratorData -// is encapsulated by ModuleGenerator/ModuleGeneratorThreadView classes which -// present a race-free interface to the code in each thread assuming any given -// element is initialized by the ModuleGenerator thread before an index to that -// element is written to Bytes sent to a ModuleGeneratorThreadView thread. -// Once created, the Vectors are never resized. +// ModuleGenerator thread and background compile threads. The background +// threads are given a read-only view of the ModuleGeneratorData and the +// ModuleGenerator is careful to initialize, and never subsequently mutate, +// any given datum before being read by a background thread. In particular, +// once created, the Vectors are never resized. struct TableModuleGeneratorData { @@ -82,18 +80,6 @@ struct ImportModuleGeneratorData typedef Vector ImportModuleGeneratorDataVector; -struct AsmJSGlobalVariable -{ - ExprType type; - unsigned globalDataOffset; - bool isConst; - AsmJSGlobalVariable(ExprType type, unsigned offset, bool isConst) - : type(type), globalDataOffset(offset), isConst(isConst) - {} -}; - -typedef Vector AsmJSGlobalVariableVector; - struct ModuleGeneratorData { CompileArgs args; @@ -105,7 +91,7 @@ struct ModuleGeneratorData TableModuleGeneratorDataVector sigToTable; DeclaredSigPtrVector funcSigs; ImportModuleGeneratorDataVector imports; - AsmJSGlobalVariableVector globals; + GlobalDescVector globals; uint32_t funcSigIndex(uint32_t funcIndex) const { return funcSigs[funcIndex] - sigs.begin(); @@ -118,51 +104,6 @@ struct ModuleGeneratorData typedef UniquePtr UniqueModuleGeneratorData; -// The ModuleGeneratorThreadView class presents a restricted, read-only view of -// the shared state needed by helper threads. There is only one -// ModuleGeneratorThreadView object owned by ModuleGenerator and referenced by -// all compile tasks. - -class ModuleGeneratorThreadView -{ - const ModuleGeneratorData& shared_; - - public: - explicit ModuleGeneratorThreadView(const ModuleGeneratorData& shared) - : shared_(shared) - {} - CompileArgs args() const { - return shared_.args; - } - bool isAsmJS() const { - return shared_.kind == ModuleKind::AsmJS; - } - uint32_t numTableElems() const { - MOZ_ASSERT(!isAsmJS()); - return shared_.numTableElems; - } - uint32_t minHeapLength() const { - return shared_.minHeapLength; - } - const DeclaredSig& sig(uint32_t sigIndex) const { - return shared_.sigs[sigIndex]; - } - const TableModuleGeneratorData& sigToTable(uint32_t sigIndex) const { - return shared_.sigToTable[sigIndex]; - } - const DeclaredSig& funcSig(uint32_t funcIndex) const { - MOZ_ASSERT(shared_.funcSigs[funcIndex]); - return *shared_.funcSigs[funcIndex]; - } - const ImportModuleGeneratorData& import(uint32_t importIndex) const { - MOZ_ASSERT(shared_.imports[importIndex].sig); - return shared_.imports[importIndex]; - } - const AsmJSGlobalVariable& globalVar(uint32_t globalIndex) const { - return shared_.globals[globalIndex]; - } -}; - // A ModuleGenerator encapsulates the creation of a wasm module. During the // lifetime of a ModuleGenerator, a sequence of FunctionGenerators are created // and destroyed to compile the individual function bodies. After generating all @@ -171,7 +112,6 @@ class ModuleGeneratorThreadView class MOZ_STACK_CLASS ModuleGenerator { - typedef UniquePtr UniqueModuleGeneratorThreadView; typedef HashMap FuncIndexMap; ExclusiveContext* cx_; @@ -197,13 +137,13 @@ class MOZ_STACK_CLASS ModuleGenerator // Parallel compilation bool parallel_; uint32_t outstanding_; - UniqueModuleGeneratorThreadView threadView_; Vector tasks_; Vector freeTasks_; // Assertions - FunctionGenerator* activeFunc_; - bool finishedFuncs_; + DebugOnly activeFunc_; + DebugOnly startedFuncDefs_; + DebugOnly finishedFuncDefs_; bool finishOutstandingTask(); bool funcIsDefined(uint32_t funcIndex) const; @@ -213,7 +153,6 @@ class MOZ_STACK_CLASS ModuleGenerator bool finishCodegen(StaticLinkData* link); bool finishStaticLinkData(uint8_t* code, uint32_t codeBytes, StaticLinkData* link); bool addImport(const Sig& sig, uint32_t globalDataOffset); - bool startedFuncDefs() const { return !!threadView_; } bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset); public: @@ -238,6 +177,10 @@ class MOZ_STACK_CLASS ModuleGenerator uint32_t numFuncSigs() const { return module_->numFuncs; } const DeclaredSig& funcSig(uint32_t funcIndex) const; + // Globals: + bool allocateGlobal(ValType type, bool isConst, uint32_t* index); + const GlobalDesc& global(unsigned index) const { return shared_->globals[index]; } + // Imports: uint32_t numImports() const; const ImportModuleGeneratorData& import(uint32_t index) const; @@ -264,10 +207,6 @@ class MOZ_STACK_CLASS ModuleGenerator void initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices); void bumpMinHeapLength(uint32_t newMinHeapLength); - // asm.js global variables: - bool allocateGlobalVar(ValType type, bool isConst, uint32_t* index); - const AsmJSGlobalVariable& globalVar(unsigned index) const { return shared_->globals[index]; } - // Return a ModuleData object which may be used to construct a Module, the // StaticLinkData required to call Module::staticallyLink, and the list of // functions that took a long time to compile. diff --git a/js/src/asmjs/WasmIonCompile.cpp b/js/src/asmjs/WasmIonCompile.cpp index 56c517cc3d80..91999c89181d 100644 --- a/js/src/asmjs/WasmIonCompile.cpp +++ b/js/src/asmjs/WasmIonCompile.cpp @@ -18,6 +18,7 @@ #include "asmjs/WasmIonCompile.h" +#include "asmjs/WasmBinaryIterator.h" #include "asmjs/WasmGenerator.h" #include "jit/CodeGenerator.h" @@ -29,8 +30,21 @@ using namespace js::wasm; using mozilla::DebugOnly; using mozilla::Maybe; +namespace { + typedef Vector BlockVector; +struct IonCompilePolicy : ExprIterPolicy +{ + // We store SSA definitions in the value stack. + typedef MDefinition* Value; + + // We store loop headers and then/else blocks in the control flow stack. + typedef MBasicBlock* ControlItem; +}; + +typedef ExprIter IonExprIter; + // Encapsulates the compilation of a single function in an asm.js module. The // function compiler handles the creation and final backend compilation of the // MIR graph. @@ -55,8 +69,8 @@ class FunctionCompiler private: typedef Vector CallVector; - ModuleGeneratorThreadView& mg_; - Decoder& decoder_; + const ModuleGeneratorData& mg_; + IonExprIter iter_; const FuncBytes& func_; const ValTypeVector& locals_; size_t lastReadCallSite_; @@ -77,14 +91,14 @@ class FunctionCompiler FuncCompileResults& compileResults_; public: - FunctionCompiler(ModuleGeneratorThreadView& mg, + FunctionCompiler(const ModuleGeneratorData& mg, Decoder& decoder, const FuncBytes& func, const ValTypeVector& locals, MIRGenerator& mirGen, FuncCompileResults& compileResults) : mg_(mg), - decoder_(decoder), + iter_(IonCompilePolicy(), decoder), func_(func), locals_(locals), lastReadCallSite_(0), @@ -99,7 +113,8 @@ class FunctionCompiler compileResults_(compileResults) {} - ModuleGeneratorThreadView& mg() const { return mg_; } + const ModuleGeneratorData& mg() const { return mg_; } + IonExprIter& iter() { return iter_; } TempAllocator& alloc() const { return alloc_; } MacroAssembler& masm() const { return compileResults_.masm(); } const Sig& sig() const { return func_.sig(); } @@ -176,7 +191,7 @@ class FunctionCompiler } #endif MOZ_ASSERT(inDeadCode()); - MOZ_ASSERT(decoder_.done(), "all bytes must be consumed"); + MOZ_ASSERT(done(), "all bytes must be consumed"); MOZ_ASSERT(func_.callSiteLineNums().length() == lastReadCallSite_); } @@ -193,7 +208,7 @@ class FunctionCompiler return curBlock_->getSlot(info().localSlot(slot)); } - ValType localType(unsigned slot) const { return locals_[slot]; } + const ValTypeVector& locals() const { return locals_; } /***************************** Code generation (after local scope setup) */ @@ -665,7 +680,7 @@ class FunctionCompiler void addInterruptCheck() { - if (mg_.args().useSignalHandlersForInterrupt) + if (mg_.args.useSignalHandlersForInterrupt) return; if (inDeadCode()) @@ -847,7 +862,7 @@ class FunctionCompiler } MAsmJSLoadFuncPtr* ptrFun; - if (mg().isAsmJS()) { + if (mg().kind == ModuleKind::AsmJS) { MOZ_ASSERT(IsPowerOfTwo(length)); MConstant* mask = MConstant::New(alloc(), Int32Value(length - 1)); curBlock_->add(mask); @@ -861,10 +876,10 @@ class FunctionCompiler // However, these signatures may still be called (it is not a validation error) // so we instead have a flag alwaysThrow which throws an exception instead of loading // the function pointer from the (non-existant) array. - MOZ_ASSERT(!length || length == mg_.numTableElems()); + MOZ_ASSERT(!length || length == mg_.numTableElems); bool alwaysThrow = !length; - ptrFun = MAsmJSLoadFuncPtr::New(alloc(), index, mg_.numTableElems(), alwaysThrow, + ptrFun = MAsmJSLoadFuncPtr::New(alloc(), index, mg_.numTableElems, alwaysThrow, globalDataOffset); curBlock_->add(ptrFun); } @@ -924,32 +939,6 @@ class FunctionCompiler curBlock_ = nullptr; } - bool branchAndStartThen(MDefinition* cond, MBasicBlock** thenBlock, MBasicBlock** elseBlock) - { - if (inDeadCode()) - return true; - - bool hasThenBlock = *thenBlock != nullptr; - bool hasElseBlock = *elseBlock != nullptr; - - if (!hasThenBlock && !newBlock(curBlock_, thenBlock)) - return false; - if (!hasElseBlock && !newBlock(curBlock_, elseBlock)) - return false; - - curBlock_->end(MTest::New(alloc(), cond, *thenBlock, *elseBlock)); - - // Only add as a predecessor if newBlock hasn't been called (as it does it for us) - if (hasThenBlock && !(*thenBlock)->addPredecessor(alloc(), curBlock_)) - return false; - if (hasElseBlock && !(*elseBlock)->addPredecessor(alloc(), curBlock_)) - return false; - - curBlock_ = *thenBlock; - mirGraph().moveBlockToEnd(curBlock_); - return true; - } - private: static bool hasPushed(MBasicBlock* block) { @@ -1013,52 +1002,93 @@ class FunctionCompiler } } - bool joinIf(MBasicBlock* joinBlock, BlockVector* blocks, MDefinition** def) - { - MOZ_ASSERT_IF(curBlock_, blocks->back() == curBlock_); - curBlock_ = joinBlock; - return joinIfElse(nullptr, blocks, def); - } - - void switchToElse(MBasicBlock* elseBlock) - { - if (!elseBlock) - return; - curBlock_ = elseBlock; - mirGraph().moveBlockToEnd(curBlock_); - } - - bool addJoinPredecessor(MDefinition* def, BlockVector* blocks) + private: + void addJoinPredecessor(MDefinition* def, MBasicBlock** joinPred) { + *joinPred = curBlock_; if (inDeadCode()) - return true; + return; pushDef(def); - return blocks->append(curBlock_); } - bool joinIfElse(MDefinition* elseDef, BlockVector* blocks, MDefinition** def) + public: + bool branchAndStartThen(MDefinition* cond, MBasicBlock** elseBlock) { - if (!addJoinPredecessor(elseDef, blocks)) - return false; - - auto getBlock = [&](size_t i) -> MBasicBlock* { return (*blocks)[i]; }; - ensurePushInvariants(getBlock, blocks->length()); - - if (blocks->empty()) { - *def = nullptr; - return true; - } - - MBasicBlock* join; - if (!goToNewBlock((*blocks)[0], &join)) - return false; - for (size_t i = 1; i < blocks->length(); i++) { - if (!goToExistingBlock((*blocks)[i], join)) + if (inDeadCode()) { + *elseBlock = nullptr; + } else { + MBasicBlock* thenBlock; + if (!newBlock(curBlock_, &thenBlock)) return false; + if (!newBlock(curBlock_, elseBlock)) + return false; + + curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock)); + + curBlock_ = thenBlock; + mirGraph().moveBlockToEnd(curBlock_); + } + + return startBlock(); + } + + bool switchToElse(MBasicBlock* elseBlock, MBasicBlock** thenJoinPred) + { + MDefinition* ifDef; + if (!finishBlock(&ifDef)) + return false; + + if (!elseBlock) { + *thenJoinPred = nullptr; + } else { + addJoinPredecessor(ifDef, thenJoinPred); + + curBlock_ = elseBlock; + mirGraph().moveBlockToEnd(curBlock_); + } + + return startBlock(); + } + + bool joinIfElse(MBasicBlock* thenJoinPred, MDefinition** def) + { + MDefinition* elseDef; + if (!finishBlock(&elseDef)) + return false; + + if (!thenJoinPred && inDeadCode()) { + *def = nullptr; + } else { + MBasicBlock* elseJoinPred; + addJoinPredecessor(elseDef, &elseJoinPred); + + mozilla::Array blocks; + size_t numJoinPreds = 0; + if (thenJoinPred) + blocks[numJoinPreds++] = thenJoinPred; + if (elseJoinPred) + blocks[numJoinPreds++] = elseJoinPred; + + auto getBlock = [&](size_t i) -> MBasicBlock* { return blocks[i]; }; + ensurePushInvariants(getBlock, numJoinPreds); + + if (numJoinPreds == 0) { + *def = nullptr; + return true; + } + + MBasicBlock* join; + if (!goToNewBlock(blocks[0], &join)) + return false; + for (size_t i = 1; i < numJoinPreds; ++i) { + if (!goToExistingBlock(blocks[i], join)) + return false; + } + + curBlock_ = join; + *def = popDefIfPushed(); } - curBlock_ = join; - *def = popDefIfPushed(); return true; } @@ -1314,38 +1344,13 @@ class FunctionCompiler /************************************************************ DECODING ***/ - uint8_t readU8() { return decoder_.uncheckedReadFixedU8(); } - uint32_t readU32() { return decoder_.uncheckedReadFixedU32(); } - uint32_t readVarS32() { return decoder_.uncheckedReadVarS32(); } - uint32_t readVarU32() { return decoder_.uncheckedReadVarU32(); } - uint64_t readVarU64() { return decoder_.uncheckedReadVarU64(); } - uint64_t readVarS64() { return decoder_.uncheckedReadVarS64(); } - float readF32() { return decoder_.uncheckedReadFixedF32(); } - double readF64() { return decoder_.uncheckedReadFixedF64(); } - Expr readExpr() { return decoder_.uncheckedReadExpr(); } - Expr peakExpr() { return decoder_.uncheckedPeekExpr(); } - - SimdConstant readI32X4() { - I32x4 i32x4; - JS_ALWAYS_TRUE(decoder_.readFixedI32x4(&i32x4)); - return SimdConstant::CreateX4(i32x4); - } - SimdConstant readF32X4() { - F32x4 f32x4; - JS_ALWAYS_TRUE(decoder_.readFixedF32x4(&f32x4)); - return SimdConstant::CreateX4(f32x4); - } - - uint32_t currentOffset() const { - return decoder_.currentOffset(); - } uint32_t readCallSiteLineOrBytecode(uint32_t callOffset) { if (!func_.callSiteLineNums().empty()) return func_.callSiteLineNums()[lastReadCallSite_++]; return callOffset; } - bool done() const { return decoder_.done(); } + bool done() const { return iter_.done(); } /*************************************************************************/ private: @@ -1428,99 +1433,595 @@ class FunctionCompiler } }; +} // end anonymous namespace + static bool -EmitLiteral(FunctionCompiler& f, ValType type, MDefinition** def) +EmitBlock(FunctionCompiler& f) { - switch (type) { - case ValType::I32: { - int32_t val = f.readVarS32(); - *def = f.constant(Int32Value(val), MIRType::Int32); - return true; - } - case ValType::I64: { - int64_t val = f.readVarS64(); - *def = f.constant(val); - return true; - } - case ValType::F32: { - float val = f.readF32(); - *def = f.constant(Float32Value(val), MIRType::Float32); - return true; - } - case ValType::F64: { - double val = f.readF64(); - *def = f.constant(DoubleValue(val), MIRType::Double); - return true; - } - case ValType::I32x4: { - SimdConstant lit(f.readI32X4()); - *def = f.constant(lit, MIRType::Int32x4); - return true; - } - case ValType::F32x4: { - SimdConstant lit(f.readF32X4()); - *def = f.constant(lit, MIRType::Float32x4); - return true; - } - case ValType::B32x4: { - // Boolean vectors are stored as an Int vector with -1 / 0 lanes. - SimdConstant lit(f.readI32X4()); - *def = f.constant(lit, MIRType::Bool32x4); - return true; - } - case ValType::Limit: + return f.iter().readBlock() && + f.startBlock(); +} + +static bool +EmitLoop(FunctionCompiler& f) +{ + if (!f.iter().readLoop()) + return false; + + MBasicBlock *loopHeader; + if (!f.startLoop(&loopHeader)) + return false; + + f.addInterruptCheck(); + + f.iter().controlItem() = loopHeader; + return true; +} + +static bool +EmitIf(FunctionCompiler& f) +{ + if (!f.iter().readIf()) + return false; + + const IfRecord& if_ = f.iter().if_(); + + MBasicBlock* elseBlock; + if (!f.branchAndStartThen(if_.condition, &elseBlock)) + return false; + + f.iter().controlItem() = elseBlock; + return true; +} + +static bool +EmitElse(FunctionCompiler& f) +{ + MBasicBlock *block = f.iter().controlItem(); + + if (!f.iter().readElse()) + return false; + + const ElseRecord& else_ = f.iter().else_(); + + if (!IsVoid(else_.type)) + f.pushDef(else_.thenValue); + + if (!f.switchToElse(block, &f.iter().controlItem())) + return false; + + return true; +} + +static bool +EmitEnd(FunctionCompiler& f) +{ + MBasicBlock *block = f.iter().controlItem(); + + if (!f.iter().readEnd()) + return false; + + const EndRecord& end = f.iter().end(); + + if (!IsVoid(end.type)) + f.pushDef(end.value); + + MDefinition* def; + switch (end.kind) { + case LabelKind::Block: + if (!f.finishBlock(&def)) + return false; + break; + case LabelKind::Loop: + if (!f.closeLoop(block, &def)) + return false; + break; + case LabelKind::Then: + // If we didn't see an Else, create a trivial else block so that we create + // a diamond anyway, to preserve Ion invariants. + if (!f.switchToElse(block, &block)) + return false; + + if (!f.joinIfElse(block, &def)) + return false; + break; + case LabelKind::Else: + if (!f.joinIfElse(block, &def)) + return false; break; } - MOZ_CRASH("unexpected literal type"); -} -static bool -EmitGetLocal(FunctionCompiler& f, MDefinition** def) -{ - uint32_t slot = f.readVarU32(); - *def = f.getLocalDef(slot); + f.iter().setResult(def); return true; } static bool -EmitLoadGlobal(FunctionCompiler& f, MDefinition** def) +EmitBr(FunctionCompiler& f) { - uint32_t index = f.readVarU32(); - const AsmJSGlobalVariable& global = f.mg().globalVar(index); - *def = f.loadGlobalVar(global.globalDataOffset, global.isConst, ToMIRType(global.type)); - return true; -} - -static bool EmitExpr(FunctionCompiler&, MDefinition**); - -static bool -EmitHeapAddress(FunctionCompiler& f, MDefinition** base, MAsmJSHeapAccess* access) -{ - uint32_t alignLog2 = f.readVarU32(); - access->setAlign(1 << alignLog2); - - uint32_t offset = f.readVarU32(); - access->setOffset(offset); - - if (!EmitExpr(f, base)) + if (!f.iter().readBr()) return false; - if (f.mg().isAsmJS()) { - MOZ_ASSERT(offset == 0 && "asm.js validation does not produce load/store offsets"); + const BrRecord& br = f.iter().br(); + + if (IsVoid(br.type)) { + if (!f.br(br.relativeDepth, nullptr)) + return false; + } else { + if (!f.br(br.relativeDepth, br.value)) + return false; + } + + return true; +} + +static bool +EmitBrIf(FunctionCompiler& f) +{ + if (!f.iter().readBrIf()) + return false; + + const BrIfRecord& brIf = f.iter().brIf(); + + if (IsVoid(brIf.type)) { + if (!f.brIf(brIf.relativeDepth, nullptr, brIf.condition)) + return false; + } else { + if (!f.brIf(brIf.relativeDepth, brIf.value, brIf.condition)) + return false; + } + + return true; +} + +static bool +EmitBrTable(FunctionCompiler& f) +{ + if (!f.iter().readBrTable()) + return false; + + const BrTableRecord& brTable = f.iter().brTable(); + + Uint32Vector depths; + size_t tableLength = brTable.tableLength; + if (!depths.reserve(tableLength)) + return false; + + ExprType type = f.iter().brTable().type; + uint32_t depth; + for (size_t i = 0; i < tableLength; ++i) { + if (!f.iter().readBrTableEntry(type, &depth)) + return false; + depths.infallibleAppend(depth); + } + + // Read the default label. + if (!f.iter().readBrTableEntry(type, &depth)) + return false; + + if (tableLength == 0) + return f.br(depth, nullptr); + + return f.brTable(brTable.index, depth, depths); +} + +static bool +EmitReturn(FunctionCompiler& f) +{ + if (!f.iter().readReturn()) + return false; + + const ReturnRecord& return_ = f.iter().return_(); + + if (IsVoid(f.sig().ret())) { + f.returnVoid(); return true; } + f.returnExpr(return_.value); + return true; +} + +static bool +EmitCallArgs(FunctionCompiler& f, const Sig& sig, FunctionCompiler::Call* ionCall) +{ + if (!f.startCallArgs(ionCall)) + return false; + + MDefinition* arg; + const ValTypeVector& args = sig.args(); + uint32_t numArgs = args.length(); + for (size_t i = 0; i < numArgs; ++i) { + ValType argType = args[i]; + if (!f.iter().readCallArg(argType, numArgs, i, &arg)) + return false; + if (!f.passArg(arg, argType, ionCall)) + return false; + } + + if (!f.iter().readCallArgsEnd(numArgs)) + return false; + + f.finishCallArgs(ionCall); + return true; +} + +static bool +EmitCall(FunctionCompiler& f, uint32_t callOffset) +{ + uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); + + if (!f.iter().readCall()) + return false; + + const CallRecord& call = f.iter().call(); + const Sig& sig = *f.mg().funcSigs[call.callee]; + + FunctionCompiler::Call ionCall(f, lineOrBytecode); + if (!EmitCallArgs(f, sig, &ionCall)) + return false; + + if (!f.iter().readCallReturn(sig.ret())) + return false; + + MDefinition* def; + if (!f.internalCall(sig, call.callee, ionCall, &def)) + return false; + + if (IsVoid(sig.ret())) + return true; + + f.iter().setResult(def); + return true; +} + +static bool +EmitCallIndirect(FunctionCompiler& f, uint32_t callOffset) +{ + uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); + + if (!f.iter().readCallIndirect()) + return false; + + const CallIndirectRecord& callIndirect = f.iter().callIndirect(); + const Sig& sig = f.mg().sigs[callIndirect.sigIndex]; + + FunctionCompiler::Call ionCall(f, lineOrBytecode); + if (!EmitCallArgs(f, sig, &ionCall)) + return false; + + MDefinition* callee; + if (!f.iter().readCallIndirectCallee(&callee)) + return false; + + if (!f.iter().readCallReturn(sig.ret())) + return false; + + MDefinition* def; + const TableModuleGeneratorData& table = f.mg().sigToTable[callIndirect.sigIndex]; + if (!f.funcPtrCall(sig, table.numElems, table.globalDataOffset, callee, ionCall, &def)) + return false; + + if (IsVoid(sig.ret())) + return true; + + f.iter().setResult(def); + return true; +} + +static bool +EmitCallImport(FunctionCompiler& f, uint32_t callOffset) +{ + uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); + + if (!f.iter().readCallImport()) + return false; + + const CallImportRecord& callImport = f.iter().callImport(); + const ImportModuleGeneratorData& import = f.mg().imports[callImport.callee]; + const Sig& sig = *import.sig; + + FunctionCompiler::Call ionCall(f, lineOrBytecode); + if (!EmitCallArgs(f, sig, &ionCall)) + return false; + + if (!f.iter().readCallReturn(sig.ret())) + return false; + + MDefinition* def; + if (!f.ffiCall(import.globalDataOffset, ionCall, sig.ret(), &def)) + return false; + + if (IsVoid(sig.ret())) + return true; + + f.iter().setResult(def); + return true; +} + +static bool +EmitGetLocal(FunctionCompiler& f) +{ + if (!f.iter().readGetLocal(f.locals())) + return false; + + const GetVarRecord& getVar = f.iter().getVar(); + + f.iter().setResult(f.getLocalDef(getVar.id)); + return true; +} + +static bool +EmitSetLocal(FunctionCompiler& f) +{ + if (!f.iter().readSetLocal(f.locals())) + return false; + + const SetVarRecord& setVar = f.iter().setVar(); + + f.assign(setVar.id, setVar.value); + return true; +} + +static bool +EmitGetGlobal(FunctionCompiler& f) +{ + if (!f.iter().readGetGlobal(f.mg().globals)) + return false; + + const GetVarRecord& getVar = f.iter().getVar(); + + const GlobalDesc& global = f.mg().globals[getVar.id]; + f.iter().setResult(f.loadGlobalVar(global.globalDataOffset, global.isConst, + ToMIRType(global.type))); + return true; +} + +static bool +EmitSetGlobal(FunctionCompiler& f) +{ + if (!f.iter().readSetGlobal(f.mg().globals)) + return false; + + const SetVarRecord& setVar = f.iter().setVar(); + + const GlobalDesc& global = f.mg().globals[setVar.id]; + f.storeGlobalVar(global.globalDataOffset, setVar.value); + return true; +} + +template +static bool +EmitUnary(FunctionCompiler& f, ValType operandType) +{ + if (!f.iter().readUnary(operandType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.unary(unary.op)); + return true; +} + +template +static bool +EmitConversion(FunctionCompiler& f, ValType operandType, ValType resultType) +{ + if (!f.iter().readConversion(operandType, resultType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.unary(unary.op)); + return true; +} + +template +static bool +EmitUnaryWithType(FunctionCompiler& f, ValType operandType, MIRType mirType) +{ + if (!f.iter().readUnary(operandType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.unary(unary.op, mirType)); + return true; +} + +template +static bool +EmitConversionWithType(FunctionCompiler& f, + ValType operandType, ValType resultType, MIRType mirType) +{ + if (!f.iter().readConversion(operandType, resultType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.unary(unary.op, mirType)); + return true; +} + +static bool +EmitTruncateToI64(FunctionCompiler& f, ValType operandType, bool isUnsigned) +{ + if (!f.iter().readConversion(operandType, ValType::I64)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.truncate(unary.op, isUnsigned)); + return true; +} + +static bool +EmitExtendI32(FunctionCompiler& f, bool isUnsigned) +{ + if (!f.iter().readConversion(ValType::I32, ValType::I64)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.extendI32(unary.op, isUnsigned)); + return true; +} + +static bool +EmitConvertI64ToFloatingPoint(FunctionCompiler& f, + ValType resultType, MIRType mirType, bool isUnsigned) +{ + if (!f.iter().readConversion(ValType::I64, resultType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.convertI64ToFloatingPoint(unary.op, mirType, isUnsigned)); + return true; +} + +static bool +EmitReinterpret(FunctionCompiler& f, ValType resultType, ValType operandType, MIRType mirType) +{ + if (!f.iter().readConversion(operandType, resultType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.reinterpret(unary.op, mirType)); + return true; +} + +template +static bool +EmitBinary(FunctionCompiler& f, ValType type, MIRType mirType) +{ + if (!f.iter().readBinary(type)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + f.iter().setResult(f.binary(binary.lhs, binary.rhs, mirType)); + return true; +} + +template +static bool +EmitBitwise(FunctionCompiler& f, ValType operandType) +{ + if (!f.iter().readUnary(operandType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.bitwise(unary.op)); + return true; +} + +template +static bool +EmitBitwise(FunctionCompiler& f, ValType operandType, MIRType mirType) +{ + if (!f.iter().readBinary(operandType)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + f.iter().setResult(f.bitwise(binary.lhs, binary.rhs, mirType)); + return true; +} + +static bool +EmitMul(FunctionCompiler& f, ValType operandType, MIRType mirType) +{ + if (!f.iter().readBinary(operandType)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + f.iter().setResult(f.mul(binary.lhs, binary.rhs, mirType, + mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal)); + return true; +} + +static bool +EmitDiv(FunctionCompiler& f, ValType operandType, MIRType mirType, bool isUnsigned) +{ + if (!f.iter().readBinary(operandType)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + f.iter().setResult(f.div(binary.lhs, binary.rhs, mirType, isUnsigned)); + return true; +} + +static bool +EmitRem(FunctionCompiler& f, ValType operandType, MIRType mirType, bool isUnsigned) +{ + if (!f.iter().readBinary(operandType)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + f.iter().setResult(f.mod(binary.lhs, binary.rhs, mirType, isUnsigned)); + return true; +} + +static bool +EmitMinMax(FunctionCompiler& f, ValType operandType, MIRType mirType, bool isMax) +{ + if (!f.iter().readBinary(operandType)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + f.iter().setResult(f.minMax(binary.lhs, binary.rhs, mirType, isMax)); + return true; +} + +static bool +EmitComparison(FunctionCompiler& f, + ValType operandType, JSOp compareOp, MCompare::CompareType compareType) +{ + if (!f.iter().readComparison(operandType)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + f.iter().setResult(f.compare(binary.lhs, binary.rhs, compareOp, compareType)); + return true; +} + +static bool +EmitSelect(FunctionCompiler& f) +{ + if (!f.iter().readSelect()) + return false; + + const SelectRecord& select = f.iter().select(); + + if (!IsVoid(select.type)) + f.iter().setResult(f.select(select.trueValue, select.falseValue, select.condition)); + + return true; +} + +static bool +SetHeapAccessOffset(FunctionCompiler& f, uint32_t offset, MAsmJSHeapAccess* access, MDefinition** base) +{ // TODO Remove this after implementing non-wraparound offset semantics. - uint32_t endOffset = access->endOffset(); + uint32_t endOffset = offset + access->byteSize(); if (endOffset < offset) return false; - bool accessNeedsBoundsCheck = true; + // Assume worst case. + bool accessNeedsBoundsCheck = true; bool atomicAccess = true; if (endOffset > f.mirGen().foldableOffsetRange(accessNeedsBoundsCheck, atomicAccess)) { MDefinition* rhs = f.constant(Int32Value(offset), MIRType::Int32); *base = f.binary(*base, rhs, MIRType::Int32); - offset = 0; + access->setOffset(0); + } else { access->setOffset(offset); } @@ -1528,294 +2029,73 @@ EmitHeapAddress(FunctionCompiler& f, MDefinition** base, MAsmJSHeapAccess* acces } static bool -EmitLoad(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def) +EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) { - MDefinition* base; - MAsmJSHeapAccess access(viewType); - if (!EmitHeapAddress(f, &base, &access)) + if (!f.iter().readLoad(type, Scalar::byteSize(viewType))) return false; - *def = f.loadHeap(base, access); + + const LoadRecord& load = f.iter().load(); + + MAsmJSHeapAccess access(viewType); + access.setAlign(load.addr.align); + + MDefinition* base = load.addr.base; + if (!SetHeapAccessOffset(f, load.addr.offset, &access, &base)) + return false; + + f.iter().setResult(f.loadHeap(base, access)); return true; } static bool -EmitStore(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def) +EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType) { - MDefinition* base; - MAsmJSHeapAccess access(viewType); - if (!EmitHeapAddress(f, &base, &access)) + if (!f.iter().readStore(resultType, Scalar::byteSize(viewType))) return false; - MDefinition* rhs = nullptr; - switch (viewType) { - case Scalar::Int8: - case Scalar::Int16: - case Scalar::Int32: - if (!EmitExpr(f, &rhs)) - return false; - break; - case Scalar::Float32: - if (!EmitExpr(f, &rhs)) - return false; - break; - case Scalar::Float64: - if (!EmitExpr(f, &rhs)) - return false; - break; - default: MOZ_CRASH("unexpected scalar type"); - } + const StoreRecord& store = f.iter().store(); - f.storeHeap(base, access, rhs); - *def = rhs; + MAsmJSHeapAccess access(viewType); + access.setAlign(store.addr.align); + + MDefinition* base = store.addr.base; + if (!SetHeapAccessOffset(f, store.addr.offset, &access, &base)) + return false; + + f.storeHeap(base, access, store.value); return true; } static bool -EmitStoreWithCoercion(FunctionCompiler& f, Scalar::Type rhsType, Scalar::Type viewType, - MDefinition **def) +EmitStoreWithCoercion(FunctionCompiler& f, ValType resultType, Scalar::Type viewType) { - MDefinition* base; - MAsmJSHeapAccess access(viewType); - if (!EmitHeapAddress(f, &base, &access)) + if (!f.iter().readStore(resultType, Scalar::byteSize(viewType))) return false; - MDefinition* rhs = nullptr; - MDefinition* coerced = nullptr; - if (rhsType == Scalar::Float32 && viewType == Scalar::Float64) { - if (!EmitExpr(f, &rhs)) - return false; - coerced = f.unary(rhs); - } else if (rhsType == Scalar::Float64 && viewType == Scalar::Float32) { - if (!EmitExpr(f, &rhs)) - return false; - coerced = f.unary(rhs); - } else { + const StoreRecord& store = f.iter().store(); + + MDefinition* value = store.value; + if (resultType == ValType::F32 && viewType == Scalar::Float64) + value = f.unary(value); + else if (resultType == ValType::F64 && viewType == Scalar::Float32) + value = f.unary(value); + else MOZ_CRASH("unexpected coerced store"); - } - f.storeHeap(base, access, coerced); - *def = rhs; - return true; -} - -static bool -EmitSetLocal(FunctionCompiler& f, MDefinition** def) -{ - uint32_t slot = f.readVarU32(); - MDefinition* expr; - if (!EmitExpr(f, &expr)) - return false; - f.assign(slot, expr); - *def = expr; - return true; -} - -static bool -EmitStoreGlobal(FunctionCompiler& f, MDefinition**def) -{ - uint32_t index = f.readVarU32(); - const AsmJSGlobalVariable& global = f.mg().globalVar(index); - MDefinition* expr; - if (!EmitExpr(f, &expr)) - return false; - f.storeGlobalVar(global.globalDataOffset, expr); - *def = expr; - return true; -} - -typedef bool IsMax; - -static bool -EmitMathMinMax(FunctionCompiler& f, ValType type, bool isMax, MDefinition** def) -{ - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) - return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - MIRType mirType = ToMIRType(type); - *def = f.minMax(lhs, rhs, mirType, isMax); - return true; -} - -static bool -EmitAtomicsLoad(FunctionCompiler& f, MDefinition** def) -{ - Scalar::Type viewType = Scalar::Type(f.readU8()); - - MDefinition* base; - MAsmJSHeapAccess access(viewType, 0, MembarBeforeLoad, MembarAfterLoad); - if (!EmitHeapAddress(f, &base, &access)) - return false; - - *def = f.atomicLoadHeap(base, access); - return true; -} - -static bool -EmitAtomicsStore(FunctionCompiler& f, MDefinition** def) -{ - Scalar::Type viewType = Scalar::Type(f.readU8()); - - MDefinition* base; - MAsmJSHeapAccess access(viewType, 0, MembarBeforeStore, MembarAfterStore); - if (!EmitHeapAddress(f, &base, &access)) - return false; - - MDefinition* value; - if (!EmitExpr(f, &value)) - return false; - f.atomicStoreHeap(base, access, value); - *def = value; - return true; -} - -static bool -EmitAtomicsBinOp(FunctionCompiler& f, MDefinition** def) -{ - Scalar::Type viewType = Scalar::Type(f.readU8()); - js::jit::AtomicOp op = js::jit::AtomicOp(f.readU8()); - - MDefinition* base; MAsmJSHeapAccess access(viewType); - if (!EmitHeapAddress(f, &base, &access)) + access.setAlign(store.addr.align); + + MDefinition* base = store.addr.base; + if (!SetHeapAccessOffset(f, store.addr.offset, &access, &base)) return false; - MDefinition* value; - if (!EmitExpr(f, &value)) - return false; - *def = f.atomicBinopHeap(op, base, access, value); + f.storeHeap(base, access, value); return true; } static bool -EmitAtomicsCompareExchange(FunctionCompiler& f, MDefinition** def) -{ - Scalar::Type viewType = Scalar::Type(f.readU8()); - - MDefinition* base; - MAsmJSHeapAccess access(viewType); - if (!EmitHeapAddress(f, &base, &access)) - return false; - - MDefinition* oldValue; - if (!EmitExpr(f, &oldValue)) - return false; - MDefinition* newValue; - if (!EmitExpr(f, &newValue)) - return false; - *def = f.atomicCompareExchangeHeap(base, access, oldValue, newValue); - return true; -} - -static bool -EmitAtomicsExchange(FunctionCompiler& f, MDefinition** def) -{ - Scalar::Type viewType = Scalar::Type(f.readU8()); - - MDefinition* base; - MAsmJSHeapAccess access(viewType); - if (!EmitHeapAddress(f, &base, &access)) - return false; - - MDefinition* value; - if (!EmitExpr(f, &value)) - return false; - *def = f.atomicExchangeHeap(base, access, value); - return true; -} - -static bool -EmitCallArgs(FunctionCompiler& f, const Sig& sig, FunctionCompiler::Call* call) -{ - if (!f.startCallArgs(call)) - return false; - for (ValType argType : sig.args()) { - MDefinition* arg; - if (!EmitExpr(f, &arg)) - return false; - if (!f.passArg(arg, argType, call)) - return false; - } - f.finishCallArgs(call); - return true; -} - -static bool -EmitCall(FunctionCompiler& f, uint32_t callOffset, MDefinition** def) -{ - uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); - uint32_t funcIndex = f.readVarU32(); - - const Sig& sig = f.mg().funcSig(funcIndex); - - FunctionCompiler::Call call(f, lineOrBytecode); - if (!EmitCallArgs(f, sig, &call)) - return false; - - return f.internalCall(sig, funcIndex, call, def); -} - -static bool -EmitCallIndirect(FunctionCompiler& f, uint32_t callOffset, MDefinition** def) -{ - uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); - uint32_t sigIndex = f.readVarU32(); - - const Sig& sig = f.mg().sig(sigIndex); - - MDefinition* index; - if (!EmitExpr(f, &index)) - return false; - - FunctionCompiler::Call call(f, lineOrBytecode); - if (!EmitCallArgs(f, sig, &call)) - return false; - - const TableModuleGeneratorData& table = f.mg().sigToTable(sigIndex); - return f.funcPtrCall(sig, table.numElems, table.globalDataOffset, index, call, def); -} - -static bool -EmitCallImport(FunctionCompiler& f, uint32_t callOffset, MDefinition** def) -{ - uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); - uint32_t importIndex = f.readVarU32(); - - const ImportModuleGeneratorData& import = f.mg().import(importIndex); - const Sig& sig = *import.sig; - - FunctionCompiler::Call call(f, lineOrBytecode); - if (!EmitCallArgs(f, sig, &call)) - return false; - - return f.ffiCall(import.globalDataOffset, call, sig.ret(), def); -} - -static bool -EmitF32MathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, Expr f32, MDefinition** def) -{ - MOZ_ASSERT(f32 == Expr::F32Ceil || f32 == Expr::F32Floor); - - uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); - - FunctionCompiler::Call call(f, lineOrBytecode); - if (!f.startCallArgs(&call)) - return false; - - MDefinition* firstArg; - if (!EmitExpr(f, &firstArg) || !f.passArg(firstArg, ValType::F32, &call)) - return false; - - f.finishCallArgs(&call); - - SymbolicAddress callee = f32 == Expr::F32Ceil ? SymbolicAddress::CeilF : SymbolicAddress::FloorF; - return f.builtinCall(callee, call, ValType::F32, def); -} - -static bool -EmitF64MathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, Expr f64, MDefinition** def) +EmitUnaryMathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, SymbolicAddress callee, + ValType operandType) { uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); @@ -1823,40 +2103,156 @@ EmitF64MathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, Expr f64, MDefi if (!f.startCallArgs(&call)) return false; - MDefinition* firstArg; - if (!EmitExpr(f, &firstArg) || !f.passArg(firstArg, ValType::F64, &call)) + if (!f.iter().readUnary(operandType)) return false; - if (f64 == Expr::F64Pow || f64 == Expr::F64Atan2) { - MDefinition* secondArg; - if (!EmitExpr(f, &secondArg) || !f.passArg(secondArg, ValType::F64, &call)) - return false; - } + const UnaryRecord& unary = f.iter().unary(); - SymbolicAddress callee; - switch (f64) { - case Expr::F64Ceil: callee = SymbolicAddress::CeilD; break; - case Expr::F64Floor: callee = SymbolicAddress::FloorD; break; - case Expr::F64Sin: callee = SymbolicAddress::SinD; break; - case Expr::F64Cos: callee = SymbolicAddress::CosD; break; - case Expr::F64Tan: callee = SymbolicAddress::TanD; break; - case Expr::F64Asin: callee = SymbolicAddress::ASinD; break; - case Expr::F64Acos: callee = SymbolicAddress::ACosD; break; - case Expr::F64Atan: callee = SymbolicAddress::ATanD; break; - case Expr::F64Exp: callee = SymbolicAddress::ExpD; break; - case Expr::F64Log: callee = SymbolicAddress::LogD; break; - case Expr::F64Pow: callee = SymbolicAddress::PowD; break; - case Expr::F64Atan2: callee = SymbolicAddress::ATan2D; break; - default: MOZ_CRASH("unexpected double math builtin callee"); - } + if (!f.passArg(unary.op, operandType, &call)) + return false; f.finishCallArgs(&call); - return f.builtinCall(callee, call, ValType::F64, def); + MDefinition* def; + if (!f.builtinCall(callee, call, operandType, &def)) + return false; + + f.iter().setResult(def); + return true; } static bool -EmitSimdUnary(FunctionCompiler& f, ValType type, SimdOperation simdOp, MDefinition** def) +EmitBinaryMathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, SymbolicAddress callee, + ValType operandType) +{ + uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset); + + FunctionCompiler::Call call(f, lineOrBytecode); + if (!f.startCallArgs(&call)) + return false; + + if (!f.iter().readBinary(operandType)) + return false; + + const BinaryRecord& binary = f.iter().binary(); + + if (!f.passArg(binary.lhs, operandType, &call)) + return false; + + if (!f.passArg(binary.rhs, operandType, &call)) + return false; + + f.finishCallArgs(&call); + + MDefinition* def; + if (!f.builtinCall(callee, call, operandType, &def)) + return false; + + f.iter().setResult(def); + return true; +} + +static bool +EmitAtomicsLoad(FunctionCompiler& f) +{ + if (!f.iter().readAtomicLoad()) + return false; + + const AtomicLoadRecord& atomicLoad = f.iter().atomicLoad(); + + MAsmJSHeapAccess access(atomicLoad.viewType, 0, MembarBeforeLoad, MembarAfterLoad); + access.setAlign(atomicLoad.addr.align); + + MDefinition* base = atomicLoad.addr.base; + if (!SetHeapAccessOffset(f, atomicLoad.addr.offset, &access, &base)) + return false; + + f.iter().setResult(f.atomicLoadHeap(base, access)); + return true; +} + +static bool +EmitAtomicsStore(FunctionCompiler& f) +{ + if (!f.iter().readAtomicStore()) + return false; + + const AtomicStoreRecord& atomicStore = f.iter().atomicStore(); + + MAsmJSHeapAccess access(atomicStore.viewType, 0, MembarBeforeStore, MembarAfterStore); + access.setAlign(atomicStore.addr.align); + + MDefinition* base = atomicStore.addr.base; + if (!SetHeapAccessOffset(f, atomicStore.addr.offset, &access, &base)) + return false; + + f.atomicStoreHeap(base, access, atomicStore.value); + f.iter().setResult(atomicStore.value); + return true; +} + +static bool +EmitAtomicsBinOp(FunctionCompiler& f) +{ + if (!f.iter().readAtomicBinOp()) + return false; + + const AtomicBinOpRecord& atomicBinOp = f.iter().atomicBinOp(); + + MAsmJSHeapAccess access(atomicBinOp.viewType); + access.setAlign(atomicBinOp.addr.align); + + MDefinition* base = atomicBinOp.addr.base; + if (!SetHeapAccessOffset(f, atomicBinOp.addr.offset, &access, &base)) + return false; + + f.iter().setResult(f.atomicBinopHeap(atomicBinOp.op, base, access, atomicBinOp.value)); + return true; +} + +static bool +EmitAtomicsCompareExchange(FunctionCompiler& f) +{ + if (!f.iter().readAtomicCompareExchange()) + return false; + + const AtomicCompareExchangeRecord& atomicCompareExchange = + f.iter().atomicCompareExchange(); + + MAsmJSHeapAccess access(atomicCompareExchange.viewType); + access.setAlign(atomicCompareExchange.addr.align); + + MDefinition* base = atomicCompareExchange.addr.base; + if (!SetHeapAccessOffset(f, atomicCompareExchange.addr.offset, &access, &base)) + return false; + + f.iter().setResult(f.atomicCompareExchangeHeap(base, access, + atomicCompareExchange.oldValue, + atomicCompareExchange.newValue)); + return true; +} + +static bool +EmitAtomicsExchange(FunctionCompiler& f) +{ + if (!f.iter().readAtomicExchange()) + return false; + + const AtomicExchangeRecord& atomicExchange = f.iter().atomicExchange(); + + MAsmJSHeapAccess access(atomicExchange.viewType); + access.setAlign(atomicExchange.addr.align); + + MDefinition* base = atomicExchange.addr.base; + if (!SetHeapAccessOffset(f, atomicExchange.addr.offset, &access, &base)) + return false; + + f.iter().setResult(f.atomicExchangeHeap(base, access, atomicExchange.value)); + return true; +} + +static bool +EmitSimdUnary(FunctionCompiler& f, ValType type, SimdOperation simdOp) { MSimdUnaryArith::Operation op; switch (simdOp) { @@ -1881,51 +2277,42 @@ EmitSimdUnary(FunctionCompiler& f, ValType type, SimdOperation simdOp, MDefiniti default: MOZ_CRASH("not a simd unary arithmetic operation"); } - MDefinition* in; - if (!EmitExpr(f, &in)) + if (!f.iter().readUnary(type)) return false; - *def = f.unarySimd(in, op, ToMIRType(type)); + const UnaryRecord& unary = f.iter().unary(); + f.iter().setResult(f.unarySimd(unary.op, op, ToMIRType(type))); return true; } template inline bool -EmitSimdBinary(FunctionCompiler& f, ValType type, OpKind op, MDefinition** def) +EmitSimdBinary(FunctionCompiler& f, ValType type, OpKind op) { - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) + if (!f.iter().readBinary(type)) return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - *def = f.binarySimd(lhs, rhs, op, ToMIRType(type)); + const BinaryRecord& binary = f.iter().binary(); + f.iter().setResult(f.binarySimd(binary.lhs, binary.rhs, op, ToMIRType(type))); return true; } static bool -EmitSimdBinaryComp(FunctionCompiler& f, MSimdBinaryComp::Operation op, SimdSign sign, - MDefinition** def) +EmitSimdBinaryComp(FunctionCompiler& f, ValType operandType, MSimdBinaryComp::Operation op, + SimdSign sign) { - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) + if (!f.iter().readSimdComparison(operandType)) return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - *def = f.binarySimdComp(lhs, rhs, op, sign); + const BinaryRecord& binary = f.iter().binary(); + f.iter().setResult(f.binarySimdComp(binary.lhs, binary.rhs, op, sign)); return true; } static bool -EmitSimdShift(FunctionCompiler& f, MSimdShift::Operation op, MDefinition** def) +EmitSimdShift(FunctionCompiler& f, ValType operandType, MSimdShift::Operation op) { - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) + if (!f.iter().readSimdShiftByScalar(operandType)) return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - *def = f.binarySimd(lhs, rhs, op); + const BinaryRecord& binary = f.iter().binary(); + f.iter().setResult(f.binarySimd(binary.lhs, binary.rhs, op)); return true; } @@ -1946,128 +2333,90 @@ SimdToLaneType(ValType type) } static bool -EmitExtractLane(FunctionCompiler& f, ValType type, SimdSign sign, MDefinition** def) +EmitExtractLane(FunctionCompiler& f, ValType operandType, SimdSign sign) { - MDefinition* vec; - if (!EmitExpr(f, &vec)) + if (!f.iter().readExtractLane(operandType)) return false; - MDefinition* laneDef; - if (!EmitExpr(f, &laneDef)) - return false; + const ExtractLaneRecord& extractLane = f.iter().extractLane(); - if (!laneDef) { - *def = nullptr; - return true; - } - - MOZ_ASSERT(laneDef->isConstant()); - int32_t laneLit = laneDef->toConstant()->toInt32(); - MOZ_ASSERT(laneLit < 4); - SimdLane lane = SimdLane(laneLit); - - *def = f.extractSimdElement(lane, vec, ToMIRType(SimdToLaneType(type)), sign); + f.iter().setResult(f.extractSimdElement(extractLane.lane, extractLane.vector, + ToMIRType(SimdToLaneType(operandType)), sign)); return true; } // Emit an I32 expression and then convert it to a boolean SIMD lane value, i.e. -1 or 0. -static bool -EmitSimdBooleanLaneExpr(FunctionCompiler& f, MDefinition** def) +static MDefinition* +EmitSimdBooleanLaneExpr(FunctionCompiler& f, MDefinition* i32) { - MDefinition* i32; - if (!EmitExpr(f, &i32)) - return false; - // Now compute !i32 - 1 to force the value range into {0, -1}. + // Compute !i32 - 1 to force the value range into {0, -1}. MDefinition* noti32 = f.unary(i32); - *def = f.binary(noti32, f.constant(Int32Value(1), MIRType::Int32), MIRType::Int32); - return true; + return f.binary(noti32, f.constant(Int32Value(1), MIRType::Int32), MIRType::Int32); } static bool -EmitSimdReplaceLane(FunctionCompiler& f, ValType simdType, MDefinition** def) +EmitSimdReplaceLane(FunctionCompiler& f, ValType simdType) { - MDefinition* vector; - if (!EmitExpr(f, &vector)) + if (IsSimdBoolType(simdType)) + f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult())); + + if (!f.iter().readReplaceLane(simdType)) return false; - MDefinition* laneDef; - if (!EmitExpr(f, &laneDef)) - return false; + const ReplaceLaneRecord& replaceLane = f.iter().replaceLane(); - SimdLane lane; - if (laneDef) { - MOZ_ASSERT(laneDef->isConstant()); - int32_t laneLit = laneDef->toConstant()->toInt32(); - MOZ_ASSERT(laneLit < 4); - lane = SimdLane(laneLit); - } else { - lane = SimdLane(-1); - } - - MDefinition* scalar; - if (IsSimdBoolType(simdType)) { - if (!EmitSimdBooleanLaneExpr(f, &scalar)) - return false; - } else { - if (!EmitExpr(f, &scalar)) - return false; - } - *def = f.insertElementSimd(vector, scalar, lane, ToMIRType(simdType)); + f.iter().setResult(f.insertElementSimd(replaceLane.vector, replaceLane.scalar, + replaceLane.lane, ToMIRType(simdType))); return true; } inline bool -EmitSimdBitcast(FunctionCompiler& f, ValType fromType, ValType toType, MDefinition** def) +EmitSimdBitcast(FunctionCompiler& f, ValType fromType, ValType toType) { - MDefinition* in; - if (!EmitExpr(f, &in)) + if (!f.iter().readConversion(fromType, toType)) return false; - *def = f.bitcastSimd(in, ToMIRType(fromType), ToMIRType(toType)); + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.bitcastSimd(unary.op, ToMIRType(fromType), ToMIRType(toType))); return true; } inline bool -EmitSimdConvert(FunctionCompiler& f, ValType fromType, ValType toType, SimdSign sign, - MDefinition** def) +EmitSimdConvert(FunctionCompiler& f, ValType fromType, ValType toType, SimdSign sign) { - MDefinition* in; - if (!EmitExpr(f, &in)) + if (!f.iter().readConversion(fromType, toType)) return false; - *def = f.convertSimd(in, ToMIRType(fromType), ToMIRType(toType), sign); + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.convertSimd(unary.op, ToMIRType(fromType), ToMIRType(toType), sign)); return true; } static bool -EmitSimdSwizzle(FunctionCompiler& f, ValType type, MDefinition** def) +EmitSimdSwizzle(FunctionCompiler& f, ValType simdType) { - MDefinition* in; - if (!EmitExpr(f, &in)) + if (!f.iter().readSwizzle(simdType)) return false; - uint8_t lanes[4]; - for (unsigned i = 0; i < 4; i++) - lanes[i] = f.readU8(); + const SwizzleRecord& swizzle = f.iter().swizzle(); - *def = f.swizzleSimd(in, lanes[0], lanes[1], lanes[2], lanes[3], ToMIRType(type)); + f.iter().setResult(f.swizzleSimd(swizzle.vector, swizzle.lanes[0], swizzle.lanes[1], + swizzle.lanes[2], swizzle.lanes[3], ToMIRType(simdType))); return true; } static bool -EmitSimdShuffle(FunctionCompiler& f, ValType type, MDefinition** def) +EmitSimdShuffle(FunctionCompiler& f, ValType simdType) { - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) + if (!f.iter().readShuffle(simdType)) return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; + const ShuffleRecord& shuffle = f.iter().shuffle(); - uint8_t lanes[4]; - for (unsigned i = 0; i < 4; i++) - lanes[i] = f.readU8(); - - *def = f.shuffleSimd(lhs, rhs, lanes[0], lanes[1], lanes[2], lanes[3], ToMIRType(type)); + f.iter().setResult(f.shuffleSimd(shuffle.lhs, shuffle.rhs, shuffle.lanes[0], shuffle.lanes[1], + shuffle.lanes[2], shuffle.lanes[3], ToMIRType(simdType))); return true; } @@ -2083,126 +2432,151 @@ SimdExprTypeToViewType(ValType type, unsigned* defaultNumElems) } static bool -EmitSimdLoad(FunctionCompiler& f, ValType type, unsigned numElems, MDefinition** def) +EmitSimdLoad(FunctionCompiler& f, ValType resultType, unsigned numElems) { unsigned defaultNumElems; - Scalar::Type viewType = SimdExprTypeToViewType(type, &defaultNumElems); + Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems); if (!numElems) numElems = defaultNumElems; - MDefinition* base; - MAsmJSHeapAccess access(viewType, numElems); - if (!EmitHeapAddress(f, &base, &access)) + if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType))) return false; - *def = f.loadSimdHeap(base, access); + const LoadRecord& load = f.iter().load(); + + MAsmJSHeapAccess access(viewType, numElems); + access.setAlign(load.addr.align); + + MDefinition* base = load.addr.base; + if (!SetHeapAccessOffset(f, load.addr.offset, &access, &base)) + return false; + + f.iter().setResult(f.loadSimdHeap(base, access)); return true; } static bool -EmitSimdStore(FunctionCompiler& f, ValType type, unsigned numElems, MDefinition** def) +EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems) { unsigned defaultNumElems; - Scalar::Type viewType = SimdExprTypeToViewType(type, &defaultNumElems); + Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems); if (!numElems) numElems = defaultNumElems; - MDefinition* base; + if (!f.iter().readStore(resultType, Scalar::byteSize(viewType))) + return false; + + const StoreRecord& store = f.iter().store(); + MAsmJSHeapAccess access(viewType, numElems); - if (!EmitHeapAddress(f, &base, &access)) + access.setAlign(store.addr.align); + + MDefinition* base = store.addr.base; + if (!SetHeapAccessOffset(f, store.addr.offset, &access, &base)) return false; - MDefinition* vec; - if (!EmitExpr(f, &vec)) - return false; - - f.storeSimdHeap(base, access, vec); - *def = vec; + f.storeSimdHeap(base, access, store.value); return true; } static bool -EmitSimdSelect(FunctionCompiler& f, ValType type, MDefinition** def) +EmitSimdSelect(FunctionCompiler& f, ValType simdType) { - MDefinition* mask; - MDefinition* defs[2]; - - // The mask is a boolean vector for elementwise select. - if (!EmitExpr(f, &mask)) + if (!f.iter().readSimdSelect(simdType)) return false; - if (!EmitExpr(f, &defs[0]) || !EmitExpr(f, &defs[1])) - return false; - *def = f.selectSimd(mask, defs[0], defs[1], ToMIRType(type)); + const SimdSelectRecord& simdSelect = f.iter().simdSelect(); + + f.iter().setResult(f.selectSimd(simdSelect.condition, + simdSelect.trueValue, simdSelect.falseValue, + ToMIRType(simdType))); return true; } static bool -EmitSimdAllTrue(FunctionCompiler& f, MDefinition** def) +EmitSimdAllTrue(FunctionCompiler& f, ValType operandType) { - MDefinition* in; - if (!EmitExpr(f, &in)) + if (!f.iter().readSimdBooleanReduction(operandType)) return false; - *def = f.simdAllTrue(in); + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.simdAllTrue(unary.op)); return true; } static bool -EmitSimdAnyTrue(FunctionCompiler& f, MDefinition** def) +EmitSimdAnyTrue(FunctionCompiler& f, ValType operandType) { - MDefinition* in; - if (!EmitExpr(f, &in)) + if (!f.iter().readSimdBooleanReduction(operandType)) return false; - *def = f.simdAnyTrue(in); + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.simdAnyTrue(unary.op)); return true; } static bool -EmitSimdSplat(FunctionCompiler& f, ValType type, MDefinition** def) +EmitSimdSplat(FunctionCompiler& f, ValType simdType) { - MDefinition* in; - if (IsSimdBoolType(type)) { - if (!EmitSimdBooleanLaneExpr(f, &in)) - return false; - } else { - if (!EmitExpr(f, &in)) - return false; - } - *def = f.splatSimd(in, ToMIRType(type)); + if (IsSimdBoolType(simdType)) + f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult())); + + if (!f.iter().readSplat(simdType)) + return false; + + const UnaryRecord& unary = f.iter().unary(); + + f.iter().setResult(f.splatSimd(unary.op, ToMIRType(simdType))); return true; } static bool -EmitSimdCtor(FunctionCompiler& f, ValType type, MDefinition** def) +EmitSimdCtor(FunctionCompiler& f, ValType type) { + if (!f.iter().readSimdCtor()) + return false; + switch (type) { case ValType::I32x4: { MDefinition* args[4]; for (unsigned i = 0; i < 4; i++) { - if (!EmitExpr(f, &args[i])) + if (!f.iter().readSimdCtorArg(ValType::I32, 4, i, &args[i])) return false; } - *def = f.constructSimd(args[0], args[1], args[2], args[3], MIRType::Int32x4); + if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type)) + return false; + f.iter().setResult(f.constructSimd(args[0], args[1], args[2], args[3], + MIRType::Int32x4)); return true; } case ValType::F32x4: { MDefinition* args[4]; for (unsigned i = 0; i < 4; i++) { - if (!EmitExpr(f, &args[i])) + if (!f.iter().readSimdCtorArg(ValType::F32, 4, i, &args[i])) return false; } - *def = f.constructSimd(args[0], args[1], args[2], args[3], MIRType::Float32x4); + if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type)) + return false; + f.iter().setResult(f.constructSimd(args[0], args[1], args[2], args[3], + MIRType::Float32x4)); return true; } case ValType::B32x4: { MDefinition* args[4]; for (unsigned i = 0; i < 4; i++) { - if (!EmitSimdBooleanLaneExpr(f, &args[i])) + MDefinition* i32; + if (!f.iter().readSimdCtorArg(ValType::I32, 4, i, &i32)) return false; + args[i] = EmitSimdBooleanLaneExpr(f, i32); } - *def = f.constructSimd(args[0], args[1], args[2], args[3], MIRType::Bool32x4); + if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type)) + return false; + f.iter().setResult(f.constructSimd(args[0], args[1], args[2], args[3], + MIRType::Bool32x4)); return true; } case ValType::I32: @@ -2215,386 +2589,86 @@ EmitSimdCtor(FunctionCompiler& f, ValType type, MDefinition** def) MOZ_CRASH("unexpected SIMD type"); } -template static bool -EmitUnary(FunctionCompiler& f, MDefinition** def) -{ - MDefinition* in; - if (!EmitExpr(f, &in)) - return false; - *def = f.unary(in); - return true; -} - -template -static bool -EmitUnaryWithType(FunctionCompiler& f, ValType type, MDefinition** def) -{ - MDefinition* in; - if (!EmitExpr(f, &in)) - return false; - *def = f.unary(in, ToMIRType(type)); - return true; -} - -static bool -EmitMultiply(FunctionCompiler& f, ValType type, MDefinition** def) -{ - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) - return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - MIRType mirType = ToMIRType(type); - *def = f.mul(lhs, rhs, mirType, mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal); - return true; -} - -static bool -EmitSelect(FunctionCompiler& f, MDefinition** def) -{ - MDefinition* trueExpr; - if (!EmitExpr(f, &trueExpr)) - return false; - - MDefinition* falseExpr; - if (!EmitExpr(f, &falseExpr)) - return false; - - MDefinition* condExpr; - if (!EmitExpr(f, &condExpr)) - return false; - - if (trueExpr && falseExpr && - trueExpr->type() == falseExpr->type() && - trueExpr->type() != MIRType::None) - { - *def = f.select(trueExpr, falseExpr, condExpr); - } else { - *def = nullptr; - } - - return true; -} - -typedef bool IsAdd; - -static bool -EmitAddOrSub(FunctionCompiler& f, ValType type, bool isAdd, MDefinition** def) -{ - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) - return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - MIRType mirType = ToMIRType(type); - *def = isAdd ? f.binary(lhs, rhs, mirType) : f.binary(lhs, rhs, mirType); - return true; -} - -typedef bool IsUnsigned; -typedef bool IsDiv; - -static bool -EmitDivOrMod(FunctionCompiler& f, ValType type, bool isDiv, bool isUnsigned, MDefinition** def) -{ - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) - return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - *def = isDiv - ? f.div(lhs, rhs, ToMIRType(type), isUnsigned) - : f.mod(lhs, rhs, ToMIRType(type), isUnsigned); - return true; -} - -static bool -EmitDivOrMod(FunctionCompiler& f, ValType type, bool isDiv, MDefinition** def) -{ - MOZ_ASSERT(type != ValType::I32 && type != ValType::I64, - "int div or mod must indicate signedness"); - return EmitDivOrMod(f, type, isDiv, false, def); -} - -static bool -EmitComparison(FunctionCompiler& f, Expr expr, MDefinition** def) -{ - MDefinition *lhs, *rhs; - MCompare::CompareType compareType; - switch (expr) { - case Expr::I32Eq: - case Expr::I32Ne: - case Expr::I32LeS: - case Expr::I32LtS: - case Expr::I32LeU: - case Expr::I32LtU: - case Expr::I32GeS: - case Expr::I32GtS: - case Expr::I32GeU: - case Expr::I32GtU: - if (!EmitExpr(f, &lhs) || !EmitExpr(f, &rhs)) - return false; - - switch (expr) { - case Expr::I32LeS: case Expr::I32LtS: case Expr::I32GeS: case Expr::I32GtS: - case Expr::I32Eq: case Expr::I32Ne: - compareType = MCompare::Compare_Int32; break; - case Expr::I32GeU: case Expr::I32GtU: case Expr::I32LeU: case Expr::I32LtU: - compareType = MCompare::Compare_UInt32; break; - default: MOZ_CRASH("impossibru opcode"); - } - break; - case Expr::I64Eq: - case Expr::I64Ne: - case Expr::I64LeS: - case Expr::I64LtS: - case Expr::I64LeU: - case Expr::I64LtU: - case Expr::I64GeS: - case Expr::I64GtS: - case Expr::I64GeU: - case Expr::I64GtU: - if (!EmitExpr(f, &lhs) || !EmitExpr(f, &rhs)) - return false; - switch (expr) { - case Expr::I64LeS: case Expr::I64LtS: case Expr::I64GeS: case Expr::I64GtS: - case Expr::I64Eq: case Expr::I64Ne: - compareType = MCompare::Compare_Int64; - break; - case Expr::I64GeU: case Expr::I64GtU: case Expr::I64LeU: case Expr::I64LtU: - compareType = MCompare::Compare_UInt64; - break; - default: - MOZ_CRASH("unexpected opcode"); - } - break; - case Expr::F32Eq: - case Expr::F32Ne: - case Expr::F32Le: - case Expr::F32Lt: - case Expr::F32Ge: - case Expr::F32Gt: - if (!EmitExpr(f, &lhs) || !EmitExpr(f, &rhs)) - return false; - compareType = MCompare::Compare_Float32; - break; - case Expr::F64Eq: - case Expr::F64Ne: - case Expr::F64Le: - case Expr::F64Lt: - case Expr::F64Ge: - case Expr::F64Gt: - if (!EmitExpr(f, &lhs) || !EmitExpr(f, &rhs)) - return false; - compareType = MCompare::Compare_Double; - break; - default: MOZ_CRASH("unexpected comparison opcode"); - } - - JSOp compareOp; - switch (expr) { - case Expr::I32Eq: - case Expr::I64Eq: - case Expr::F32Eq: - case Expr::F64Eq: - compareOp = JSOP_EQ; - break; - case Expr::I32Ne: - case Expr::I64Ne: - case Expr::F32Ne: - case Expr::F64Ne: - compareOp = JSOP_NE; - break; - case Expr::I32LeS: - case Expr::I32LeU: - case Expr::I64LeS: - case Expr::I64LeU: - case Expr::F32Le: - case Expr::F64Le: - compareOp = JSOP_LE; - break; - case Expr::I32LtS: - case Expr::I32LtU: - case Expr::I64LtS: - case Expr::I64LtU: - case Expr::F32Lt: - case Expr::F64Lt: - compareOp = JSOP_LT; - break; - case Expr::I32GeS: - case Expr::I32GeU: - case Expr::I64GeS: - case Expr::I64GeU: - case Expr::F32Ge: - case Expr::F64Ge: - compareOp = JSOP_GE; - break; - case Expr::I32GtS: - case Expr::I32GtU: - case Expr::I64GtS: - case Expr::I64GtU: - case Expr::F32Gt: - case Expr::F64Gt: - compareOp = JSOP_GT; - break; - default: MOZ_CRASH("unexpected comparison opcode"); - } - - *def = f.compare(lhs, rhs, compareOp, compareType); - return true; -} - -template -static bool -EmitBitwise(FunctionCompiler& f, ValType type, MDefinition** def) -{ - MDefinition* lhs; - if (!EmitExpr(f, &lhs)) - return false; - MDefinition* rhs; - if (!EmitExpr(f, &rhs)) - return false; - MIRType mirType = ToMIRType(type); - *def = f.bitwise(lhs, rhs, mirType); - return true; -} - -static bool -EmitBitwiseNot(FunctionCompiler& f, MDefinition** def) -{ - MDefinition* in; - if (!EmitExpr(f, &in)) - return false; - *def = f.bitwise(in); - return true; -} - -static bool -EmitExtendI32(FunctionCompiler& f, bool isUnsigned, MDefinition** def) -{ - MDefinition* in; - if (!EmitExpr(f, &in)) - return false; - *def = f.extendI32(in, isUnsigned); - return true; -} - -template -static bool -EmitTruncate(FunctionCompiler& f, bool isUnsigned, MDefinition** def) -{ - MDefinition* in; - if (!EmitExpr(f, &in)) - return false; - *def = f.truncate(in, isUnsigned); - return true; -} - -static bool -EmitConvertI64ToFloatingPoint(FunctionCompiler& f, ValType type, bool isUnsigned, - MDefinition** def) -{ - MDefinition* in; - if (!EmitExpr(f, &in)) - return false; - *def = f.convertI64ToFloatingPoint(in, ToMIRType(type), isUnsigned); - return true; -} - -static bool -EmitReinterpret(FunctionCompiler& f, ValType to, MDefinition** def) -{ - MDefinition* in; - if (!EmitExpr(f, &in)) - return false; - *def = f.reinterpret(in, ToMIRType(to)); - return true; -} - -static bool -EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign, MDefinition** def) +EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign) { switch (op) { case SimdOperation::Constructor: - return EmitSimdCtor(f, type, def); + return EmitSimdCtor(f, type); case SimdOperation::Fn_extractLane: - return EmitExtractLane(f, type, sign, def); + return EmitExtractLane(f, type, sign); case SimdOperation::Fn_replaceLane: - return EmitSimdReplaceLane(f, type, def); + return EmitSimdReplaceLane(f, type); case SimdOperation::Fn_check: MOZ_CRASH("only used in asm.js' type system"); case SimdOperation::Fn_splat: - return EmitSimdSplat(f, type, def); + return EmitSimdSplat(f, type); case SimdOperation::Fn_select: - return EmitSimdSelect(f, type, def); + return EmitSimdSelect(f, type); case SimdOperation::Fn_swizzle: - return EmitSimdSwizzle(f, type, def); + return EmitSimdSwizzle(f, type); case SimdOperation::Fn_shuffle: - return EmitSimdShuffle(f, type, def); + return EmitSimdShuffle(f, type); case SimdOperation::Fn_load: - return EmitSimdLoad(f, type, 0, def); + return EmitSimdLoad(f, type, 0); case SimdOperation::Fn_load1: - return EmitSimdLoad(f, type, 1, def); + return EmitSimdLoad(f, type, 1); case SimdOperation::Fn_load2: - return EmitSimdLoad(f, type, 2, def); + return EmitSimdLoad(f, type, 2); case SimdOperation::Fn_load3: - return EmitSimdLoad(f, type, 3, def); + return EmitSimdLoad(f, type, 3); case SimdOperation::Fn_store: - return EmitSimdStore(f, type, 0, def); + return EmitSimdStore(f, type, 0); case SimdOperation::Fn_store1: - return EmitSimdStore(f, type, 1, def); + return EmitSimdStore(f, type, 1); case SimdOperation::Fn_store2: - return EmitSimdStore(f, type, 2, def); + return EmitSimdStore(f, type, 2); case SimdOperation::Fn_store3: - return EmitSimdStore(f, type, 3, def); + return EmitSimdStore(f, type, 3); case SimdOperation::Fn_allTrue: - return EmitSimdAllTrue(f, def); + return EmitSimdAllTrue(f, type); case SimdOperation::Fn_anyTrue: - return EmitSimdAnyTrue(f, def); + return EmitSimdAnyTrue(f, type); case SimdOperation::Fn_abs: case SimdOperation::Fn_neg: case SimdOperation::Fn_not: case SimdOperation::Fn_sqrt: case SimdOperation::Fn_reciprocalApproximation: case SimdOperation::Fn_reciprocalSqrtApproximation: - return EmitSimdUnary(f, type, op, def); + return EmitSimdUnary(f, type, op); case SimdOperation::Fn_shiftLeftByScalar: - return EmitSimdShift(f, MSimdShift::lsh, def); + return EmitSimdShift(f, type, MSimdShift::lsh); case SimdOperation::Fn_shiftRightByScalar: - return EmitSimdShift(f, MSimdShift::rshForSign(sign), def); + return EmitSimdShift(f, type, MSimdShift::rshForSign(sign)); #define _CASE(OP) \ case SimdOperation::Fn_##OP: \ - return EmitSimdBinaryComp(f, MSimdBinaryComp::OP, sign, def); + return EmitSimdBinaryComp(f, type, MSimdBinaryComp::OP, sign); FOREACH_COMP_SIMD_OP(_CASE) #undef _CASE case SimdOperation::Fn_and: - return EmitSimdBinary(f, type, MSimdBinaryBitwise::and_, def); + return EmitSimdBinary(f, type, MSimdBinaryBitwise::and_); case SimdOperation::Fn_or: - return EmitSimdBinary(f, type, MSimdBinaryBitwise::or_, def); + return EmitSimdBinary(f, type, MSimdBinaryBitwise::or_); case SimdOperation::Fn_xor: - return EmitSimdBinary(f, type, MSimdBinaryBitwise::xor_, def); + return EmitSimdBinary(f, type, MSimdBinaryBitwise::xor_); #define _CASE(OP) \ case SimdOperation::Fn_##OP: \ - return EmitSimdBinary(f, type, MSimdBinaryArith::Op_##OP, def); + return EmitSimdBinary(f, type, MSimdBinaryArith::Op_##OP); FOREACH_NUMERIC_SIMD_BINOP(_CASE) FOREACH_FLOAT_SIMD_BINOP(_CASE) #undef _CASE case SimdOperation::Fn_fromFloat32x4: - return EmitSimdConvert(f, ValType::F32x4, type, sign, def); + return EmitSimdConvert(f, ValType::F32x4, type, sign); case SimdOperation::Fn_fromInt32x4: - return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Signed, def); + return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Signed); case SimdOperation::Fn_fromUint32x4: - return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Unsigned, def); + return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Unsigned); case SimdOperation::Fn_fromInt32x4Bits: case SimdOperation::Fn_fromUint32x4Bits: - return EmitSimdBitcast(f, ValType::I32x4, type, def); + return EmitSimdBitcast(f, ValType::I32x4, type); case SimdOperation::Fn_fromFloat32x4Bits: case SimdOperation::Fn_fromInt8x16Bits: - return EmitSimdBitcast(f, ValType::F32x4, type, def); + return EmitSimdBitcast(f, ValType::F32x4, type); case SimdOperation::Fn_fromInt16x8Bits: case SimdOperation::Fn_fromUint8x16Bits: case SimdOperation::Fn_fromUint16x8Bits: @@ -2605,488 +2679,378 @@ EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign, M } static bool -EmitLoop(FunctionCompiler& f, MDefinition** def) -{ - MBasicBlock* loopHeader; - if (!f.startLoop(&loopHeader)) - return false; - - f.addInterruptCheck(); - - if (uint32_t numStmts = f.readVarU32()) { - for (uint32_t i = 0; i < numStmts - 1; i++) { - MDefinition* _; - if (!EmitExpr(f, &_)) - return false; - } - MDefinition* last = nullptr; - if (!EmitExpr(f, &last)) - return false; - f.pushDef(last); - } - - return f.closeLoop(loopHeader, def); -} - -static bool -EmitIfElse(FunctionCompiler& f, Expr op, MDefinition** def) -{ - MOZ_ASSERT(op == Expr::If || op == Expr::IfElse); - - // Handle if/else-if chains using iteration instead of recursion. This - // avoids blowing the C stack quota for long if/else-if chains and also - // creates fewer MBasicBlocks at join points (by creating one join block - // for the entire if/else-if chain). - BlockVector blocks; - - recurse: - MDefinition* condition; - if (!EmitExpr(f, &condition)) - return false; - - MBasicBlock* thenBlock = nullptr; - MBasicBlock* elseOrJoinBlock = nullptr; - if (!f.branchAndStartThen(condition, &thenBlock, &elseOrJoinBlock)) - return false; - - MDefinition* ifDef; - if (!EmitExpr(f, &ifDef)) - return false; - - if (!f.addJoinPredecessor(ifDef, &blocks)) - return false; - - if (op == Expr::If) - return f.joinIf(elseOrJoinBlock, &blocks, def); - - f.switchToElse(elseOrJoinBlock); - - Expr nextStmt = f.peakExpr(); - if (nextStmt == Expr::If || nextStmt == Expr::IfElse) { - JS_ALWAYS_TRUE(f.readExpr() == nextStmt); - op = nextStmt; - goto recurse; - } - - MDefinition* elseDef; - if (!EmitExpr(f, &elseDef)) - return false; - - return f.joinIfElse(elseDef, &blocks, def); -} - -static bool -EmitBrTable(FunctionCompiler& f, MDefinition** def) -{ - uint32_t numCases = f.readVarU32(); - - Uint32Vector depths; - if (!depths.resize(numCases)) - return false; - - for (size_t i = 0; i < numCases; i++) - depths[i] = f.readU32(); - - uint32_t defaultDepth = f.readU32(); - - MDefinition* index; - if (!EmitExpr(f, &index)) - return false; - - *def = nullptr; - - // Empty table - if (!numCases) - return f.br(defaultDepth, nullptr); - - return f.brTable(index, defaultDepth, depths); -} - -static bool -EmitReturn(FunctionCompiler& f, MDefinition** def) -{ - ExprType ret = f.sig().ret(); - - if (IsVoid(ret)) { - *def = nullptr; - f.returnVoid(); - return true; - } - - MDefinition* retVal; - if (!EmitExpr(f, &retVal)) - return false; - - f.returnExpr(retVal); - - *def = nullptr; - return true; -} - -static bool -EmitUnreachable(FunctionCompiler& f, MDefinition** def) -{ - *def = nullptr; - f.unreachableTrap(); - return true; -} - -static bool -EmitBlock(FunctionCompiler& f, MDefinition** def) -{ - if (!f.startBlock()) - return false; - if (uint32_t numStmts = f.readVarU32()) { - for (uint32_t i = 0; i < numStmts - 1; i++) { - MDefinition* _ = nullptr; - if (!EmitExpr(f, &_)) - return false; - } - MDefinition* last = nullptr; - if (!EmitExpr(f, &last)) - return false; - f.pushDef(last); - } - return f.finishBlock(def); -} - -static bool -EmitBranch(FunctionCompiler& f, Expr op, MDefinition** def) -{ - MOZ_ASSERT(op == Expr::Br || op == Expr::BrIf); - - uint32_t relativeDepth = f.readVarU32(); - - MDefinition* maybeValue = nullptr; - if (!EmitExpr(f, &maybeValue)) - return false; - - if (op == Expr::Br) { - if (!f.br(relativeDepth, maybeValue)) - return false; - } else { - MDefinition* condition; - if (!EmitExpr(f, &condition)) - return false; - - if (!f.brIf(relativeDepth, maybeValue, condition)) - return false; - } - - *def = nullptr; - return true; -} - -static bool -EmitExpr(FunctionCompiler& f, MDefinition** def) +EmitExpr(FunctionCompiler& f) { if (!f.mirGen().ensureBallast()) return false; - uint32_t exprOffset = f.currentOffset(); + uint32_t exprOffset = f.iter().currentOffset(); - switch (Expr op = f.readExpr()) { + Expr expr; + if (!f.iter().readExpr(&expr)) + return false; + + switch (expr) { // Control opcodes case Expr::Nop: - *def = nullptr; - return true; - case Expr::Id: - return EmitExpr(f, def); + return f.iter().readTrivial(); case Expr::Block: - return EmitBlock(f, def); - case Expr::If: - case Expr::IfElse: - return EmitIfElse(f, op, def); + return EmitBlock(f); case Expr::Loop: - return EmitLoop(f, def); + return EmitLoop(f); + case Expr::If: + return EmitIf(f); + case Expr::Else: + return EmitElse(f); + case Expr::End: + return EmitEnd(f); case Expr::Br: + return EmitBr(f); case Expr::BrIf: - return EmitBranch(f, op, def); + return EmitBrIf(f); case Expr::BrTable: - return EmitBrTable(f, def); + return EmitBrTable(f); case Expr::Return: - return EmitReturn(f, def); + return EmitReturn(f); case Expr::Unreachable: - return EmitUnreachable(f, def); + if (!f.iter().readUnreachable()) + return false; + f.unreachableTrap(); + return true; // Calls case Expr::Call: - return EmitCall(f, exprOffset, def); + return EmitCall(f, exprOffset); case Expr::CallIndirect: - return EmitCallIndirect(f, exprOffset, def); + return EmitCallIndirect(f, exprOffset); case Expr::CallImport: - return EmitCallImport(f, exprOffset, def); + return EmitCallImport(f, exprOffset); // Locals and globals case Expr::GetLocal: - return EmitGetLocal(f, def); + return EmitGetLocal(f); case Expr::SetLocal: - return EmitSetLocal(f, def); + return EmitSetLocal(f); case Expr::LoadGlobal: - return EmitLoadGlobal(f, def); + return EmitGetGlobal(f); case Expr::StoreGlobal: - return EmitStoreGlobal(f, def); + return EmitSetGlobal(f); // Select case Expr::Select: - return EmitSelect(f, def); + return EmitSelect(f); // I32 case Expr::I32Const: - return EmitLiteral(f, ValType::I32, def); + if (!f.iter().readI32Const()) + return false; + f.iter().setResult(f.constant(Int32Value(f.iter().i32()), MIRType::Int32)); + return true; case Expr::I32Add: - return EmitAddOrSub(f, ValType::I32, IsAdd(true), def); + return EmitBinary(f, ValType::I32, MIRType::Int32); case Expr::I32Sub: - return EmitAddOrSub(f, ValType::I32, IsAdd(false), def); + return EmitBinary(f, ValType::I32, MIRType::Int32); case Expr::I32Mul: - return EmitMultiply(f, ValType::I32, def); + return EmitMul(f, ValType::I32, MIRType::Int32); case Expr::I32DivS: case Expr::I32DivU: - return EmitDivOrMod(f, ValType::I32, IsDiv(true), IsUnsigned(op == Expr::I32DivU), def); + return EmitDiv(f, ValType::I32, MIRType::Int32, expr == Expr::I32DivU); case Expr::I32RemS: case Expr::I32RemU: - return EmitDivOrMod(f, ValType::I32, IsDiv(false), IsUnsigned(op == Expr::I32RemU), def); + return EmitRem(f, ValType::I32, MIRType::Int32, expr == Expr::I32RemU); case Expr::I32Min: - return EmitMathMinMax(f, ValType::I32, IsMax(false), def); case Expr::I32Max: - return EmitMathMinMax(f, ValType::I32, IsMax(true), def); + return EmitMinMax(f, ValType::I32, MIRType::Int32, expr == Expr::I32Max); case Expr::I32Eqz: - return EmitUnary(f, def); + return EmitConversion(f, ValType::I32, ValType::I32); case Expr::I32TruncSF32: case Expr::I32TruncUF32: - return EmitUnary(f, def); + return EmitConversion(f, ValType::F32, ValType::I32); case Expr::I32TruncSF64: case Expr::I32TruncUF64: - return EmitUnary(f, def); + return EmitConversion(f, ValType::F64, ValType::I32); case Expr::I32WrapI64: - return EmitUnary(f, def); - case Expr::I32Clz: - return EmitUnary(f, def); - case Expr::I32Ctz: - return EmitUnary(f, def); - case Expr::I32Popcnt: - return EmitUnary(f, def); - case Expr::I32Abs: - return EmitUnaryWithType(f, ValType::I32, def); - case Expr::I32Neg: - return EmitUnaryWithType(f, ValType::I32, def); - case Expr::I32Or: - return EmitBitwise(f, ValType::I32, def); - case Expr::I32And: - return EmitBitwise(f, ValType::I32, def); - case Expr::I32Xor: - return EmitBitwise(f, ValType::I32, def); - case Expr::I32Shl: - return EmitBitwise(f, ValType::I32, def); - case Expr::I32ShrS: - return EmitBitwise(f, ValType::I32, def); - case Expr::I32ShrU: - return EmitBitwise(f, ValType::I32, def); - case Expr::I32BitNot: - return EmitBitwiseNot(f, def); - case Expr::I32Load8S: - return EmitLoad(f, Scalar::Int8, def); - case Expr::I32Load8U: - return EmitLoad(f, Scalar::Uint8, def); - case Expr::I32Load16S: - return EmitLoad(f, Scalar::Int16, def); - case Expr::I32Load16U: - return EmitLoad(f, Scalar::Uint16, def); - case Expr::I32Load: - return EmitLoad(f, Scalar::Int32, def); - case Expr::I32Store8: - return EmitStore(f, Scalar::Int8, def); - case Expr::I32Store16: - return EmitStore(f, Scalar::Int16, def); - case Expr::I32Store: - return EmitStore(f, Scalar::Int32, def); - case Expr::I32Eq: - case Expr::I32Ne: - case Expr::I32LtS: - case Expr::I32LeS: - case Expr::I32GtS: - case Expr::I32GeS: - case Expr::I32LtU: - case Expr::I32LeU: - case Expr::I32GtU: - case Expr::I32GeU: - case Expr::I64Eq: - case Expr::I64Ne: - case Expr::I64LtS: - case Expr::I64LeS: - case Expr::I64LtU: - case Expr::I64LeU: - case Expr::I64GtS: - case Expr::I64GeS: - case Expr::I64GtU: - case Expr::I64GeU: - case Expr::F32Eq: - case Expr::F32Ne: - case Expr::F32Lt: - case Expr::F32Le: - case Expr::F32Gt: - case Expr::F32Ge: - case Expr::F64Eq: - case Expr::F64Ne: - case Expr::F64Lt: - case Expr::F64Le: - case Expr::F64Gt: - case Expr::F64Ge: - return EmitComparison(f, op, def); + return EmitConversion(f, ValType::I64, ValType::I32); case Expr::I32ReinterpretF32: - return EmitReinterpret(f, ValType::I32, def); + return EmitReinterpret(f, ValType::I32, ValType::F32, MIRType::Int32); + case Expr::I32Clz: + return EmitUnary(f, ValType::I32); + case Expr::I32Ctz: + return EmitUnary(f, ValType::I32); + case Expr::I32Popcnt: + return EmitUnary(f, ValType::I32); + case Expr::I32Abs: + return EmitUnaryWithType(f, ValType::I32, MIRType::Int32); + case Expr::I32Neg: + return EmitUnaryWithType(f, ValType::I32, MIRType::Int32); + case Expr::I32Or: + return EmitBitwise(f, ValType::I32, MIRType::Int32); + case Expr::I32And: + return EmitBitwise(f, ValType::I32, MIRType::Int32); + case Expr::I32Xor: + return EmitBitwise(f, ValType::I32, MIRType::Int32); + case Expr::I32Shl: + return EmitBitwise(f, ValType::I32, MIRType::Int32); + case Expr::I32ShrS: + return EmitBitwise(f, ValType::I32, MIRType::Int32); + case Expr::I32ShrU: + return EmitBitwise(f, ValType::I32, MIRType::Int32); + case Expr::I32BitNot: + return EmitBitwise(f, ValType::I32); + case Expr::I32Load8S: + return EmitLoad(f, ValType::I32, Scalar::Int8); + case Expr::I32Load8U: + return EmitLoad(f, ValType::I32, Scalar::Uint8); + case Expr::I32Load16S: + return EmitLoad(f, ValType::I32, Scalar::Int16); + case Expr::I32Load16U: + return EmitLoad(f, ValType::I32, Scalar::Uint16); + case Expr::I32Load: + return EmitLoad(f, ValType::I32, Scalar::Int32); + case Expr::I32Store8: + return EmitStore(f, ValType::I32, Scalar::Int8); + case Expr::I32Store16: + return EmitStore(f, ValType::I32, Scalar::Int16); + case Expr::I32Store: + return EmitStore(f, ValType::I32, Scalar::Int32); // I64 case Expr::I64Const: - return EmitLiteral(f, ValType::I64, def); - case Expr::I64ExtendSI32: - case Expr::I64ExtendUI32: - return EmitExtendI32(f, IsUnsigned(op == Expr::I64ExtendUI32), def); - case Expr::I64TruncSF32: - case Expr::I64TruncUF32: - return EmitTruncate(f, IsUnsigned(op == Expr::I64TruncUF32), def); - case Expr::I64TruncSF64: - case Expr::I64TruncUF64: - return EmitTruncate(f, IsUnsigned(op == Expr::I64TruncUF64), def); - case Expr::I64Or: - return EmitBitwise(f, ValType::I64, def); - case Expr::I64And: - return EmitBitwise(f, ValType::I64, def); - case Expr::I64Xor: - return EmitBitwise(f, ValType::I64, def); - case Expr::I64Shl: - return EmitBitwise(f, ValType::I64, def); - case Expr::I64ShrS: - return EmitBitwise(f, ValType::I64, def); - case Expr::I64ShrU: - return EmitBitwise(f, ValType::I64, def); + if (!f.iter().readI64Const()) + return false; + + f.iter().setResult(f.constant(f.iter().i64())); + return true; case Expr::I64Add: - return EmitAddOrSub(f, ValType::I64, IsAdd(true), def); + return EmitBinary(f, ValType::I64, MIRType::Int64); case Expr::I64Sub: - return EmitAddOrSub(f, ValType::I64, IsAdd(false), def); + return EmitBinary(f, ValType::I64, MIRType::Int64); case Expr::I64Mul: - return EmitMultiply(f, ValType::I64, def); + return EmitMul(f, ValType::I64, MIRType::Int64); case Expr::I64DivS: case Expr::I64DivU: - return EmitDivOrMod(f, ValType::I64, IsDiv(true), IsUnsigned(op == Expr::I64DivU), def); + return EmitDiv(f, ValType::I64, MIRType::Int64, expr == Expr::I64DivU); case Expr::I64RemS: case Expr::I64RemU: - return EmitDivOrMod(f, ValType::I64, IsDiv(false), IsUnsigned(op == Expr::I64RemU), def); + return EmitRem(f, ValType::I64, MIRType::Int64, expr == Expr::I64RemU); + case Expr::I64TruncSF32: + case Expr::I64TruncUF32: + return EmitTruncateToI64(f, ValType::F32, expr == Expr::I64TruncUF32); + case Expr::I64TruncSF64: + case Expr::I64TruncUF64: + return EmitTruncateToI64(f, ValType::F64, expr == Expr::I64TruncUF64); + case Expr::I64ExtendSI32: + case Expr::I64ExtendUI32: + return EmitExtendI32(f, expr == Expr::I64ExtendUI32); case Expr::I64ReinterpretF64: - return EmitReinterpret(f, ValType::I64, def); + return EmitReinterpret(f, ValType::I64, ValType::F64, MIRType::Int64); + case Expr::I64Or: + return EmitBitwise(f, ValType::I64, MIRType::Int64); + case Expr::I64And: + return EmitBitwise(f, ValType::I64, MIRType::Int64); + case Expr::I64Xor: + return EmitBitwise(f, ValType::I64, MIRType::Int64); + case Expr::I64Shl: + return EmitBitwise(f, ValType::I64, MIRType::Int64); + case Expr::I64ShrS: + return EmitBitwise(f, ValType::I64, MIRType::Int64); + case Expr::I64ShrU: + return EmitBitwise(f, ValType::I64, MIRType::Int64); // F32 case Expr::F32Const: - return EmitLiteral(f, ValType::F32, def); + if (!f.iter().readF32Const()) + return false; + + f.iter().setResult(f.constant(Float32Value(f.iter().f32()), MIRType::Float32)); + return true; case Expr::F32Add: - return EmitAddOrSub(f, ValType::F32, IsAdd(true), def); + return EmitBinary(f, ValType::F32, MIRType::Float32); case Expr::F32Sub: - return EmitAddOrSub(f, ValType::F32, IsAdd(false), def); + return EmitBinary(f, ValType::F32, MIRType::Float32); case Expr::F32Mul: - return EmitMultiply(f, ValType::F32, def); + return EmitMul(f, ValType::F32, MIRType::Float32); case Expr::F32Div: - return EmitDivOrMod(f, ValType::F32, IsDiv(true), def); + return EmitDiv(f, ValType::F32, MIRType::Float32, /* isUnsigned = */ false); case Expr::F32Min: - return EmitMathMinMax(f, ValType::F32, IsMax(false), def); case Expr::F32Max: - return EmitMathMinMax(f, ValType::F32, IsMax(true), def); + return EmitMinMax(f, ValType::F32, MIRType::Float32, expr == Expr::F32Max); case Expr::F32Neg: - return EmitUnaryWithType(f, ValType::F32, def); + return EmitUnaryWithType(f, ValType::F32, MIRType::Float32); case Expr::F32Abs: - return EmitUnaryWithType(f, ValType::F32, def); + return EmitUnaryWithType(f, ValType::F32, MIRType::Float32); case Expr::F32Sqrt: - return EmitUnaryWithType(f, ValType::F32, def); + return EmitUnaryWithType(f, ValType::F32, MIRType::Float32); case Expr::F32Ceil: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::CeilF, ValType::F32); case Expr::F32Floor: - return EmitF32MathBuiltinCall(f, exprOffset, op, def); + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::FloorF, ValType::F32); case Expr::F32DemoteF64: - return EmitUnary(f, def); + return EmitConversion(f, ValType::F64, ValType::F32); case Expr::F32ConvertSI32: - return EmitUnary(f, def); + return EmitConversion(f, ValType::I32, ValType::F32); case Expr::F32ConvertUI32: - return EmitUnary(f, def); + return EmitConversion(f, ValType::I32, ValType::F32); case Expr::F32ConvertSI64: case Expr::F32ConvertUI64: - return EmitConvertI64ToFloatingPoint(f, ValType::F32, - IsUnsigned(op == Expr::F32ConvertUI64), def); + return EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32, + expr == Expr::F32ConvertUI64); + case Expr::F32ReinterpretI32: + return EmitReinterpret(f, ValType::F32, ValType::I32, MIRType::Float32); case Expr::F32Load: - return EmitLoad(f, Scalar::Float32, def); + return EmitLoad(f, ValType::F32, Scalar::Float32); case Expr::F32Store: - return EmitStore(f, Scalar::Float32, def); + return EmitStore(f, ValType::F32, Scalar::Float32); case Expr::F32StoreF64: - return EmitStoreWithCoercion(f, Scalar::Float32, Scalar::Float64, def); - case Expr::F32ReinterpretI32: - return EmitReinterpret(f, ValType::F32, def); + return EmitStoreWithCoercion(f, ValType::F32, Scalar::Float64); // F64 case Expr::F64Const: - return EmitLiteral(f, ValType::F64, def); + if (!f.iter().readF64Const()) + return false; + f.iter().setResult(f.constant(DoubleValue(f.iter().f64()), MIRType::Double)); + return true; case Expr::F64Add: - return EmitAddOrSub(f, ValType::F64, IsAdd(true), def); + return EmitBinary(f, ValType::F64, MIRType::Double); case Expr::F64Sub: - return EmitAddOrSub(f, ValType::F64, IsAdd(false), def); + return EmitBinary(f, ValType::F64, MIRType::Double); case Expr::F64Mul: - return EmitMultiply(f, ValType::F64, def); + return EmitMul(f, ValType::F64, MIRType::Double); case Expr::F64Div: - return EmitDivOrMod(f, ValType::F64, IsDiv(true), def); + return EmitDiv(f, ValType::F64, MIRType::Double, /* isUnsigned = */ false); case Expr::F64Mod: - return EmitDivOrMod(f, ValType::F64, IsDiv(false), def); + return EmitRem(f, ValType::F64, MIRType::Double, /* isUnsigned = */ false); case Expr::F64Min: - return EmitMathMinMax(f, ValType::F64, IsMax(false), def); case Expr::F64Max: - return EmitMathMinMax(f, ValType::F64, IsMax(true), def); + return EmitMinMax(f, ValType::F64, MIRType::Double, expr == Expr::F64Max); case Expr::F64Neg: - return EmitUnaryWithType(f, ValType::F64, def); + return EmitUnaryWithType(f, ValType::F64, MIRType::Double); case Expr::F64Abs: - return EmitUnaryWithType(f, ValType::F64, def); + return EmitUnaryWithType(f, ValType::F64, MIRType::Double); case Expr::F64Sqrt: - return EmitUnaryWithType(f, ValType::F64, def); + return EmitUnaryWithType(f, ValType::F64, MIRType::Double); case Expr::F64Ceil: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::CeilD, ValType::F64); case Expr::F64Floor: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::FloorD, + ValType::F64); case Expr::F64Sin: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::SinD, ValType::F64); case Expr::F64Cos: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::CosD, ValType::F64); case Expr::F64Tan: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::TanD, ValType::F64); case Expr::F64Asin: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::ASinD, ValType::F64); case Expr::F64Acos: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::ACosD, ValType::F64); case Expr::F64Atan: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::ATanD, ValType::F64); case Expr::F64Exp: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::ExpD, ValType::F64); case Expr::F64Log: + return EmitUnaryMathBuiltinCall(f, exprOffset, SymbolicAddress::LogD, ValType::F64); case Expr::F64Pow: + return EmitBinaryMathBuiltinCall(f, exprOffset, SymbolicAddress::PowD, ValType::F64); case Expr::F64Atan2: - return EmitF64MathBuiltinCall(f, exprOffset, op, def); + return EmitBinaryMathBuiltinCall(f, exprOffset, SymbolicAddress::ATan2D, + ValType::F64); case Expr::F64PromoteF32: - return EmitUnary(f, def); + return EmitConversion(f, ValType::F32, ValType::F64); case Expr::F64ConvertSI32: - return EmitUnary(f, def); + return EmitConversion(f, ValType::I32, ValType::F64); case Expr::F64ConvertUI32: - return EmitUnary(f, def); + return EmitConversion(f, ValType::I32, ValType::F64); case Expr::F64ConvertSI64: case Expr::F64ConvertUI64: - return EmitConvertI64ToFloatingPoint(f, ValType::F64, - IsUnsigned(op == Expr::F64ConvertUI64), def); + return EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double, + expr == Expr::F64ConvertUI64); case Expr::F64Load: - return EmitLoad(f, Scalar::Float64, def); + return EmitLoad(f, ValType::F64, Scalar::Float64); case Expr::F64Store: - return EmitStore(f, Scalar::Float64, def); + return EmitStore(f, ValType::F64, Scalar::Float64); case Expr::F64StoreF32: - return EmitStoreWithCoercion(f, Scalar::Float64, Scalar::Float32, def); + return EmitStoreWithCoercion(f, ValType::F64, Scalar::Float32); case Expr::F64ReinterpretI64: - return EmitReinterpret(f, ValType::F64, def); + return EmitReinterpret(f, ValType::F64, ValType::I64, MIRType::Double); + + // Comparisons + case Expr::I32Eq: + return EmitComparison(f, ValType::I32, JSOP_EQ, MCompare::Compare_Int32); + case Expr::I32Ne: + return EmitComparison(f, ValType::I32, JSOP_NE, MCompare::Compare_Int32); + case Expr::I32LtS: + return EmitComparison(f, ValType::I32, JSOP_LT, MCompare::Compare_Int32); + case Expr::I32LeS: + return EmitComparison(f, ValType::I32, JSOP_LE, MCompare::Compare_Int32); + case Expr::I32GtS: + return EmitComparison(f, ValType::I32, JSOP_GT, MCompare::Compare_Int32); + case Expr::I32GeS: + return EmitComparison(f, ValType::I32, JSOP_GE, MCompare::Compare_Int32); + case Expr::I32LtU: + return EmitComparison(f, ValType::I32, JSOP_LT, MCompare::Compare_UInt32); + case Expr::I32LeU: + return EmitComparison(f, ValType::I32, JSOP_LE, MCompare::Compare_UInt32); + case Expr::I32GtU: + return EmitComparison(f, ValType::I32, JSOP_GT, MCompare::Compare_UInt32); + case Expr::I32GeU: + return EmitComparison(f, ValType::I32, JSOP_GE, MCompare::Compare_UInt32); + case Expr::I64Eq: + return EmitComparison(f, ValType::I64, JSOP_EQ, MCompare::Compare_Int64); + case Expr::I64Ne: + return EmitComparison(f, ValType::I64, JSOP_NE, MCompare::Compare_Int64); + case Expr::I64LtS: + return EmitComparison(f, ValType::I64, JSOP_LT, MCompare::Compare_Int64); + case Expr::I64LeS: + return EmitComparison(f, ValType::I64, JSOP_LE, MCompare::Compare_Int64); + case Expr::I64GtS: + return EmitComparison(f, ValType::I64, JSOP_GT, MCompare::Compare_Int64); + case Expr::I64GeS: + return EmitComparison(f, ValType::I64, JSOP_GE, MCompare::Compare_Int64); + case Expr::I64LtU: + return EmitComparison(f, ValType::I64, JSOP_LT, MCompare::Compare_UInt64); + case Expr::I64LeU: + return EmitComparison(f, ValType::I64, JSOP_LE, MCompare::Compare_UInt64); + case Expr::I64GtU: + return EmitComparison(f, ValType::I64, JSOP_GT, MCompare::Compare_UInt64); + case Expr::I64GeU: + return EmitComparison(f, ValType::I64, JSOP_GE, MCompare::Compare_UInt64); + case Expr::F32Eq: + return EmitComparison(f, ValType::F32, JSOP_EQ, MCompare::Compare_Float32); + case Expr::F32Ne: + return EmitComparison(f, ValType::F32, JSOP_NE, MCompare::Compare_Float32); + case Expr::F32Lt: + return EmitComparison(f, ValType::F32, JSOP_LT, MCompare::Compare_Float32); + case Expr::F32Le: + return EmitComparison(f, ValType::F32, JSOP_LE, MCompare::Compare_Float32); + case Expr::F32Gt: + return EmitComparison(f, ValType::F32, JSOP_GT, MCompare::Compare_Float32); + case Expr::F32Ge: + return EmitComparison(f, ValType::F32, JSOP_GE, MCompare::Compare_Float32); + case Expr::F64Eq: + return EmitComparison(f, ValType::F64, JSOP_EQ, MCompare::Compare_Double); + case Expr::F64Ne: + return EmitComparison(f, ValType::F64, JSOP_NE, MCompare::Compare_Double); + case Expr::F64Lt: + return EmitComparison(f, ValType::F64, JSOP_LT, MCompare::Compare_Double); + case Expr::F64Le: + return EmitComparison(f, ValType::F64, JSOP_LE, MCompare::Compare_Double); + case Expr::F64Gt: + return EmitComparison(f, ValType::F64, JSOP_GT, MCompare::Compare_Double); + case Expr::F64Ge: + return EmitComparison(f, ValType::F64, JSOP_GE, MCompare::Compare_Double); // SIMD #define CASE(TYPE, OP, SIGN) \ case Expr::TYPE##OP: \ - return EmitSimdOp(f, ValType::TYPE, SimdOperation::Fn_##OP, SIGN, def); + return EmitSimdOp(f, ValType::TYPE, SimdOperation::Fn_##OP, SIGN); #define I32CASE(OP) CASE(I32x4, OP, SimdSign::Signed) #define F32CASE(OP) CASE(F32x4, OP, SimdSign::NotApplicable) #define B32CASE(OP) CASE(B32x4, OP, SimdSign::NotApplicable) #define ENUMERATE(TYPE, FORALL, DO) \ - case Expr::TYPE##Const: \ - return EmitLiteral(f, ValType::TYPE, def); \ case Expr::TYPE##Constructor: \ - return EmitSimdOp(f, ValType::TYPE, SimdOperation::Constructor, \ - SimdSign::NotApplicable, def); \ + return EmitSimdOp(f, ValType::TYPE, SimdOperation::Constructor, SimdSign::NotApplicable); \ FORALL(DO) ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32CASE) @@ -3099,36 +3063,47 @@ EmitExpr(FunctionCompiler& f, MDefinition** def) #undef B32CASE #undef ENUMERATE + case Expr::I32x4Const: + if (!f.iter().readI32x4Const()) + return false; + f.iter().setResult(f.constant(SimdConstant::CreateX4(f.iter().i32x4()), MIRType::Int32x4)); + return true; + case Expr::F32x4Const: + if (!f.iter().readF32x4Const()) + return false; + f.iter().setResult(f.constant(SimdConstant::CreateX4(f.iter().f32x4()), MIRType::Float32x4)); + return true; + case Expr::B32x4Const: + if (!f.iter().readB32x4Const()) + return false; + f.iter().setResult(f.constant(SimdConstant::CreateX4(f.iter().i32x4()), MIRType::Bool32x4)); + return true; + // SIMD unsigned integer operations. case Expr::I32x4shiftRightByScalarU: - return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_shiftRightByScalar, - SimdSign::Unsigned, def); + return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned); case Expr::I32x4lessThanU: - return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThan, SimdSign::Unsigned, def); + return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThan, SimdSign::Unsigned); case Expr::I32x4lessThanOrEqualU: - return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThanOrEqual, - SimdSign::Unsigned, def); + return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned); case Expr::I32x4greaterThanU: - return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThan, SimdSign::Unsigned, - def); + return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThan, SimdSign::Unsigned); case Expr::I32x4greaterThanOrEqualU: - return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThanOrEqual, - SimdSign::Unsigned, def); + return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned); case Expr::I32x4fromFloat32x4U: - return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_fromFloat32x4, - SimdSign::Unsigned, def); + return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_fromFloat32x4, SimdSign::Unsigned); // Atomics - case Expr::I32AtomicsCompareExchange: - return EmitAtomicsCompareExchange(f, def); - case Expr::I32AtomicsExchange: - return EmitAtomicsExchange(f, def); case Expr::I32AtomicsLoad: - return EmitAtomicsLoad(f, def); + return EmitAtomicsLoad(f); case Expr::I32AtomicsStore: - return EmitAtomicsStore(f, def); + return EmitAtomicsStore(f); case Expr::I32AtomicsBinOp: - return EmitAtomicsBinOp(f, def); + return EmitAtomicsBinOp(f); + case Expr::I32AtomicsCompareExchange: + return EmitAtomicsCompareExchange(f); + case Expr::I32AtomicsExchange: + return EmitAtomicsExchange(f); // Future opcodes case Expr::F32CopySign: @@ -3156,10 +3131,9 @@ EmitExpr(FunctionCompiler& f, MDefinition** def) case Expr::I32Rotl: case Expr::I64Rotr: case Expr::I64Rotl: - case Expr::MemorySize: + case Expr::CurrentMemory: case Expr::GrowMemory: MOZ_CRASH("NYI"); - break; case Expr::Limit:; } @@ -3192,8 +3166,8 @@ wasm::IonCompileFunction(IonCompileTask* task) CompileInfo compileInfo(locals.length()); MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo, IonOptimizations.get(OptimizationLevel::AsmJS)); - mir.initUsesSignalHandlersForAsmJSOOB(task->mg().args().useSignalHandlersForOOB); - mir.initMinAsmJSHeapLength(task->mg().minHeapLength()); + mir.initUsesSignalHandlersForAsmJSOOB(task->mg().args.useSignalHandlersForOOB); + mir.initMinAsmJSHeapLength(task->mg().minHeapLength); // Build MIR graph { @@ -3201,16 +3175,20 @@ wasm::IonCompileFunction(IonCompileTask* task) if (!f.init()) return false; - MDefinition* last; + if (!f.iter().readFunctionStart()) + return false; + while (!f.done()) { - if (!EmitExpr(f, &last)) + if (!EmitExpr(f)) return false; } + if (!f.iter().readFunctionEnd(f.sig().ret())) + return false; if (IsVoid(f.sig().ret())) f.returnVoid(); else - f.returnExpr(last); + f.returnExpr(f.iter().return_().value); f.finish(); } diff --git a/js/src/asmjs/WasmIonCompile.h b/js/src/asmjs/WasmIonCompile.h index 7e91f2042e9d..f5eff686c366 100644 --- a/js/src/asmjs/WasmIonCompile.h +++ b/js/src/asmjs/WasmIonCompile.h @@ -25,7 +25,7 @@ namespace js { namespace wasm { -class ModuleGeneratorThreadView; +struct ModuleGeneratorData; typedef Vector MIRTypeVector; typedef jit::ABIArgIter ABIArgMIRTypeIter; @@ -108,7 +108,7 @@ class FuncCompileResults class IonCompileTask { JSRuntime* const runtime_; - ModuleGeneratorThreadView& mg_; + const ModuleGeneratorData& mg_; LifoAlloc lifo_; UniqueFuncBytes func_; Maybe results_; @@ -117,7 +117,7 @@ class IonCompileTask IonCompileTask& operator=(const IonCompileTask&) = delete; public: - IonCompileTask(JSRuntime* rt, ModuleGeneratorThreadView& mg, size_t defaultChunkSize) + IonCompileTask(JSRuntime* rt, const ModuleGeneratorData& mg, size_t defaultChunkSize) : runtime_(rt), mg_(mg), lifo_(defaultChunkSize), func_(nullptr) {} JSRuntime* runtime() const { @@ -126,7 +126,7 @@ class IonCompileTask LifoAlloc& lifo() { return lifo_; } - ModuleGeneratorThreadView& mg() const { + const ModuleGeneratorData& mg() const { return mg_; } void init(UniqueFuncBytes func) { diff --git a/js/src/asmjs/WasmStubs.cpp b/js/src/asmjs/WasmStubs.cpp index e2e5362e46a5..6f080b8b7817 100644 --- a/js/src/asmjs/WasmStubs.cpp +++ b/js/src/asmjs/WasmStubs.cpp @@ -20,6 +20,8 @@ #include "mozilla/ArrayUtils.h" +#include "asmjs/WasmIonCompile.h" + #include "jit/MacroAssembler-inl.h" using namespace js; diff --git a/js/src/asmjs/WasmTextToBinary.cpp b/js/src/asmjs/WasmTextToBinary.cpp index 4c744dfa24a1..18f451db9e03 100644 --- a/js/src/asmjs/WasmTextToBinary.cpp +++ b/js/src/asmjs/WasmTextToBinary.cpp @@ -305,8 +305,8 @@ class WasmAstBlock : public WasmAstExpr public: static const WasmAstExprKind Kind = WasmAstExprKind::Block; - explicit WasmAstBlock(Expr expr, WasmName breakName, WasmName continueName, - WasmAstExprVector&& exprs) + explicit WasmAstBlock(Expr expr, WasmName breakName, + WasmName continueName, WasmAstExprVector&& exprs) : WasmAstExpr(Kind), expr_(expr), breakName_(breakName), @@ -1004,7 +1004,7 @@ IsWasmLetter(char16_t c) static bool IsNameAfterDollar(char16_t c) { - return IsWasmLetter(c) || IsWasmDigit(c) || c == '_' || c == '$' || c == '-'; + return IsWasmLetter(c) || IsWasmDigit(c) || c == '_' || c == '$' || c == '-' || c == '.'; } static bool @@ -3481,9 +3481,18 @@ ResolveConversionOperator(Resolver& r, WasmAstConversionOperator& b) static bool ResolveIfElse(Resolver& r, WasmAstIf& i) { - return ResolveExpr(r, i.cond()) && - ResolveExpr(r, i.thenBranch()) && - (!i.hasElse() || ResolveExpr(r, i.elseBranch())); + if (!ResolveExpr(r, i.cond())) + return false; + if (!r.pushTarget(WasmName())) + return false; + if (!ResolveExpr(r, i.thenBranch())) + return false; + if (i.hasElse()) { + if (!ResolveExpr(r, i.elseBranch())) + return false; + } + r.popTarget(WasmName()); + return true; } static bool @@ -3654,14 +3663,15 @@ EncodeBlock(Encoder& e, WasmAstBlock& b) return false; size_t numExprs = b.exprs().length(); - if (!e.writeVarU32(numExprs)) - return false; for (size_t i = 0; i < numExprs; i++) { if (!EncodeExpr(e, *b.exprs()[i])) return false; } + if (!e.writeExpr(Expr::End)) + return false; + return true; } @@ -3670,20 +3680,25 @@ EncodeBranch(Encoder& e, WasmAstBranch& br) { MOZ_ASSERT(br.expr() == Expr::Br || br.expr() == Expr::BrIf); - if (!e.writeExpr(br.expr())) - return false; - - if (!e.writeVarU32(br.target().index())) - return false; - - if (br.maybeValue() ? !EncodeExpr(e, *br.maybeValue()) : !e.writeExpr(Expr::Nop)) - return false; + if (br.maybeValue()) { + if (!EncodeExpr(e, *br.maybeValue())) + return false; + } else { + if (!e.writeExpr(Expr::Nop)) + return false; + } if (br.expr() == Expr::BrIf) { if (!EncodeExpr(e, br.cond())) return false; } + if (!e.writeExpr(br.expr())) + return false; + + if (!e.writeVarU32(br.target().index())) + return false; + return true; } @@ -3701,33 +3716,33 @@ EncodeArgs(Encoder& e, const WasmAstExprVector& args) static bool EncodeCall(Encoder& e, WasmAstCall& c) { + if (!EncodeArgs(e, c.args())) + return false; + if (!e.writeExpr(c.expr())) return false; if (!e.writeVarU32(c.func().index())) return false; - if (!EncodeArgs(e, c.args())) - return false; - return true; } static bool EncodeCallIndirect(Encoder& e, WasmAstCallIndirect& c) { - if (!e.writeExpr(Expr::CallIndirect)) - return false; - - if (!e.writeVarU32(c.sig().index())) - return false; - if (!EncodeExpr(e, *c.index())) return false; if (!EncodeArgs(e, c.args())) return false; + if (!e.writeExpr(Expr::CallIndirect)) + return false; + + if (!e.writeVarU32(c.sig().index())) + return false; + return true; } @@ -3763,92 +3778,112 @@ EncodeGetLocal(Encoder& e, WasmAstGetLocal& gl) static bool EncodeSetLocal(Encoder& e, WasmAstSetLocal& sl) { - return e.writeExpr(Expr::SetLocal) && - e.writeVarU32(sl.local().index()) && - EncodeExpr(e, sl.value()); + return EncodeExpr(e, sl.value()) && + e.writeExpr(Expr::SetLocal) && + e.writeVarU32(sl.local().index()); } static bool EncodeUnaryOperator(Encoder& e, WasmAstUnaryOperator& b) { - return e.writeExpr(b.expr()) && - EncodeExpr(e, *b.op()); + return EncodeExpr(e, *b.op()) && + e.writeExpr(b.expr()); } static bool EncodeBinaryOperator(Encoder& e, WasmAstBinaryOperator& b) { - return e.writeExpr(b.expr()) && - EncodeExpr(e, *b.lhs()) && - EncodeExpr(e, *b.rhs()); + return EncodeExpr(e, *b.lhs()) && + EncodeExpr(e, *b.rhs()) && + e.writeExpr(b.expr()); } static bool EncodeTernaryOperator(Encoder& e, WasmAstTernaryOperator& b) { - return e.writeExpr(b.expr()) && - EncodeExpr(e, *b.op0()) && + return EncodeExpr(e, *b.op0()) && EncodeExpr(e, *b.op1()) && - EncodeExpr(e, *b.op2()); + EncodeExpr(e, *b.op2()) && + e.writeExpr(b.expr()); } static bool EncodeComparisonOperator(Encoder& e, WasmAstComparisonOperator& b) { - return e.writeExpr(b.expr()) && - EncodeExpr(e, *b.lhs()) && - EncodeExpr(e, *b.rhs()); + return EncodeExpr(e, *b.lhs()) && + EncodeExpr(e, *b.rhs()) && + e.writeExpr(b.expr()); } static bool EncodeConversionOperator(Encoder& e, WasmAstConversionOperator& b) { - return e.writeExpr(b.expr()) && - EncodeExpr(e, *b.op()); + return EncodeExpr(e, *b.op()) && + e.writeExpr(b.expr()); } static bool EmitIf(Encoder& e, WasmAstIf& i) { - return e.writeExpr(i.hasElse() ? Expr::IfElse : Expr::If) && - EncodeExpr(e, i.cond()) && + return EncodeExpr(e, i.cond()) && + e.writeExpr(Expr::If) && EncodeExpr(e, i.thenBranch()) && - (!i.hasElse() || EncodeExpr(e, i.elseBranch())); + (!i.hasElse() || + (e.writeExpr(Expr::Else) && + EncodeExpr(e, i.elseBranch()))) && + e.writeExpr(Expr::End); } static bool EncodeLoadStoreAddress(Encoder &e, const WasmAstLoadStoreAddress &address) +{ + return EncodeExpr(e, address.base()); +} + +static bool +EncodeLoadStoreFlags(Encoder &e, const WasmAstLoadStoreAddress &address) { return e.writeVarU32(address.flags()) && - e.writeVarU32(address.offset()) && - EncodeExpr(e, address.base()); + e.writeVarU32(address.offset()); } static bool EncodeLoad(Encoder& e, WasmAstLoad& l) { - return e.writeExpr(l.expr()) && - EncodeLoadStoreAddress(e, l.address()); + return EncodeLoadStoreAddress(e, l.address()) && + e.writeExpr(l.expr()) && + EncodeLoadStoreFlags(e, l.address()); } static bool EncodeStore(Encoder& e, WasmAstStore& s) { - return e.writeExpr(s.expr()) && - EncodeLoadStoreAddress(e, s.address()) && - EncodeExpr(e, s.value()); + return EncodeLoadStoreAddress(e, s.address()) && + EncodeExpr(e, s.value()) && + e.writeExpr(s.expr()) && + EncodeLoadStoreFlags(e, s.address()); } static bool EncodeReturn(Encoder& e, WasmAstReturn& r) { - return e.writeExpr(Expr::Return) && - (!r.maybeExpr() || EncodeExpr(e, *r.maybeExpr())); + if (r.maybeExpr()) { + if (!EncodeExpr(e, *r.maybeExpr())) + return false; + } else { + if (!e.writeExpr(Expr::Nop)) + return false; + } + + return e.writeExpr(Expr::Return); } static bool EncodeBranchTable(Encoder& e, WasmAstBranchTable& bt) { + if (!EncodeExpr(e, bt.index())) + return false; + if (!e.writeExpr(Expr::BrTable)) return false; @@ -3863,7 +3898,7 @@ EncodeBranchTable(Encoder& e, WasmAstBranchTable& bt) if (!e.writeFixedU32(bt.def().index())) return false; - return EncodeExpr(e, bt.index()); + return true; } static bool diff --git a/js/src/asmjs/WasmTypes.h b/js/src/asmjs/WasmTypes.h index dc94566571ab..eb62571b1467 100644 --- a/js/src/asmjs/WasmTypes.h +++ b/js/src/asmjs/WasmTypes.h @@ -39,6 +39,7 @@ class PropertyName; namespace wasm { +using mozilla::DebugOnly; using mozilla::EnumeratedArray; using mozilla::Maybe; using mozilla::Move; @@ -48,6 +49,24 @@ typedef Vector Uint32Vector; // ValType/ExprType utilities +// ExprType::Limit is an out-of-band value and has no wasm-semantic meaning. For +// the purpose of recursive validation, we use this value to represent the type +// of branch/return instructions that don't actually return to the parent +// expression and can thus be used in any context. +const ExprType AnyType = ExprType::Limit; + +inline ExprType +Unify(ExprType a, ExprType b) +{ + if (a == AnyType) + return b; + if (b == AnyType) + return a; + if (a == b) + return a; + return ExprType::Void; +} + static inline bool IsVoid(ExprType et) { @@ -73,6 +92,50 @@ IsSimdType(ValType vt) return vt == ValType::I32x4 || vt == ValType::F32x4 || vt == ValType::B32x4; } +static inline uint32_t +NumSimdElements(ValType vt) +{ + MOZ_ASSERT(IsSimdType(vt)); + switch (vt) { + case ValType::I32x4: + case ValType::F32x4: + case ValType::B32x4: + return 4; + default: + MOZ_CRASH("Unhandled SIMD type"); + } +} + +static inline ValType +SimdElementType(ValType vt) +{ + MOZ_ASSERT(IsSimdType(vt)); + switch (vt) { + case ValType::I32x4: + return ValType::I32; + case ValType::F32x4: + return ValType::F32; + case ValType::B32x4: + return ValType::I32; + default: + MOZ_CRASH("Unhandled SIMD type"); + } +} + +static inline ValType +SimdBoolType(ValType vt) +{ + MOZ_ASSERT(IsSimdType(vt)); + switch (vt) { + case ValType::I32x4: + case ValType::F32x4: + case ValType::B32x4: + return ValType::B32x4; + default: + MOZ_CRASH("Unhandled SIMD type"); + } +} + static inline bool IsSimdType(ExprType et) { @@ -233,6 +296,21 @@ struct SigHashPolicy static bool match(const Sig* lhs, Lookup rhs) { return *lhs == rhs; } }; +// A GlobalDesc describes a single global variable. Currently, globals are only +// exposed through asm.js. + +struct GlobalDesc +{ + ValType type; + unsigned globalDataOffset; + bool isConst; + GlobalDesc(ValType type, unsigned offset, bool isConst) + : type(type), globalDataOffset(offset), isConst(isConst) + {} +}; + +typedef Vector GlobalDescVector; + // A "declared" signature is a Sig object that is created and owned by the // ModuleGenerator. These signature objects are read-only and have the same // lifetime as the ModuleGenerator. This type is useful since some uses of Sig diff --git a/js/src/jit-test/tests/asm.js/testExpressions.js b/js/src/jit-test/tests/asm.js/testExpressions.js index 6914f00a730d..0b6f1cffe156 100644 --- a/js/src/jit-test/tests/asm.js/testExpressions.js +++ b/js/src/jit-test/tests/asm.js/testExpressions.js @@ -56,6 +56,14 @@ assertEq(f(-1), (1048575*-1)|0); assertEq(f(INT32_MIN), (1048575*INT32_MIN)|0); assertEq(f(INT32_MAX), (1048575*INT32_MAX)|0); +var f = asmLink(asmCompile(USE_ASM + "function f(i) { i=i|0; var j=0; j=~i; return j|0 } return f")); +assertEq(f(0), ~0); +assertEq(f(3), ~3); +assertEq(f(-3), ~-3); +assertEq(f(INT32_MAX), ~INT32_MAX); +assertEq(f(INT32_MIN), ~INT32_MIN); +assertEq(f(UINT32_MAX), ~UINT32_MAX); + var f = asmLink(asmCompile(USE_ASM + "function f(i) { i=+i; var j=0; j=~~i; return j|0 } return f")); assertEq(f(0), 0); assertEq(f(3.5), 3); diff --git a/js/src/jit-test/tests/wasm/basic-control-flow.js b/js/src/jit-test/tests/wasm/basic-control-flow.js index 733d32a4f27c..3eef0c815dbc 100644 --- a/js/src/jit-test/tests/wasm/basic-control-flow.js +++ b/js/src/jit-test/tests/wasm/basic-control-flow.js @@ -273,7 +273,7 @@ assertErrorMessage(() => wasmEvalText('(module (func (result i32) (block (br 0)) assertErrorMessage(() => wasmEvalText('(module (func (result i32) (block (br 0 (f32.const 42)))))'), TypeError, mismatchError("f32", "i32")); assertErrorMessage(() => wasmEvalText(`(module (func (result i32) (param i32) (block (if (get_local 0) (br 0 (i32.const 42))))) (export "" 0))`), TypeError, mismatchError("void", "i32")); -assertErrorMessage(() => wasmEvalText(`(module (func (result i32) (param i32) (block (if (get_local 0) (br 0 (i32.const 42))) (br 0 (f32.const 42)))) (export "" 0))`), TypeError, mismatchError("void", "i32")); +assertErrorMessage(() => wasmEvalText(`(module (func (result i32) (param i32) (block (if (get_local 0) (br 0 (i32.const 42))) (br 0 (f32.const 42)))) (export "" 0))`), TypeError, mismatchError("f32", "i32")); assertEq(wasmEvalText('(module (func (result i32) (block (br 0 (i32.const 42)) (i32.const 13))) (export "" 0))')(), 42); @@ -282,6 +282,10 @@ assertEq(wasmEvalText('(module (func) (func (block (br_if 0 (call 0) (i32.const var f = wasmEvalText(`(module (func (result i32) (param i32) (block (if (get_local 0) (br 0 (i32.const 42))) (i32.const 43))) (export "" 0))`); assertEq(f(0), 43); +assertEq(f(1), 43); + +var f = wasmEvalText(`(module (func (result i32) (param i32) (block (if (get_local 0) (br 1 (i32.const 42))) (i32.const 43))) (export "" 0))`); +assertEq(f(0), 43); assertEq(f(1), 42); var f = wasmEvalText(`(module (func (result i32) (param i32) (block (br_if 0 (i32.const 42) (get_local 0)) (i32.const 43))) (export "" 0))`); @@ -290,6 +294,10 @@ assertEq(f(1), 42); var f = wasmEvalText(`(module (func (result i32) (param i32) (block (if (get_local 0) (br 0 (i32.const 42))) (br 0 (i32.const 43)))) (export "" 0))`); assertEq(f(0), 43); +assertEq(f(1), 43); + +var f = wasmEvalText(`(module (func (result i32) (param i32) (block (if (get_local 0) (br 1 (i32.const 42))) (br 0 (i32.const 43)))) (export "" 0))`); +assertEq(f(0), 43); assertEq(f(1), 42); var f = wasmEvalText(`(module (func (result i32) (param i32) (block (br_if 0 (i32.const 42) (get_local 0)) (br 0 (i32.const 43)))) (export "" 0))`); @@ -298,6 +306,10 @@ assertEq(f(1), 42); var f = wasmEvalText(`(module (func (param i32) (result i32) (i32.add (i32.const 1) (block (if (get_local 0) (br 0 (i32.const 99))) (i32.const -1)))) (export "" 0))`); assertEq(f(0), 0); +assertEq(f(1), 0); + +var f = wasmEvalText(`(module (func (param i32) (result i32) (i32.add (i32.const 1) (block (if (get_local 0) (br 1 (i32.const 99))) (i32.const -1)))) (export "" 0))`); +assertEq(f(0), 0); assertEq(f(1), 100); var f = wasmEvalText(`(module (func (param i32) (result i32) (i32.add (i32.const 1) (block (br_if 0 (i32.const 99) (get_local 0)) (i32.const -1)))) (export "" 0))`); diff --git a/js/src/jit-test/tests/wasm/basic-memory.js b/js/src/jit-test/tests/wasm/basic-memory.js index c9f9489783b8..cbf083f7f8de 100644 --- a/js/src/jit-test/tests/wasm/basic-memory.js +++ b/js/src/jit-test/tests/wasm/basic-memory.js @@ -94,7 +94,7 @@ testLoad('i32', '16_u', 16, 0, 0, 0xf1f0); // When these tests fail, uncomment the load/store tests below. function testLoadNYI(ext) { - assertErrorMessage(() => wasmEvalText(`(module (memory 1) (func (i64.load${ext} (i32.const 0))))`), TypeError, /NYI/); + assertErrorMessage(() => wasmEvalText(`(module (memory 1) (func (i64.load${ext} (i32.const 0))))`), TypeError, /not yet implemented: i64/); } testLoadNYI(''); testLoadNYI('8_s'); @@ -116,7 +116,7 @@ testStore('i32', '', 0, 1, 0, -0x3f3e2c2c); testStore('i32', '', 1, 1, 4, -0x3f3e2c2c); function testStoreNYI(ext) { - assertErrorMessage(() => wasmEvalText(`(module (memory 1) (func (i64.store${ext} (i32.const 0) (i32.const 0))))`), TypeError, /NYI/); + assertErrorMessage(() => wasmEvalText(`(module (memory 1) (func (i64.store${ext} (i32.const 0) (i32.const 0))))`), TypeError, /not yet implemented: i64/); } testStoreNYI(''); testStoreNYI('8'); diff --git a/js/src/jit-test/tests/wasm/binary.js b/js/src/jit-test/tests/wasm/binary.js index f86d5b5ef751..2b3e69d1f63b 100644 --- a/js/src/jit-test/tests/wasm/binary.js +++ b/js/src/jit-test/tests/wasm/binary.js @@ -6,8 +6,8 @@ const magic1 = 0x61; // 'a' const magic2 = 0x73; // 's' const magic3 = 0x6d; // 'm' -// EncodingVersion = 10 (to be changed to 1 at some point in the future) -const ver0 = 0x0a; +// EncodingVersion = 11 (unofficial; to be reset at some point in the future) +const ver0 = 0x0b; const ver1 = 0x00; const ver2 = 0x00; const ver3 = 0x00; @@ -32,8 +32,8 @@ const I64Code = 2; const F32Code = 3; const F64Code = 4; -const NopCode = 0x00; -const BlockCode = 0x01; +const Block = 0x01; +const End = 0x0f; function toU8(array) { for (let b of array) @@ -209,14 +209,8 @@ assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([v2vSig]), decl wasmEval(moduleWithSections([sigSection([v2vSig]), declSection([0,0,0]), tableSection([0,1,0,2]), bodySection([v2vBody, v2vBody, v2vBody])])); wasmEval(moduleWithSections([sigSection([v2vSig,i2vSig]), declSection([0,0,1]), tableSection([0,1,2]), bodySection([v2vBody, v2vBody, v2vBody])])); -// Deep nesting shouldn't crash. With iterative decoding, we should test that -// this doesn't even throw. -try { - var manyBlocks = []; - for (var i = 0; i < 20000; i++) - manyBlocks.push(BlockCode, 1); - manyBlocks.push(NopCode); - wasmEval(moduleWithSections([sigSection([v2vSig]), declSection([0]), bodySection([funcBody({locals:[], body:manyBlocks})])])); -} catch (e) { - assertEq(String(e).indexOf("too much recursion") == -1, false); -} +// Deep nesting shouldn't crash or even throw. +var manyBlocks = []; +for (var i = 0; i < 20000; i++) + manyBlocks.push(Block, End); +wasmEval(moduleWithSections([sigSection([v2vSig]), declSection([0]), bodySection([funcBody({locals:[], body:manyBlocks})])])); diff --git a/js/src/jit-test/tests/wasm/totext1.js b/js/src/jit-test/tests/wasm/totext1.js index c5ce59fd13ba..adbfea3cecd3 100644 --- a/js/src/jit-test/tests/wasm/totext1.js +++ b/js/src/jit-test/tests/wasm/totext1.js @@ -1,6 +1,9 @@ if (!wasmIsSupported()) quit(); +// FIXME: Enable this test once binary-to-text is implemented again. +quit(); + load(libdir + "asserts.js"); var caught = false;