Bug 1377518 - Support wasm opcode prefixes, and use the reserved FF prefix for internal asm.js opcodes. r=sunfish

--HG--
extra : rebase_source : 2559f595c3ebd6bd7894bbaeddb24fcc2a4267b8
This commit is contained in:
Lars T Hansen 2017-06-30 14:56:07 -07:00
Parent 62dbe0378c
Commit ac3f3bb121
11 changed files with 716 additions and 611 deletions

View file

@ -91,6 +91,9 @@ const I64TruncSF64Code = 0xb0;
const I64TruncUF64Code = 0xb1;
// One-byte opcodes in [0xc0, 0xfd] are unassigned and must be rejected.
const FirstInvalidOpcode = 0xc0;
const LastInvalidOpcode = 0xfd;
// 0xfe prefixes the atomics opcode space; 0xff (MozPrefix) is reserved for
// internal asm.js-only opcodes.  Both prefixes are rejected by wasm validation.
const AtomicPrefix = 0xfe;
const MozPrefix = 0xff;
// DefinitionKind
const FunctionCode = 0x00;

View file

@ -436,9 +436,23 @@ assertEq(arr[0].byteLength, nameSec.body.length - 5 /* 4name */);
for (var bad of [0xff, 0, 1, 0x3f])
assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([v2vSig]), declSection([0]), bodySection([funcBody({locals:[], body:[BlockCode, bad, EndCode]})])])), CompileError, /invalid inline block type/);
// Ensure all asm.js opcodes rejected
for (var i = FirstInvalidOpcode; i <= 0xff; i++) {
var binary = moduleWithSections([v2vSigSection, declSection([0]), bodySection([funcBody({locals:[], body:[i]})])]);
// Ensure all invalid opcodes rejected
for (let i = FirstInvalidOpcode; i <= LastInvalidOpcode; i++) {
let binary = moduleWithSections([v2vSigSection, declSection([0]), bodySection([funcBody({locals:[], body:[i]})])]);
assertErrorMessage(() => wasmEval(binary), CompileError, /unrecognized opcode/);
assertEq(WebAssembly.validate(binary), false);
}
// Prefixed opcodes
// Every opcode under the Atomic (0xfe) and Moz (0xff) prefixes must be
// rejected by wasm validation: these prefixed encodings are reserved
// (MozPrefix carries internal asm.js-only opcodes).
for (let prefix of [AtomicPrefix, MozPrefix]) {
for (let i = 0; i <= 255; i++) {
// A prefix byte followed by any second byte is an unrecognized opcode.
let binary = moduleWithSections([v2vSigSection, declSection([0]), bodySection([funcBody({locals:[], body:[prefix, i]})])]);
assertErrorMessage(() => wasmEval(binary), CompileError, /unrecognized opcode/);
assertEq(WebAssembly.validate(binary), false);
}
// A prefix byte with no subsequent opcode (truncated body) must also fail cleanly.
let binary = moduleWithSections([v2vSigSection, declSection([0]), bodySection([funcBody({locals:[], body:[prefix]})])]);
assertErrorMessage(() => wasmEval(binary), CompileError, /unrecognized opcode/);
assertEq(WebAssembly.validate(binary), false);
}

View file

@ -2759,7 +2759,7 @@ IsLiteralInt(ModuleValidator& m, ParseNode* pn, uint32_t* u32)
namespace {
#define CASE(TYPE, OP) case SimdOperation::Fn_##OP: return Op::TYPE##OP;
#define CASE(TYPE, OP) case SimdOperation::Fn_##OP: return MozOp::TYPE##OP;
#define I8x16CASE(OP) CASE(I8x16, OP)
#define I16x8CASE(OP) CASE(I16x8, OP)
#define I32x4CASE(OP) CASE(I32x4, OP)
@ -2769,36 +2769,36 @@ namespace {
#define B32x4CASE(OP) CASE(B32x4, OP)
#define ENUMERATE(TYPE, FOR_ALL, DO) \
switch(op) { \
case SimdOperation::Constructor: return Op::TYPE##Constructor; \
case SimdOperation::Constructor: return MozOp::TYPE##Constructor;\
FOR_ALL(DO) \
default: break; \
}
static inline Op
static inline MozOp
SimdToOp(SimdType type, SimdOperation op)
{
switch (type) {
case SimdType::Uint8x16:
// Handle the special unsigned opcodes, then fall through to Int8x16.
switch (op) {
case SimdOperation::Fn_addSaturate: return Op::I8x16addSaturateU;
case SimdOperation::Fn_subSaturate: return Op::I8x16subSaturateU;
case SimdOperation::Fn_extractLane: return Op::I8x16extractLaneU;
case SimdOperation::Fn_shiftRightByScalar: return Op::I8x16shiftRightByScalarU;
case SimdOperation::Fn_lessThan: return Op::I8x16lessThanU;
case SimdOperation::Fn_lessThanOrEqual: return Op::I8x16lessThanOrEqualU;
case SimdOperation::Fn_greaterThan: return Op::I8x16greaterThanU;
case SimdOperation::Fn_greaterThanOrEqual: return Op::I8x16greaterThanOrEqualU;
case SimdOperation::Fn_fromInt8x16Bits: return Op::Limit;
case SimdOperation::Fn_addSaturate: return MozOp::I8x16addSaturateU;
case SimdOperation::Fn_subSaturate: return MozOp::I8x16subSaturateU;
case SimdOperation::Fn_extractLane: return MozOp::I8x16extractLaneU;
case SimdOperation::Fn_shiftRightByScalar: return MozOp::I8x16shiftRightByScalarU;
case SimdOperation::Fn_lessThan: return MozOp::I8x16lessThanU;
case SimdOperation::Fn_lessThanOrEqual: return MozOp::I8x16lessThanOrEqualU;
case SimdOperation::Fn_greaterThan: return MozOp::I8x16greaterThanU;
case SimdOperation::Fn_greaterThanOrEqual: return MozOp::I8x16greaterThanOrEqualU;
case SimdOperation::Fn_fromInt8x16Bits: return MozOp::Limit;
default: break;
}
MOZ_FALLTHROUGH;
case SimdType::Int8x16:
// Bitcasts Uint8x16 <--> Int8x16 become noops.
switch (op) {
case SimdOperation::Fn_fromUint8x16Bits: return Op::Limit;
case SimdOperation::Fn_fromUint16x8Bits: return Op::I8x16fromInt16x8Bits;
case SimdOperation::Fn_fromUint32x4Bits: return Op::I8x16fromInt32x4Bits;
case SimdOperation::Fn_fromUint8x16Bits: return MozOp::Limit;
case SimdOperation::Fn_fromUint16x8Bits: return MozOp::I8x16fromInt16x8Bits;
case SimdOperation::Fn_fromUint32x4Bits: return MozOp::I8x16fromInt32x4Bits;
default: break;
}
ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
@ -2807,24 +2807,24 @@ SimdToOp(SimdType type, SimdOperation op)
case SimdType::Uint16x8:
// Handle the special unsigned opcodes, then fall through to Int16x8.
switch(op) {
case SimdOperation::Fn_addSaturate: return Op::I16x8addSaturateU;
case SimdOperation::Fn_subSaturate: return Op::I16x8subSaturateU;
case SimdOperation::Fn_extractLane: return Op::I16x8extractLaneU;
case SimdOperation::Fn_shiftRightByScalar: return Op::I16x8shiftRightByScalarU;
case SimdOperation::Fn_lessThan: return Op::I16x8lessThanU;
case SimdOperation::Fn_lessThanOrEqual: return Op::I16x8lessThanOrEqualU;
case SimdOperation::Fn_greaterThan: return Op::I16x8greaterThanU;
case SimdOperation::Fn_greaterThanOrEqual: return Op::I16x8greaterThanOrEqualU;
case SimdOperation::Fn_fromInt16x8Bits: return Op::Limit;
case SimdOperation::Fn_addSaturate: return MozOp::I16x8addSaturateU;
case SimdOperation::Fn_subSaturate: return MozOp::I16x8subSaturateU;
case SimdOperation::Fn_extractLane: return MozOp::I16x8extractLaneU;
case SimdOperation::Fn_shiftRightByScalar: return MozOp::I16x8shiftRightByScalarU;
case SimdOperation::Fn_lessThan: return MozOp::I16x8lessThanU;
case SimdOperation::Fn_lessThanOrEqual: return MozOp::I16x8lessThanOrEqualU;
case SimdOperation::Fn_greaterThan: return MozOp::I16x8greaterThanU;
case SimdOperation::Fn_greaterThanOrEqual: return MozOp::I16x8greaterThanOrEqualU;
case SimdOperation::Fn_fromInt16x8Bits: return MozOp::Limit;
default: break;
}
MOZ_FALLTHROUGH;
case SimdType::Int16x8:
// Bitcasts Uint16x8 <--> Int16x8 become noops.
switch (op) {
case SimdOperation::Fn_fromUint8x16Bits: return Op::I16x8fromInt8x16Bits;
case SimdOperation::Fn_fromUint16x8Bits: return Op::Limit;
case SimdOperation::Fn_fromUint32x4Bits: return Op::I16x8fromInt32x4Bits;
case SimdOperation::Fn_fromUint8x16Bits: return MozOp::I16x8fromInt8x16Bits;
case SimdOperation::Fn_fromUint16x8Bits: return MozOp::Limit;
case SimdOperation::Fn_fromUint32x4Bits: return MozOp::I16x8fromInt32x4Bits;
default: break;
}
ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
@ -2833,22 +2833,22 @@ SimdToOp(SimdType type, SimdOperation op)
case SimdType::Uint32x4:
// Handle the special unsigned opcodes, then fall through to Int32x4.
switch(op) {
case SimdOperation::Fn_shiftRightByScalar: return Op::I32x4shiftRightByScalarU;
case SimdOperation::Fn_lessThan: return Op::I32x4lessThanU;
case SimdOperation::Fn_lessThanOrEqual: return Op::I32x4lessThanOrEqualU;
case SimdOperation::Fn_greaterThan: return Op::I32x4greaterThanU;
case SimdOperation::Fn_greaterThanOrEqual: return Op::I32x4greaterThanOrEqualU;
case SimdOperation::Fn_fromFloat32x4: return Op::I32x4fromFloat32x4U;
case SimdOperation::Fn_fromInt32x4Bits: return Op::Limit;
case SimdOperation::Fn_shiftRightByScalar: return MozOp::I32x4shiftRightByScalarU;
case SimdOperation::Fn_lessThan: return MozOp::I32x4lessThanU;
case SimdOperation::Fn_lessThanOrEqual: return MozOp::I32x4lessThanOrEqualU;
case SimdOperation::Fn_greaterThan: return MozOp::I32x4greaterThanU;
case SimdOperation::Fn_greaterThanOrEqual: return MozOp::I32x4greaterThanOrEqualU;
case SimdOperation::Fn_fromFloat32x4: return MozOp::I32x4fromFloat32x4U;
case SimdOperation::Fn_fromInt32x4Bits: return MozOp::Limit;
default: break;
}
MOZ_FALLTHROUGH;
case SimdType::Int32x4:
// Bitcasts Uint32x4 <--> Int32x4 become noops.
switch (op) {
case SimdOperation::Fn_fromUint8x16Bits: return Op::I32x4fromInt8x16Bits;
case SimdOperation::Fn_fromUint16x8Bits: return Op::I32x4fromInt16x8Bits;
case SimdOperation::Fn_fromUint32x4Bits: return Op::Limit;
case SimdOperation::Fn_fromUint8x16Bits: return MozOp::I32x4fromInt8x16Bits;
case SimdOperation::Fn_fromUint16x8Bits: return MozOp::I32x4fromInt16x8Bits;
case SimdOperation::Fn_fromUint32x4Bits: return MozOp::Limit;
default: break;
}
ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
@ -2856,9 +2856,9 @@ SimdToOp(SimdType type, SimdOperation op)
case SimdType::Float32x4:
switch (op) {
case SimdOperation::Fn_fromUint8x16Bits: return Op::F32x4fromInt8x16Bits;
case SimdOperation::Fn_fromUint16x8Bits: return Op::F32x4fromInt16x8Bits;
case SimdOperation::Fn_fromUint32x4Bits: return Op::F32x4fromInt32x4Bits;
case SimdOperation::Fn_fromUint8x16Bits: return MozOp::F32x4fromInt8x16Bits;
case SimdOperation::Fn_fromUint16x8Bits: return MozOp::F32x4fromInt16x8Bits;
case SimdOperation::Fn_fromUint32x4Bits: return MozOp::F32x4fromInt32x4Bits;
default: break;
}
ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
@ -3195,30 +3195,30 @@ class MOZ_STACK_CLASS FunctionValidator
encoder().writeFixedF64(lit.toDouble());
case NumLit::Int8x16:
case NumLit::Uint8x16:
return encoder().writeOp(Op::I8x16Const) &&
return encoder().writeOp(MozOp::I8x16Const) &&
encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
case NumLit::Int16x8:
case NumLit::Uint16x8:
return encoder().writeOp(Op::I16x8Const) &&
return encoder().writeOp(MozOp::I16x8Const) &&
encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
case NumLit::Int32x4:
case NumLit::Uint32x4:
return encoder().writeOp(Op::I32x4Const) &&
return encoder().writeOp(MozOp::I32x4Const) &&
encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
case NumLit::Float32x4:
return encoder().writeOp(Op::F32x4Const) &&
return encoder().writeOp(MozOp::F32x4Const) &&
encoder().writeFixedF32x4(lit.simdValue().asFloat32x4());
case NumLit::Bool8x16:
// Boolean vectors use the Int8x16 memory representation.
return encoder().writeOp(Op::B8x16Const) &&
return encoder().writeOp(MozOp::B8x16Const) &&
encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
case NumLit::Bool16x8:
// Boolean vectors use the Int16x8 memory representation.
return encoder().writeOp(Op::B16x8Const) &&
return encoder().writeOp(MozOp::B16x8Const) &&
encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
case NumLit::Bool32x4:
// Boolean vectors use the Int32x4 memory representation.
return encoder().writeOp(Op::B32x4Const) &&
return encoder().writeOp(MozOp::B32x4Const) &&
encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
case NumLit::OutOfRangeInt:
break;
@ -3229,12 +3229,16 @@ class MOZ_STACK_CLASS FunctionValidator
return encoder().writeOp(op) &&
fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
}
MOZ_MUST_USE bool writeCall(ParseNode* pn, MozOp op) {
return encoder().writeOp(op) &&
fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
}
MOZ_MUST_USE bool prepareCall(ParseNode* pn) {
return fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
}
MOZ_MUST_USE bool writeSimdOp(SimdType simdType, SimdOperation simdOp) {
Op op = SimdToOp(simdType, simdOp);
if (op == Op::Limit)
MozOp op = SimdToOp(simdType, simdOp);
if (op == MozOp::Limit)
return true;
return encoder().writeOp(op);
}
@ -4237,34 +4241,34 @@ CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type
switch (viewType) {
case Scalar::Int8:
case Scalar::Uint8:
if (!f.encoder().writeOp(Op::I32TeeStore8))
if (!f.encoder().writeOp(MozOp::I32TeeStore8))
return false;
break;
case Scalar::Int16:
case Scalar::Uint16:
if (!f.encoder().writeOp(Op::I32TeeStore16))
if (!f.encoder().writeOp(MozOp::I32TeeStore16))
return false;
break;
case Scalar::Int32:
case Scalar::Uint32:
if (!f.encoder().writeOp(Op::I32TeeStore))
if (!f.encoder().writeOp(MozOp::I32TeeStore))
return false;
break;
case Scalar::Float32:
if (rhsType.isFloatish()) {
if (!f.encoder().writeOp(Op::F32TeeStore))
if (!f.encoder().writeOp(MozOp::F32TeeStore))
return false;
} else {
if (!f.encoder().writeOp(Op::F64TeeStoreF32))
if (!f.encoder().writeOp(MozOp::F64TeeStoreF32))
return false;
}
break;
case Scalar::Float64:
if (rhsType.isFloatish()) {
if (!f.encoder().writeOp(Op::F32TeeStoreF64))
if (!f.encoder().writeOp(MozOp::F32TeeStoreF64))
return false;
} else {
if (!f.encoder().writeOp(Op::F64TeeStore))
if (!f.encoder().writeOp(MozOp::F64TeeStore))
return false;
}
break;
@ -4312,7 +4316,7 @@ CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type
Type globType = global->varOrConstType();
if (!(rhsType <= globType))
return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(), globType.toChars());
if (!f.encoder().writeOp(Op::TeeGlobal))
if (!f.encoder().writeOp(MozOp::TeeGlobal))
return false;
if (!f.encoder().writeVarU32(global->varOrConstIndex()))
return false;
@ -4400,7 +4404,7 @@ CheckMathAbs(FunctionValidator& f, ParseNode* call, Type* type)
if (argType.isSigned()) {
*type = Type::Unsigned;
return f.encoder().writeOp(Op::I32Abs);
return f.encoder().writeOp(MozOp::I32Abs);
}
if (argType.isMaybeDouble()) {
@ -4452,7 +4456,8 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ
if (!CheckExpr(f, firstArg, &firstType))
return false;
Op op;
Op op = Op::Limit;
MozOp mozOp = MozOp::Limit;
if (firstType.isMaybeDouble()) {
*type = Type::Double;
firstType = Type::MaybeDouble;
@ -4464,7 +4469,7 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ
} else if (firstType.isSigned()) {
*type = Type::Signed;
firstType = Type::Signed;
op = isMax ? Op::I32Max : Op::I32Min;
mozOp = isMax ? MozOp::I32Max : MozOp::I32Min;
} else {
return f.failf(firstArg, "%s is not a subtype of double?, float? or signed",
firstType.toChars());
@ -4479,8 +4484,13 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ
if (!(nextType <= firstType))
return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(), firstType.toChars());
if (!f.encoder().writeOp(op))
return false;
if (op != Op::Limit) {
if (!f.encoder().writeOp(op))
return false;
} else {
if (!f.encoder().writeOp(mozOp))
return false;
}
}
return true;
@ -4516,7 +4526,7 @@ CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNod
}
static bool
WriteAtomicOperator(FunctionValidator& f, Op opcode, Scalar::Type viewType)
WriteAtomicOperator(FunctionValidator& f, MozOp opcode, Scalar::Type viewType)
{
return f.encoder().writeOp(opcode) &&
f.encoder().writeFixedU8(viewType);
@ -4535,7 +4545,7 @@ CheckAtomicsLoad(FunctionValidator& f, ParseNode* call, Type* type)
if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
return false;
if (!WriteAtomicOperator(f, Op::I32AtomicsLoad, viewType))
if (!WriteAtomicOperator(f, MozOp::I32AtomicsLoad, viewType))
return false;
if (!WriteArrayAccessFlags(f, viewType))
@ -4566,7 +4576,7 @@ CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type)
if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
return false;
if (!WriteAtomicOperator(f, Op::I32AtomicsStore, viewType))
if (!WriteAtomicOperator(f, MozOp::I32AtomicsStore, viewType))
return false;
if (!WriteArrayAccessFlags(f, viewType))
@ -4597,7 +4607,7 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op
if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
return false;
if (!WriteAtomicOperator(f, Op::I32AtomicsBinOp, viewType))
if (!WriteAtomicOperator(f, MozOp::I32AtomicsBinOp, viewType))
return false;
if (!f.encoder().writeFixedU8(uint8_t(op)))
return false;
@ -4654,7 +4664,7 @@ CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type)
if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
return false;
if (!WriteAtomicOperator(f, Op::I32AtomicsCompareExchange, viewType))
if (!WriteAtomicOperator(f, MozOp::I32AtomicsCompareExchange, viewType))
return false;
if (!WriteArrayAccessFlags(f, viewType))
@ -4685,7 +4695,7 @@ CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type)
if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
return false;
if (!WriteAtomicOperator(f, Op::I32AtomicsExchange, viewType))
if (!WriteAtomicOperator(f, MozOp::I32AtomicsExchange, viewType))
return false;
if (!WriteArrayAccessFlags(f, viewType))
@ -4901,7 +4911,7 @@ CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, Type ret, Type* type
if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, Move(sig), mask, &tableIndex))
return false;
if (!f.writeCall(callNode, Op::OldCallIndirect))
if (!f.writeCall(callNode, MozOp::OldCallIndirect))
return false;
// Call signature
@ -5018,8 +5028,9 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin
Type* type)
{
unsigned arity = 0;
Op f32;
Op f64;
Op f32 = Op::Limit;
Op f64 = Op::Limit;
MozOp mozf64 = MozOp::Limit;
switch (func) {
case AsmJSMathBuiltin_imul: return CheckMathIMul(f, callNode, type);
case AsmJSMathBuiltin_clz32: return CheckMathClz32(f, callNode, type);
@ -5028,18 +5039,18 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin
case AsmJSMathBuiltin_fround: return CheckMathFRound(f, callNode, type);
case AsmJSMathBuiltin_min: return CheckMathMinMax(f, callNode, /* isMax = */ false, type);
case AsmJSMathBuiltin_max: return CheckMathMinMax(f, callNode, /* isMax = */ true, type);
case AsmJSMathBuiltin_ceil: arity = 1; f64 = Op::F64Ceil; f32 = Op::F32Ceil; break;
case AsmJSMathBuiltin_floor: arity = 1; f64 = Op::F64Floor; f32 = Op::F32Floor; break;
case AsmJSMathBuiltin_sin: arity = 1; f64 = Op::F64Sin; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_cos: arity = 1; f64 = Op::F64Cos; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_tan: arity = 1; f64 = Op::F64Tan; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_asin: arity = 1; f64 = Op::F64Asin; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_acos: arity = 1; f64 = Op::F64Acos; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_atan: arity = 1; f64 = Op::F64Atan; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_exp: arity = 1; f64 = Op::F64Exp; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_log: arity = 1; f64 = Op::F64Log; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_pow: arity = 2; f64 = Op::F64Pow; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_atan2: arity = 2; f64 = Op::F64Atan2; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_ceil: arity = 1; f64 = Op::F64Ceil; f32 = Op::F32Ceil; break;
case AsmJSMathBuiltin_floor: arity = 1; f64 = Op::F64Floor; f32 = Op::F32Floor; break;
case AsmJSMathBuiltin_sin: arity = 1; mozf64 = MozOp::F64Sin; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_cos: arity = 1; mozf64 = MozOp::F64Cos; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_tan: arity = 1; mozf64 = MozOp::F64Tan; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_asin: arity = 1; mozf64 = MozOp::F64Asin; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_acos: arity = 1; mozf64 = MozOp::F64Acos; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_atan: arity = 1; mozf64 = MozOp::F64Atan; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_exp: arity = 1; mozf64 = MozOp::F64Exp; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_log: arity = 1; mozf64 = MozOp::F64Log; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_pow: arity = 2; mozf64 = MozOp::F64Pow; f32 = Op::Unreachable; break;
case AsmJSMathBuiltin_atan2: arity = 2; mozf64 = MozOp::F64Atan2; f32 = Op::Unreachable; break;
default: MOZ_CRASH("unexpected mathBuiltin function");
}
@ -5075,8 +5086,13 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin
}
if (opIsDouble) {
if (!f.encoder().writeOp(f64))
return false;
if (f64 != Op::Limit) {
if (!f.encoder().writeOp(f64))
return false;
} else {
if (!f.encoder().writeOp(mozf64))
return false;
}
} else {
if (!f.encoder().writeOp(f32))
return false;
@ -5933,7 +5949,7 @@ CheckNeg(FunctionValidator& f, ParseNode* expr, Type* type)
if (operandType.isInt()) {
*type = Type::Intish;
return f.encoder().writeOp(Op::I32Neg);
return f.encoder().writeOp(MozOp::I32Neg);
}
if (operandType.isMaybeDouble()) {
@ -5988,7 +6004,7 @@ CheckBitNot(FunctionValidator& f, ParseNode* neg, Type* type)
if (!operandType.isIntish())
return f.failf(operand, "%s is not a subtype of intish", operandType.toChars());
if (!f.encoder().writeOp(Op::I32BitNot))
if (!f.encoder().writeOp(MozOp::I32BitNot))
return false;
*type = Type::Signed;
@ -6223,7 +6239,9 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type)
if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
*type = Type::Double;
return f.encoder().writeOp(expr->isKind(PNK_DIV) ? Op::F64Div : Op::F64Mod);
if (expr->isKind(PNK_DIV))
return f.encoder().writeOp(Op::F64Div);
return f.encoder().writeOp(MozOp::F64Mod);
}
if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {

View file

@ -5146,7 +5146,9 @@ BaseCompiler::sniffConditionalControlCmp(Cond compareOp, ValType operandType)
{
MOZ_ASSERT(latentOp_ == LatentOp::None, "Latent comparison state not properly reset");
switch (iter_.peekOp()) {
OpBytes op;
iter_.peekOp(&op);
switch (op.b0) {
case uint16_t(Op::Select):
#ifdef JS_CODEGEN_X86
// On x86, with only 5 available registers, a latent i64 binary
@ -5169,7 +5171,9 @@ BaseCompiler::sniffConditionalControlEqz(ValType operandType)
{
MOZ_ASSERT(latentOp_ == LatentOp::None, "Latent comparison state not properly reset");
switch (iter_.peekOp()) {
OpBytes op;
iter_.peekOp(&op);
switch (op.b0) {
case uint16_t(Op::BrIf):
case uint16_t(Op::Select):
case uint16_t(Op::If):
@ -6885,11 +6889,11 @@ BaseCompiler::emitBody()
overhead--;
uint16_t op;
OpBytes op;
CHECK(iter_.readOp(&op));
// When debugEnabled_, every operator has breakpoint site but Op::End.
if (debugEnabled_ && op != (uint16_t)Op::End) {
if (debugEnabled_ && op.b0 != (uint16_t)Op::End) {
// TODO sync only registers that can be clobbered by the exit
// prologue/epilogue or disable these registers for use in
// baseline compiler when debugEnabled_ is set.
@ -6898,7 +6902,7 @@ BaseCompiler::emitBody()
insertBreakablePoint(CallSiteDesc::Breakpoint);
}
switch (op) {
switch (op.b0) {
case uint16_t(Op::End):
if (!emitEnd())
return false;
@ -7366,7 +7370,7 @@ BaseCompiler::emitBody()
CHECK_NEXT(emitCurrentMemory());
default:
return iter_.unrecognizedOpcode(op);
return iter_.unrecognizedOpcode(&op);
}
#undef CHECK

View file

@ -306,12 +306,26 @@ enum class Op
F32ReinterpretI32 = 0xbe,
F64ReinterpretI64 = 0xbf,
// ------------------------------------------------------------------------
// The rest of these operators are currently only emitted internally when
// compiling asm.js and are rejected by wasm validation.
AtomicPrefix = 0xfe,
MozPrefix = 0xff,
// asm.js-specific operators
TeeGlobal = 0xc8,
Limit = 0x100
};
inline bool
IsPrefixByte(uint8_t b) {
return b >= uint8_t(Op::AtomicPrefix);
}
enum class MozOp
{
// ------------------------------------------------------------------------
// These operators are emitted internally when compiling asm.js and are
// rejected by wasm validation. They are prefixed by MozPrefix.
// asm.js-specific operators. They start at 1 so as to check for
// uninitialized (zeroed) storage.
TeeGlobal = 0x01,
I32Min,
I32Max,
I32Neg,
@ -429,6 +443,20 @@ enum class Op
Limit
};
// Decoded (possibly prefixed) opcode: b0 is the leading byte, b1 the byte
// following a prefix.  Both are widened to 16 bits so the full 256-value
// byte range plus a distinct Limit sentinel can be represented.
struct OpBytes
{
    uint16_t b0;
    uint16_t b1;

    // An unprefixed Op occupies b0 alone; b1 is cleared.
    explicit OpBytes(Op x)
      : b0(uint16_t(x)),
        b1(0)
    {}
    // Default construction leaves the bytes uninitialized, as before.
    OpBytes() = default;
};
static const char NameSectionName[] = "name";
static const char SourceMappingURLSectionName[] = "sourceMappingURL";

View file

@ -24,9 +24,9 @@ using namespace js::wasm;
#ifdef DEBUG
OpKind
wasm::Classify(Op op)
wasm::Classify(OpBytes op)
{
switch (op) {
switch (Op(op.b0)) {
case Op::Block:
return OpKind::Block;
case Op::Loop:
@ -43,20 +43,6 @@ wasm::Classify(Op op)
return OpKind::F32;
case Op::F64Const:
return OpKind::F64;
case Op::I8x16Const:
return OpKind::I8x16;
case Op::I16x8Const:
return OpKind::I16x8;
case Op::I32x4Const:
return OpKind::I32x4;
case Op::B8x16Const:
return OpKind::B8x16;
case Op::B16x8Const:
return OpKind::B16x8;
case Op::B32x4Const:
return OpKind::B32x4;
case Op::F32x4Const:
return OpKind::F32x4;
case Op::Br:
return OpKind::Br;
case Op::BrIf:
@ -85,31 +71,6 @@ wasm::Classify(Op op)
case Op::F64Trunc:
case Op::F64Nearest:
case Op::F64Sqrt:
case Op::I32BitNot:
case Op::I32Abs:
case Op::F64Sin:
case Op::F64Cos:
case Op::F64Tan:
case Op::F64Asin:
case Op::F64Acos:
case Op::F64Atan:
case Op::F64Exp:
case Op::F64Log:
case Op::I32Neg:
case Op::I8x16neg:
case Op::I8x16not:
case Op::I16x8neg:
case Op::I16x8not:
case Op::I32x4neg:
case Op::I32x4not:
case Op::F32x4neg:
case Op::F32x4sqrt:
case Op::F32x4abs:
case Op::F32x4reciprocalApproximation:
case Op::F32x4reciprocalSqrtApproximation:
case Op::B8x16not:
case Op::B16x8not:
case Op::B32x4not:
return OpKind::Unary;
case Op::I32Add:
case Op::I32Sub:
@ -155,54 +116,6 @@ wasm::Classify(Op op)
case Op::F64Min:
case Op::F64Max:
case Op::F64CopySign:
case Op::I32Min:
case Op::I32Max:
case Op::F64Mod:
case Op::F64Pow:
case Op::F64Atan2:
case Op::I8x16add:
case Op::I8x16sub:
case Op::I8x16mul:
case Op::I8x16addSaturate:
case Op::I8x16subSaturate:
case Op::I8x16addSaturateU:
case Op::I8x16subSaturateU:
case Op::I8x16and:
case Op::I8x16or:
case Op::I8x16xor:
case Op::I16x8add:
case Op::I16x8sub:
case Op::I16x8mul:
case Op::I16x8addSaturate:
case Op::I16x8subSaturate:
case Op::I16x8addSaturateU:
case Op::I16x8subSaturateU:
case Op::I16x8and:
case Op::I16x8or:
case Op::I16x8xor:
case Op::I32x4add:
case Op::I32x4sub:
case Op::I32x4mul:
case Op::I32x4and:
case Op::I32x4or:
case Op::I32x4xor:
case Op::F32x4add:
case Op::F32x4sub:
case Op::F32x4mul:
case Op::F32x4div:
case Op::F32x4min:
case Op::F32x4max:
case Op::F32x4minNum:
case Op::F32x4maxNum:
case Op::B8x16and:
case Op::B8x16or:
case Op::B8x16xor:
case Op::B16x8and:
case Op::B16x8or:
case Op::B16x8xor:
case Op::B32x4and:
case Op::B32x4or:
case Op::B32x4xor:
return OpKind::Binary;
case Op::I32Eq:
case Op::I32Ne:
@ -264,22 +177,6 @@ wasm::Classify(Op op)
case Op::F64ConvertUI64:
case Op::F64ReinterpretI64:
case Op::F64PromoteF32:
case Op::I32x4fromFloat32x4:
case Op::I32x4fromFloat32x4U:
case Op::F32x4fromInt32x4:
case Op::F32x4fromUint32x4:
case Op::I32x4fromFloat32x4Bits:
case Op::I32x4fromInt8x16Bits:
case Op::I32x4fromInt16x8Bits:
case Op::I16x8fromInt8x16Bits:
case Op::I16x8fromInt32x4Bits:
case Op::I16x8fromFloat32x4Bits:
case Op::I8x16fromInt16x8Bits:
case Op::I8x16fromInt32x4Bits:
case Op::I8x16fromFloat32x4Bits:
case Op::F32x4fromInt8x16Bits:
case Op::F32x4fromInt16x8Bits:
case Op::F32x4fromInt32x4Bits:
return OpKind::Conversion;
case Op::I32Load8S:
case Op::I32Load8U:
@ -295,16 +192,6 @@ wasm::Classify(Op op)
case Op::I64Load:
case Op::F32Load:
case Op::F64Load:
case Op::I8x16load:
case Op::I16x8load:
case Op::I32x4load:
case Op::I32x4load1:
case Op::I32x4load2:
case Op::I32x4load3:
case Op::F32x4load:
case Op::F32x4load1:
case Op::F32x4load2:
case Op::F32x4load3:
return OpKind::Load;
case Op::I32Store8:
case Op::I32Store16:
@ -316,28 +203,6 @@ wasm::Classify(Op op)
case Op::F32Store:
case Op::F64Store:
return OpKind::Store;
case Op::I32TeeStore8:
case Op::I32TeeStore16:
case Op::I64TeeStore8:
case Op::I64TeeStore16:
case Op::I64TeeStore32:
case Op::I32TeeStore:
case Op::I64TeeStore:
case Op::F32TeeStore:
case Op::F64TeeStore:
case Op::F32TeeStoreF64:
case Op::F64TeeStoreF32:
case Op::I8x16store:
case Op::I16x8store:
case Op::I32x4store:
case Op::I32x4store1:
case Op::I32x4store2:
case Op::I32x4store3:
case Op::F32x4store:
case Op::F32x4store1:
case Op::F32x4store2:
case Op::F32x4store3:
return OpKind::TeeStore;
case Op::Select:
return OpKind::Select;
case Op::GetLocal:
@ -350,14 +215,10 @@ wasm::Classify(Op op)
return OpKind::GetGlobal;
case Op::SetGlobal:
return OpKind::SetGlobal;
case Op::TeeGlobal:
return OpKind::TeeGlobal;
case Op::Call:
return OpKind::Call;
case Op::CallIndirect:
return OpKind::CallIndirect;
case Op::OldCallIndirect:
return OpKind::OldCallIndirect;
case Op::Return:
case Op::Limit:
// Accept Limit, for use in decoding the end of a function after the body.
@ -368,130 +229,285 @@ wasm::Classify(Op op)
return OpKind::Else;
case Op::End:
return OpKind::End;
case Op::I32AtomicsLoad:
return OpKind::AtomicLoad;
case Op::I32AtomicsStore:
return OpKind::AtomicStore;
case Op::I32AtomicsBinOp:
return OpKind::AtomicBinOp;
case Op::I32AtomicsCompareExchange:
return OpKind::AtomicCompareExchange;
case Op::I32AtomicsExchange:
return OpKind::AtomicExchange;
case Op::I8x16extractLane:
case Op::I8x16extractLaneU:
case Op::I16x8extractLane:
case Op::I16x8extractLaneU:
case Op::I32x4extractLane:
case Op::F32x4extractLane:
case Op::B8x16extractLane:
case Op::B16x8extractLane:
case Op::B32x4extractLane:
return OpKind::ExtractLane;
case Op::I8x16replaceLane:
case Op::I16x8replaceLane:
case Op::I32x4replaceLane:
case Op::F32x4replaceLane:
case Op::B8x16replaceLane:
case Op::B16x8replaceLane:
case Op::B32x4replaceLane:
return OpKind::ReplaceLane;
case Op::I8x16swizzle:
case Op::I16x8swizzle:
case Op::I32x4swizzle:
case Op::F32x4swizzle:
return OpKind::Swizzle;
case Op::I8x16shuffle:
case Op::I16x8shuffle:
case Op::I32x4shuffle:
case Op::F32x4shuffle:
return OpKind::Shuffle;
case Op::I16x8check:
case Op::I16x8splat:
case Op::I32x4check:
case Op::I32x4splat:
case Op::I8x16check:
case Op::I8x16splat:
case Op::F32x4check:
case Op::F32x4splat:
case Op::B16x8check:
case Op::B16x8splat:
case Op::B32x4check:
case Op::B32x4splat:
case Op::B8x16check:
case Op::B8x16splat:
return OpKind::Splat;
case Op::I8x16select:
case Op::I16x8select:
case Op::I32x4select:
case Op::F32x4select:
return OpKind::SimdSelect;
case Op::I8x16Constructor:
case Op::I16x8Constructor:
case Op::I32x4Constructor:
case Op::F32x4Constructor:
case Op::B8x16Constructor:
case Op::B16x8Constructor:
case Op::B32x4Constructor:
return OpKind::SimdCtor;
case Op::B8x16allTrue:
case Op::B8x16anyTrue:
case Op::B16x8allTrue:
case Op::B16x8anyTrue:
case Op::B32x4allTrue:
case Op::B32x4anyTrue:
return OpKind::SimdBooleanReduction;
case Op::I8x16shiftLeftByScalar:
case Op::I8x16shiftRightByScalar:
case Op::I8x16shiftRightByScalarU:
case Op::I16x8shiftLeftByScalar:
case Op::I16x8shiftRightByScalar:
case Op::I16x8shiftRightByScalarU:
case Op::I32x4shiftLeftByScalar:
case Op::I32x4shiftRightByScalar:
case Op::I32x4shiftRightByScalarU:
return OpKind::SimdShiftByScalar;
case Op::I8x16equal:
case Op::I8x16notEqual:
case Op::I8x16greaterThan:
case Op::I8x16greaterThanOrEqual:
case Op::I8x16lessThan:
case Op::I8x16lessThanOrEqual:
case Op::I8x16greaterThanU:
case Op::I8x16greaterThanOrEqualU:
case Op::I8x16lessThanU:
case Op::I8x16lessThanOrEqualU:
case Op::I16x8equal:
case Op::I16x8notEqual:
case Op::I16x8greaterThan:
case Op::I16x8greaterThanOrEqual:
case Op::I16x8lessThan:
case Op::I16x8lessThanOrEqual:
case Op::I16x8greaterThanU:
case Op::I16x8greaterThanOrEqualU:
case Op::I16x8lessThanU:
case Op::I16x8lessThanOrEqualU:
case Op::I32x4equal:
case Op::I32x4notEqual:
case Op::I32x4greaterThan:
case Op::I32x4greaterThanOrEqual:
case Op::I32x4lessThan:
case Op::I32x4lessThanOrEqual:
case Op::I32x4greaterThanU:
case Op::I32x4greaterThanOrEqualU:
case Op::I32x4lessThanU:
case Op::I32x4lessThanOrEqualU:
case Op::F32x4equal:
case Op::F32x4notEqual:
case Op::F32x4greaterThan:
case Op::F32x4greaterThanOrEqual:
case Op::F32x4lessThan:
case Op::F32x4lessThanOrEqual:
return OpKind::SimdComparison;
case Op::CurrentMemory:
return OpKind::CurrentMemory;
case Op::GrowMemory:
return OpKind::GrowMemory;
case Op::AtomicPrefix:
break;
case Op::MozPrefix: {
switch (MozOp(op.b1)) {
case MozOp::Limit:
// Reject Limit for the MozPrefix encoding
break;
case MozOp::TeeGlobal:
return OpKind::TeeGlobal;
case MozOp::I8x16Const:
return OpKind::I8x16;
case MozOp::I16x8Const:
return OpKind::I16x8;
case MozOp::I32x4Const:
return OpKind::I32x4;
case MozOp::B8x16Const:
return OpKind::B8x16;
case MozOp::B16x8Const:
return OpKind::B16x8;
case MozOp::B32x4Const:
return OpKind::B32x4;
case MozOp::F32x4Const:
return OpKind::F32x4;
case MozOp::I32BitNot:
case MozOp::I32Abs:
case MozOp::I32Neg:
case MozOp::I8x16neg:
case MozOp::I8x16not:
case MozOp::I16x8neg:
case MozOp::I16x8not:
case MozOp::I32x4neg:
case MozOp::I32x4not:
case MozOp::F32x4neg:
case MozOp::F32x4sqrt:
case MozOp::F32x4abs:
case MozOp::F32x4reciprocalApproximation:
case MozOp::F32x4reciprocalSqrtApproximation:
case MozOp::B8x16not:
case MozOp::B16x8not:
case MozOp::B32x4not:
return OpKind::Unary;
case MozOp::I32Min:
case MozOp::I32Max:
case MozOp::F64Mod:
case MozOp::F64Pow:
case MozOp::F64Atan2:
case MozOp::I8x16add:
case MozOp::I8x16sub:
case MozOp::I8x16mul:
case MozOp::I8x16addSaturate:
case MozOp::I8x16subSaturate:
case MozOp::I8x16addSaturateU:
case MozOp::I8x16subSaturateU:
case MozOp::I8x16and:
case MozOp::I8x16or:
case MozOp::I8x16xor:
case MozOp::I16x8add:
case MozOp::I16x8sub:
case MozOp::I16x8mul:
case MozOp::I16x8addSaturate:
case MozOp::I16x8subSaturate:
case MozOp::I16x8addSaturateU:
case MozOp::I16x8subSaturateU:
case MozOp::I16x8and:
case MozOp::I16x8or:
case MozOp::I16x8xor:
case MozOp::I32x4add:
case MozOp::I32x4sub:
case MozOp::I32x4mul:
case MozOp::I32x4and:
case MozOp::I32x4or:
case MozOp::I32x4xor:
case MozOp::F32x4add:
case MozOp::F32x4sub:
case MozOp::F32x4mul:
case MozOp::F32x4div:
case MozOp::F32x4min:
case MozOp::F32x4max:
case MozOp::F32x4minNum:
case MozOp::F32x4maxNum:
case MozOp::B8x16and:
case MozOp::B8x16or:
case MozOp::B8x16xor:
case MozOp::B16x8and:
case MozOp::B16x8or:
case MozOp::B16x8xor:
case MozOp::B32x4and:
case MozOp::B32x4or:
case MozOp::B32x4xor:
return OpKind::Binary;
case MozOp::F64Sin:
case MozOp::F64Cos:
case MozOp::F64Tan:
case MozOp::F64Asin:
case MozOp::F64Acos:
case MozOp::F64Atan:
case MozOp::F64Exp:
case MozOp::F64Log:
return OpKind::Unary;
case MozOp::I32TeeStore8:
case MozOp::I32TeeStore16:
case MozOp::I64TeeStore8:
case MozOp::I64TeeStore16:
case MozOp::I64TeeStore32:
case MozOp::I32TeeStore:
case MozOp::I64TeeStore:
case MozOp::F32TeeStore:
case MozOp::F64TeeStore:
case MozOp::F32TeeStoreF64:
case MozOp::F64TeeStoreF32:
return OpKind::TeeStore;
case MozOp::I32x4fromFloat32x4:
case MozOp::I32x4fromFloat32x4U:
case MozOp::F32x4fromInt32x4:
case MozOp::F32x4fromUint32x4:
case MozOp::I32x4fromFloat32x4Bits:
case MozOp::I32x4fromInt8x16Bits:
case MozOp::I32x4fromInt16x8Bits:
case MozOp::I16x8fromInt8x16Bits:
case MozOp::I16x8fromInt32x4Bits:
case MozOp::I16x8fromFloat32x4Bits:
case MozOp::I8x16fromInt16x8Bits:
case MozOp::I8x16fromInt32x4Bits:
case MozOp::I8x16fromFloat32x4Bits:
case MozOp::F32x4fromInt8x16Bits:
case MozOp::F32x4fromInt16x8Bits:
case MozOp::F32x4fromInt32x4Bits:
return OpKind::Conversion;
case MozOp::I8x16load:
case MozOp::I16x8load:
case MozOp::I32x4load:
case MozOp::I32x4load1:
case MozOp::I32x4load2:
case MozOp::I32x4load3:
case MozOp::F32x4load:
case MozOp::F32x4load1:
case MozOp::F32x4load2:
case MozOp::F32x4load3:
return OpKind::Load;
case MozOp::I8x16store:
case MozOp::I16x8store:
case MozOp::I32x4store:
case MozOp::I32x4store1:
case MozOp::I32x4store2:
case MozOp::I32x4store3:
case MozOp::F32x4store:
case MozOp::F32x4store1:
case MozOp::F32x4store2:
case MozOp::F32x4store3:
return OpKind::TeeStore;
case MozOp::OldCallIndirect:
return OpKind::OldCallIndirect;
case MozOp::I32AtomicsLoad:
return OpKind::AtomicLoad;
case MozOp::I32AtomicsStore:
return OpKind::AtomicStore;
case MozOp::I32AtomicsBinOp:
return OpKind::AtomicBinOp;
case MozOp::I32AtomicsCompareExchange:
return OpKind::AtomicCompareExchange;
case MozOp::I32AtomicsExchange:
return OpKind::AtomicExchange;
case MozOp::I8x16extractLane:
case MozOp::I8x16extractLaneU:
case MozOp::I16x8extractLane:
case MozOp::I16x8extractLaneU:
case MozOp::I32x4extractLane:
case MozOp::F32x4extractLane:
case MozOp::B8x16extractLane:
case MozOp::B16x8extractLane:
case MozOp::B32x4extractLane:
return OpKind::ExtractLane;
case MozOp::I8x16replaceLane:
case MozOp::I16x8replaceLane:
case MozOp::I32x4replaceLane:
case MozOp::F32x4replaceLane:
case MozOp::B8x16replaceLane:
case MozOp::B16x8replaceLane:
case MozOp::B32x4replaceLane:
return OpKind::ReplaceLane;
case MozOp::I8x16swizzle:
case MozOp::I16x8swizzle:
case MozOp::I32x4swizzle:
case MozOp::F32x4swizzle:
return OpKind::Swizzle;
case MozOp::I8x16shuffle:
case MozOp::I16x8shuffle:
case MozOp::I32x4shuffle:
case MozOp::F32x4shuffle:
return OpKind::Shuffle;
case MozOp::I16x8check:
case MozOp::I16x8splat:
case MozOp::I32x4check:
case MozOp::I32x4splat:
case MozOp::I8x16check:
case MozOp::I8x16splat:
case MozOp::F32x4check:
case MozOp::F32x4splat:
case MozOp::B16x8check:
case MozOp::B16x8splat:
case MozOp::B32x4check:
case MozOp::B32x4splat:
case MozOp::B8x16check:
case MozOp::B8x16splat:
return OpKind::Splat;
case MozOp::I8x16select:
case MozOp::I16x8select:
case MozOp::I32x4select:
case MozOp::F32x4select:
return OpKind::SimdSelect;
case MozOp::I8x16Constructor:
case MozOp::I16x8Constructor:
case MozOp::I32x4Constructor:
case MozOp::F32x4Constructor:
case MozOp::B8x16Constructor:
case MozOp::B16x8Constructor:
case MozOp::B32x4Constructor:
return OpKind::SimdCtor;
case MozOp::B8x16allTrue:
case MozOp::B8x16anyTrue:
case MozOp::B16x8allTrue:
case MozOp::B16x8anyTrue:
case MozOp::B32x4allTrue:
case MozOp::B32x4anyTrue:
return OpKind::SimdBooleanReduction;
case MozOp::I8x16shiftLeftByScalar:
case MozOp::I8x16shiftRightByScalar:
case MozOp::I8x16shiftRightByScalarU:
case MozOp::I16x8shiftLeftByScalar:
case MozOp::I16x8shiftRightByScalar:
case MozOp::I16x8shiftRightByScalarU:
case MozOp::I32x4shiftLeftByScalar:
case MozOp::I32x4shiftRightByScalar:
case MozOp::I32x4shiftRightByScalarU:
return OpKind::SimdShiftByScalar;
case MozOp::I8x16equal:
case MozOp::I8x16notEqual:
case MozOp::I8x16greaterThan:
case MozOp::I8x16greaterThanOrEqual:
case MozOp::I8x16lessThan:
case MozOp::I8x16lessThanOrEqual:
case MozOp::I8x16greaterThanU:
case MozOp::I8x16greaterThanOrEqualU:
case MozOp::I8x16lessThanU:
case MozOp::I8x16lessThanOrEqualU:
case MozOp::I16x8equal:
case MozOp::I16x8notEqual:
case MozOp::I16x8greaterThan:
case MozOp::I16x8greaterThanOrEqual:
case MozOp::I16x8lessThan:
case MozOp::I16x8lessThanOrEqual:
case MozOp::I16x8greaterThanU:
case MozOp::I16x8greaterThanOrEqualU:
case MozOp::I16x8lessThanU:
case MozOp::I16x8lessThanOrEqualU:
case MozOp::I32x4equal:
case MozOp::I32x4notEqual:
case MozOp::I32x4greaterThan:
case MozOp::I32x4greaterThanOrEqual:
case MozOp::I32x4lessThan:
case MozOp::I32x4lessThanOrEqual:
case MozOp::I32x4greaterThanU:
case MozOp::I32x4greaterThanOrEqualU:
case MozOp::I32x4lessThanU:
case MozOp::I32x4lessThanOrEqualU:
case MozOp::F32x4equal:
case MozOp::F32x4notEqual:
case MozOp::F32x4greaterThan:
case MozOp::F32x4greaterThanOrEqual:
case MozOp::F32x4lessThan:
case MozOp::F32x4lessThanOrEqual:
return OpKind::SimdComparison;
}
break;
}
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unimplemented opcode");
}

Просмотреть файл

@ -158,7 +158,7 @@ enum class OpKind {
// Return the OpKind for a given Op. This is used for sanity-checking that
// API users use the correct read function for a given Op.
OpKind
Classify(Op op);
Classify(OpBytes op);
#endif
// Common fields for linear memory access.
@ -310,7 +310,9 @@ class MOZ_STACK_CLASS OpIter : private Policy
Vector<TypeAndValue<Value>, 8, SystemAllocPolicy> valueStack_;
Vector<ControlStackEntry<ControlItem>, 8, SystemAllocPolicy> controlStack_;
DebugOnly<Op> op_;
#ifdef DEBUG
OpBytes op_;
#endif
size_t offsetOfLastReadOp_;
MOZ_MUST_USE bool readFixedU8(uint8_t* out) {
@ -426,9 +428,15 @@ class MOZ_STACK_CLASS OpIter : private Policy
public:
typedef Vector<Value, 8, SystemAllocPolicy> ValueVector;
#ifdef DEBUG
explicit OpIter(const ModuleEnvironment& env, Decoder& decoder)
: d_(decoder), env_(env), op_(Op::Limit), offsetOfLastReadOp_(0)
: d_(decoder), env_(env), op_(OpBytes(Op::Limit)), offsetOfLastReadOp_(0)
{}
#else
explicit OpIter(const ModuleEnvironment& env, Decoder& decoder)
: d_(decoder), env_(env), offsetOfLastReadOp_(0)
{}
#endif
// Return the decoding byte offset.
uint32_t currentOffset() const {
@ -459,7 +467,7 @@ class MOZ_STACK_CLASS OpIter : private Policy
MOZ_MUST_USE bool fail(const char* msg) MOZ_COLD;
// Report an unrecognized opcode.
MOZ_MUST_USE bool unrecognizedOpcode(uint32_t expr) MOZ_COLD;
MOZ_MUST_USE bool unrecognizedOpcode(const OpBytes* expr) MOZ_COLD;
// Return whether the innermost block has a polymorphic base of its stack.
// Ideally this accessor would be removed; consider using something else.
@ -470,7 +478,7 @@ class MOZ_STACK_CLASS OpIter : private Policy
// ------------------------------------------------------------------------
// Decoding and validation interface.
MOZ_MUST_USE bool readOp(uint16_t* op);
MOZ_MUST_USE bool readOp(OpBytes* op);
MOZ_MUST_USE bool readFunctionStart(ExprType ret);
MOZ_MUST_USE bool readFunctionEnd(const uint8_t* bodyEnd);
MOZ_MUST_USE bool readReturn(Value* value);
@ -559,8 +567,8 @@ class MOZ_STACK_CLASS OpIter : private Policy
// At a location where readOp is allowed, peek at the next opcode
// without consuming it or updating any internal state.
// Never fails: returns uint16_t(Op::Limit) if it can't read.
uint16_t peekOp();
// Never fails: returns uint16_t(Op::Limit) in op->b0 if it can't read.
void peekOp(OpBytes* op);
// ------------------------------------------------------------------------
// Stack management.
@ -599,9 +607,10 @@ class MOZ_STACK_CLASS OpIter : private Policy
template <typename Policy>
inline bool
OpIter<Policy>::unrecognizedOpcode(uint32_t expr)
OpIter<Policy>::unrecognizedOpcode(const OpBytes* expr)
{
UniqueChars error(JS_smprintf("unrecognized opcode: %x", expr));
UniqueChars error(JS_smprintf("unrecognized opcode: %x %x", expr->b0,
IsPrefixByte(expr->b0) ? expr->b1 : 0));
if (!error)
return false;
@ -830,7 +839,7 @@ OpIter<Policy>::readBlockType(ExprType* type)
template <typename Policy>
inline bool
OpIter<Policy>::readOp(uint16_t* op)
OpIter<Policy>::readOp(OpBytes* op)
{
MOZ_ASSERT(!controlStack_.empty());
@ -839,24 +848,23 @@ OpIter<Policy>::readOp(uint16_t* op)
if (MOZ_UNLIKELY(!d_.readOp(op)))
return fail("unable to read opcode");
op_ = Op(*op); // debug-only
#ifdef DEBUG
op_ = *op;
#endif
return true;
}
template <typename Policy>
inline uint16_t
OpIter<Policy>::peekOp()
inline void
OpIter<Policy>::peekOp(OpBytes* op)
{
const uint8_t* pos = d_.currentPosition();
uint16_t op;
if (MOZ_UNLIKELY(!d_.readOp(&op)))
op = uint16_t(Op::Limit);
if (MOZ_UNLIKELY(!d_.readOp(op)))
op->b0 = uint16_t(Op::Limit);
d_.rollbackPosition(pos);
return op;
}
template <typename Policy>
@ -865,7 +873,7 @@ OpIter<Policy>::readFunctionStart(ExprType ret)
{
MOZ_ASSERT(valueStack_.empty());
MOZ_ASSERT(controlStack_.empty());
MOZ_ASSERT(Op(op_) == Op::Limit);
MOZ_ASSERT(op_.b0 == uint16_t(Op::Limit));
return pushControl(LabelKind::Block, ret);
}
@ -880,7 +888,9 @@ OpIter<Policy>::readFunctionEnd(const uint8_t* bodyEnd)
if (!controlStack_.empty())
return fail("unbalanced function body control flow");
op_ = Op::Limit;
#ifdef DEBUG
op_ = OpBytes(Op::Limit);
#endif
valueStack_.clear();
return true;
}

Просмотреть файл

@ -966,12 +966,12 @@ static bool
AstDecodeExpr(AstDecodeContext& c)
{
uint32_t exprOffset = c.iter().currentOffset();
uint16_t op;
OpBytes op;
if (!c.iter().readOp(&op))
return false;
AstExpr* tmp;
switch (op) {
switch (op.b0) {
case uint16_t(Op::Nop):
if (!AstDecodeNop(c))
return false;
@ -1040,7 +1040,7 @@ AstDecodeExpr(AstDecodeContext& c)
break;
case uint16_t(Op::Block):
case uint16_t(Op::Loop):
if (!AstDecodeBlock(c, Op(op)))
if (!AstDecodeBlock(c, Op(op.b0)))
return false;
break;
case uint16_t(Op::If):
@ -1058,13 +1058,13 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::I32Clz):
case uint16_t(Op::I32Ctz):
case uint16_t(Op::I32Popcnt):
if (!AstDecodeUnary(c, ValType::I32, Op(op)))
if (!AstDecodeUnary(c, ValType::I32, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Clz):
case uint16_t(Op::I64Ctz):
case uint16_t(Op::I64Popcnt):
if (!AstDecodeUnary(c, ValType::I64, Op(op)))
if (!AstDecodeUnary(c, ValType::I64, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32Abs):
@ -1074,7 +1074,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::F32Sqrt):
case uint16_t(Op::F32Trunc):
case uint16_t(Op::F32Nearest):
if (!AstDecodeUnary(c, ValType::F32, Op(op)))
if (!AstDecodeUnary(c, ValType::F32, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64Abs):
@ -1084,7 +1084,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::F64Sqrt):
case uint16_t(Op::F64Trunc):
case uint16_t(Op::F64Nearest):
if (!AstDecodeUnary(c, ValType::F64, Op(op)))
if (!AstDecodeUnary(c, ValType::F64, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Add):
@ -1102,7 +1102,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::I32ShrU):
case uint16_t(Op::I32Rotl):
case uint16_t(Op::I32Rotr):
if (!AstDecodeBinary(c, ValType::I32, Op(op)))
if (!AstDecodeBinary(c, ValType::I32, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Add):
@ -1120,7 +1120,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::I64ShrU):
case uint16_t(Op::I64Rotl):
case uint16_t(Op::I64Rotr):
if (!AstDecodeBinary(c, ValType::I64, Op(op)))
if (!AstDecodeBinary(c, ValType::I64, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32Add):
@ -1130,7 +1130,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::F32Min):
case uint16_t(Op::F32Max):
case uint16_t(Op::F32CopySign):
if (!AstDecodeBinary(c, ValType::F32, Op(op)))
if (!AstDecodeBinary(c, ValType::F32, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64Add):
@ -1140,7 +1140,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::F64Min):
case uint16_t(Op::F64Max):
case uint16_t(Op::F64CopySign):
if (!AstDecodeBinary(c, ValType::F64, Op(op)))
if (!AstDecodeBinary(c, ValType::F64, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Eq):
@ -1153,7 +1153,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::I32GtU):
case uint16_t(Op::I32GeS):
case uint16_t(Op::I32GeU):
if (!AstDecodeComparison(c, ValType::I32, Op(op)))
if (!AstDecodeComparison(c, ValType::I32, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Eq):
@ -1166,7 +1166,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::I64GtU):
case uint16_t(Op::I64GeS):
case uint16_t(Op::I64GeU):
if (!AstDecodeComparison(c, ValType::I64, Op(op)))
if (!AstDecodeComparison(c, ValType::I64, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32Eq):
@ -1175,7 +1175,7 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::F32Le):
case uint16_t(Op::F32Gt):
case uint16_t(Op::F32Ge):
if (!AstDecodeComparison(c, ValType::F32, Op(op)))
if (!AstDecodeComparison(c, ValType::F32, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64Eq):
@ -1184,150 +1184,150 @@ AstDecodeExpr(AstDecodeContext& c)
case uint16_t(Op::F64Le):
case uint16_t(Op::F64Gt):
case uint16_t(Op::F64Ge):
if (!AstDecodeComparison(c, ValType::F64, Op(op)))
if (!AstDecodeComparison(c, ValType::F64, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Eqz):
if (!AstDecodeConversion(c, ValType::I32, ValType::I32, Op(op)))
if (!AstDecodeConversion(c, ValType::I32, ValType::I32, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Eqz):
case uint16_t(Op::I32WrapI64):
if (!AstDecodeConversion(c, ValType::I64, ValType::I32, Op(op)))
if (!AstDecodeConversion(c, ValType::I64, ValType::I32, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32TruncSF32):
case uint16_t(Op::I32TruncUF32):
case uint16_t(Op::I32ReinterpretF32):
if (!AstDecodeConversion(c, ValType::F32, ValType::I32, Op(op)))
if (!AstDecodeConversion(c, ValType::F32, ValType::I32, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32TruncSF64):
case uint16_t(Op::I32TruncUF64):
if (!AstDecodeConversion(c, ValType::F64, ValType::I32, Op(op)))
if (!AstDecodeConversion(c, ValType::F64, ValType::I32, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64ExtendSI32):
case uint16_t(Op::I64ExtendUI32):
if (!AstDecodeConversion(c, ValType::I32, ValType::I64, Op(op)))
if (!AstDecodeConversion(c, ValType::I32, ValType::I64, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64TruncSF32):
case uint16_t(Op::I64TruncUF32):
if (!AstDecodeConversion(c, ValType::F32, ValType::I64, Op(op)))
if (!AstDecodeConversion(c, ValType::F32, ValType::I64, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64TruncSF64):
case uint16_t(Op::I64TruncUF64):
case uint16_t(Op::I64ReinterpretF64):
if (!AstDecodeConversion(c, ValType::F64, ValType::I64, Op(op)))
if (!AstDecodeConversion(c, ValType::F64, ValType::I64, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32ConvertSI32):
case uint16_t(Op::F32ConvertUI32):
case uint16_t(Op::F32ReinterpretI32):
if (!AstDecodeConversion(c, ValType::I32, ValType::F32, Op(op)))
if (!AstDecodeConversion(c, ValType::I32, ValType::F32, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32ConvertSI64):
case uint16_t(Op::F32ConvertUI64):
if (!AstDecodeConversion(c, ValType::I64, ValType::F32, Op(op)))
if (!AstDecodeConversion(c, ValType::I64, ValType::F32, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32DemoteF64):
if (!AstDecodeConversion(c, ValType::F64, ValType::F32, Op(op)))
if (!AstDecodeConversion(c, ValType::F64, ValType::F32, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64ConvertSI32):
case uint16_t(Op::F64ConvertUI32):
if (!AstDecodeConversion(c, ValType::I32, ValType::F64, Op(op)))
if (!AstDecodeConversion(c, ValType::I32, ValType::F64, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64ConvertSI64):
case uint16_t(Op::F64ConvertUI64):
case uint16_t(Op::F64ReinterpretI64):
if (!AstDecodeConversion(c, ValType::I64, ValType::F64, Op(op)))
if (!AstDecodeConversion(c, ValType::I64, ValType::F64, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64PromoteF32):
if (!AstDecodeConversion(c, ValType::F32, ValType::F64, Op(op)))
if (!AstDecodeConversion(c, ValType::F32, ValType::F64, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Load8S):
case uint16_t(Op::I32Load8U):
if (!AstDecodeLoad(c, ValType::I32, 1, Op(op)))
if (!AstDecodeLoad(c, ValType::I32, 1, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Load16S):
case uint16_t(Op::I32Load16U):
if (!AstDecodeLoad(c, ValType::I32, 2, Op(op)))
if (!AstDecodeLoad(c, ValType::I32, 2, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Load):
if (!AstDecodeLoad(c, ValType::I32, 4, Op(op)))
if (!AstDecodeLoad(c, ValType::I32, 4, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Load8S):
case uint16_t(Op::I64Load8U):
if (!AstDecodeLoad(c, ValType::I64, 1, Op(op)))
if (!AstDecodeLoad(c, ValType::I64, 1, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Load16S):
case uint16_t(Op::I64Load16U):
if (!AstDecodeLoad(c, ValType::I64, 2, Op(op)))
if (!AstDecodeLoad(c, ValType::I64, 2, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Load32S):
case uint16_t(Op::I64Load32U):
if (!AstDecodeLoad(c, ValType::I64, 4, Op(op)))
if (!AstDecodeLoad(c, ValType::I64, 4, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Load):
if (!AstDecodeLoad(c, ValType::I64, 8, Op(op)))
if (!AstDecodeLoad(c, ValType::I64, 8, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32Load):
if (!AstDecodeLoad(c, ValType::F32, 4, Op(op)))
if (!AstDecodeLoad(c, ValType::F32, 4, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64Load):
if (!AstDecodeLoad(c, ValType::F64, 8, Op(op)))
if (!AstDecodeLoad(c, ValType::F64, 8, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Store8):
if (!AstDecodeStore(c, ValType::I32, 1, Op(op)))
if (!AstDecodeStore(c, ValType::I32, 1, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Store16):
if (!AstDecodeStore(c, ValType::I32, 2, Op(op)))
if (!AstDecodeStore(c, ValType::I32, 2, Op(op.b0)))
return false;
break;
case uint16_t(Op::I32Store):
if (!AstDecodeStore(c, ValType::I32, 4, Op(op)))
if (!AstDecodeStore(c, ValType::I32, 4, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Store8):
if (!AstDecodeStore(c, ValType::I64, 1, Op(op)))
if (!AstDecodeStore(c, ValType::I64, 1, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Store16):
if (!AstDecodeStore(c, ValType::I64, 2, Op(op)))
if (!AstDecodeStore(c, ValType::I64, 2, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Store32):
if (!AstDecodeStore(c, ValType::I64, 4, Op(op)))
if (!AstDecodeStore(c, ValType::I64, 4, Op(op.b0)))
return false;
break;
case uint16_t(Op::I64Store):
if (!AstDecodeStore(c, ValType::I64, 8, Op(op)))
if (!AstDecodeStore(c, ValType::I64, 8, Op(op.b0)))
return false;
break;
case uint16_t(Op::F32Store):
if (!AstDecodeStore(c, ValType::F32, 4, Op(op)))
if (!AstDecodeStore(c, ValType::F32, 4, Op(op.b0)))
return false;
break;
case uint16_t(Op::F64Store):
if (!AstDecodeStore(c, ValType::F64, 8, Op(op)))
if (!AstDecodeStore(c, ValType::F64, 8, Op(op.b0)))
return false;
break;
case uint16_t(Op::CurrentMemory):
@ -1348,7 +1348,7 @@ AstDecodeExpr(AstDecodeContext& c)
break;
case uint16_t(Op::Br):
case uint16_t(Op::BrIf):
if (!AstDecodeBranch(c, Op(op)))
if (!AstDecodeBranch(c, Op(op.b0)))
return false;
break;
case uint16_t(Op::BrTable):
@ -1369,7 +1369,7 @@ AstDecodeExpr(AstDecodeContext& c)
return false;
break;
default:
return c.iter().unrecognizedOpcode(op);
return c.iter().unrecognizedOpcode(&op);
}
AstExpr* lastExpr = c.top().expr;

Просмотреть файл

@ -3227,7 +3227,7 @@ EmitBodyExprs(FunctionCompiler& f)
#define CHECK_ASMJS(c) \
if (!f.env().isAsmJS()) \
return f.iter().unrecognizedOpcode(op); \
return f.iter().unrecognizedOpcode(&op); \
if (!(c)) \
return false; \
break
@ -3236,11 +3236,11 @@ EmitBodyExprs(FunctionCompiler& f)
if (!f.mirGen().ensureBallast())
return false;
uint16_t op;
OpBytes op;
if (!f.iter().readOp(&op))
return false;
switch (op) {
switch (op.b0) {
case uint16_t(Op::End):
if (!EmitEnd(f))
return false;
@ -3447,10 +3447,10 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitMul(f, ValType::I32, MIRType::Int32));
case uint16_t(Op::I32DivS):
case uint16_t(Op::I32DivU):
CHECK(EmitDiv(f, ValType::I32, MIRType::Int32, Op(op) == Op::I32DivU));
CHECK(EmitDiv(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32DivU));
case uint16_t(Op::I32RemS):
case uint16_t(Op::I32RemU):
CHECK(EmitRem(f, ValType::I32, MIRType::Int32, Op(op) == Op::I32RemU));
CHECK(EmitRem(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32RemU));
case uint16_t(Op::I32And):
CHECK(EmitBitwise<MBitAnd>(f, ValType::I32, MIRType::Int32));
case uint16_t(Op::I32Or):
@ -3465,7 +3465,7 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitBitwise<MUrsh>(f, ValType::I32, MIRType::Int32));
case uint16_t(Op::I32Rotl):
case uint16_t(Op::I32Rotr):
CHECK(EmitRotate(f, ValType::I32, Op(op) == Op::I32Rotl));
CHECK(EmitRotate(f, ValType::I32, Op(op.b0) == Op::I32Rotl));
case uint16_t(Op::I64Clz):
CHECK(EmitUnaryWithType<MClz>(f, ValType::I64, MIRType::Int64));
case uint16_t(Op::I64Ctz):
@ -3480,10 +3480,10 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitMul(f, ValType::I64, MIRType::Int64));
case uint16_t(Op::I64DivS):
case uint16_t(Op::I64DivU):
CHECK(EmitDiv(f, ValType::I64, MIRType::Int64, Op(op) == Op::I64DivU));
CHECK(EmitDiv(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64DivU));
case uint16_t(Op::I64RemS):
case uint16_t(Op::I64RemU):
CHECK(EmitRem(f, ValType::I64, MIRType::Int64, Op(op) == Op::I64RemU));
CHECK(EmitRem(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64RemU));
case uint16_t(Op::I64And):
CHECK(EmitBitwise<MBitAnd>(f, ValType::I64, MIRType::Int64));
case uint16_t(Op::I64Or):
@ -3498,7 +3498,7 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitBitwise<MUrsh>(f, ValType::I64, MIRType::Int64));
case uint16_t(Op::I64Rotl):
case uint16_t(Op::I64Rotr):
CHECK(EmitRotate(f, ValType::I64, Op(op) == Op::I64Rotl));
CHECK(EmitRotate(f, ValType::I64, Op(op.b0) == Op::I64Rotl));
case uint16_t(Op::F32Abs):
CHECK(EmitUnaryWithType<MAbs>(f, ValType::F32, MIRType::Float32));
case uint16_t(Op::F32Neg):
@ -3523,7 +3523,7 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitDiv(f, ValType::F32, MIRType::Float32, /* isUnsigned = */ false));
case uint16_t(Op::F32Min):
case uint16_t(Op::F32Max):
CHECK(EmitMinMax(f, ValType::F32, MIRType::Float32, Op(op) == Op::F32Max));
CHECK(EmitMinMax(f, ValType::F32, MIRType::Float32, Op(op.b0) == Op::F32Max));
case uint16_t(Op::F32CopySign):
CHECK(EmitCopySign(f, ValType::F32));
case uint16_t(Op::F64Abs):
@ -3550,7 +3550,7 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitDiv(f, ValType::F64, MIRType::Double, /* isUnsigned = */ false));
case uint16_t(Op::F64Min):
case uint16_t(Op::F64Max):
CHECK(EmitMinMax(f, ValType::F64, MIRType::Double, Op(op) == Op::F64Max));
CHECK(EmitMinMax(f, ValType::F64, MIRType::Double, Op(op.b0) == Op::F64Max));
case uint16_t(Op::F64CopySign):
CHECK(EmitCopySign(f, ValType::F64));
@ -3559,26 +3559,26 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitConversion<MWrapInt64ToInt32>(f, ValType::I64, ValType::I32));
case uint16_t(Op::I32TruncSF32):
case uint16_t(Op::I32TruncUF32):
CHECK(EmitTruncate(f, ValType::F32, ValType::I32, Op(op) == Op::I32TruncUF32));
CHECK(EmitTruncate(f, ValType::F32, ValType::I32, Op(op.b0) == Op::I32TruncUF32));
case uint16_t(Op::I32TruncSF64):
case uint16_t(Op::I32TruncUF64):
CHECK(EmitTruncate(f, ValType::F64, ValType::I32, Op(op) == Op::I32TruncUF64));
CHECK(EmitTruncate(f, ValType::F64, ValType::I32, Op(op.b0) == Op::I32TruncUF64));
case uint16_t(Op::I64ExtendSI32):
case uint16_t(Op::I64ExtendUI32):
CHECK(EmitExtendI32(f, Op(op) == Op::I64ExtendUI32));
CHECK(EmitExtendI32(f, Op(op.b0) == Op::I64ExtendUI32));
case uint16_t(Op::I64TruncSF32):
case uint16_t(Op::I64TruncUF32):
CHECK(EmitTruncate(f, ValType::F32, ValType::I64, Op(op) == Op::I64TruncUF32));
CHECK(EmitTruncate(f, ValType::F32, ValType::I64, Op(op.b0) == Op::I64TruncUF32));
case uint16_t(Op::I64TruncSF64):
case uint16_t(Op::I64TruncUF64):
CHECK(EmitTruncate(f, ValType::F64, ValType::I64, Op(op) == Op::I64TruncUF64));
CHECK(EmitTruncate(f, ValType::F64, ValType::I64, Op(op.b0) == Op::I64TruncUF64));
case uint16_t(Op::F32ConvertSI32):
CHECK(EmitConversion<MToFloat32>(f, ValType::I32, ValType::F32));
case uint16_t(Op::F32ConvertUI32):
CHECK(EmitConversion<MWasmUnsignedToFloat32>(f, ValType::I32, ValType::F32));
case uint16_t(Op::F32ConvertSI64):
case uint16_t(Op::F32ConvertUI64):
CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32, Op(op) == Op::F32ConvertUI64));
CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32, Op(op.b0) == Op::F32ConvertUI64));
case uint16_t(Op::F32DemoteF64):
CHECK(EmitConversion<MToFloat32>(f, ValType::F64, ValType::F32));
case uint16_t(Op::F64ConvertSI32):
@ -3587,7 +3587,7 @@ EmitBodyExprs(FunctionCompiler& f)
CHECK(EmitConversion<MWasmUnsignedToDouble>(f, ValType::I32, ValType::F64));
case uint16_t(Op::F64ConvertSI64):
case uint16_t(Op::F64ConvertUI64):
CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double, Op(op) == Op::F64ConvertUI64));
CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double, Op(op.b0) == Op::F64ConvertUI64));
case uint16_t(Op::F64PromoteF32):
CHECK(EmitConversion<MToDouble>(f, ValType::F32, ValType::F64));
@ -3603,80 +3603,82 @@ EmitBodyExprs(FunctionCompiler& f)
// asm.js-specific operators
case uint16_t(Op::TeeGlobal):
CHECK_ASMJS(EmitTeeGlobal(f));
case uint16_t(Op::I32Min):
case uint16_t(Op::I32Max):
CHECK_ASMJS(EmitMinMax(f, ValType::I32, MIRType::Int32, Op(op) == Op::I32Max));
case uint16_t(Op::I32Neg):
CHECK_ASMJS(EmitUnaryWithType<MWasmNeg>(f, ValType::I32, MIRType::Int32));
case uint16_t(Op::I32BitNot):
CHECK_ASMJS(EmitBitNot(f, ValType::I32));
case uint16_t(Op::I32Abs):
CHECK_ASMJS(EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32));
case uint16_t(Op::F32TeeStoreF64):
CHECK_ASMJS(EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64));
case uint16_t(Op::F64TeeStoreF32):
CHECK_ASMJS(EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32));
case uint16_t(Op::I32TeeStore8):
CHECK_ASMJS(EmitTeeStore(f, ValType::I32, Scalar::Int8));
case uint16_t(Op::I32TeeStore16):
CHECK_ASMJS(EmitTeeStore(f, ValType::I32, Scalar::Int16));
case uint16_t(Op::I64TeeStore8):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int8));
case uint16_t(Op::I64TeeStore16):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int16));
case uint16_t(Op::I64TeeStore32):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int32));
case uint16_t(Op::I32TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::I32, Scalar::Int32));
case uint16_t(Op::I64TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int64));
case uint16_t(Op::F32TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::F32, Scalar::Float32));
case uint16_t(Op::F64TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::F64, Scalar::Float64));
case uint16_t(Op::F64Mod):
CHECK_ASMJS(EmitRem(f, ValType::F64, MIRType::Double, /* isUnsigned = */ false));
case uint16_t(Op::F64Sin):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::SinD, ValType::F64));
case uint16_t(Op::F64Cos):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::CosD, ValType::F64));
case uint16_t(Op::F64Tan):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::TanD, ValType::F64));
case uint16_t(Op::F64Asin):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ASinD, ValType::F64));
case uint16_t(Op::F64Acos):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ACosD, ValType::F64));
case uint16_t(Op::F64Atan):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ATanD, ValType::F64));
case uint16_t(Op::F64Exp):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ExpD, ValType::F64));
case uint16_t(Op::F64Log):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::LogD, ValType::F64));
case uint16_t(Op::F64Pow):
CHECK_ASMJS(EmitBinaryMathBuiltinCall(f, SymbolicAddress::PowD, ValType::F64));
case uint16_t(Op::F64Atan2):
CHECK_ASMJS(EmitBinaryMathBuiltinCall(f, SymbolicAddress::ATan2D, ValType::F64));
case uint16_t(Op::OldCallIndirect):
CHECK_ASMJS(EmitCallIndirect(f, /* oldStyle = */ true));
case uint16_t(Op::MozPrefix): {
switch (op.b1) {
case uint16_t(MozOp::TeeGlobal):
CHECK_ASMJS(EmitTeeGlobal(f));
case uint16_t(MozOp::I32Min):
case uint16_t(MozOp::I32Max):
CHECK_ASMJS(EmitMinMax(f, ValType::I32, MIRType::Int32, MozOp(op.b1) == MozOp::I32Max));
case uint16_t(MozOp::I32Neg):
CHECK_ASMJS(EmitUnaryWithType<MWasmNeg>(f, ValType::I32, MIRType::Int32));
case uint16_t(MozOp::I32BitNot):
CHECK_ASMJS(EmitBitNot(f, ValType::I32));
case uint16_t(MozOp::I32Abs):
CHECK_ASMJS(EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32));
case uint16_t(MozOp::F32TeeStoreF64):
CHECK_ASMJS(EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64));
case uint16_t(MozOp::F64TeeStoreF32):
CHECK_ASMJS(EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32));
case uint16_t(MozOp::I32TeeStore8):
CHECK_ASMJS(EmitTeeStore(f, ValType::I32, Scalar::Int8));
case uint16_t(MozOp::I32TeeStore16):
CHECK_ASMJS(EmitTeeStore(f, ValType::I32, Scalar::Int16));
case uint16_t(MozOp::I64TeeStore8):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int8));
case uint16_t(MozOp::I64TeeStore16):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int16));
case uint16_t(MozOp::I64TeeStore32):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int32));
case uint16_t(MozOp::I32TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::I32, Scalar::Int32));
case uint16_t(MozOp::I64TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::I64, Scalar::Int64));
case uint16_t(MozOp::F32TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::F32, Scalar::Float32));
case uint16_t(MozOp::F64TeeStore):
CHECK_ASMJS(EmitTeeStore(f, ValType::F64, Scalar::Float64));
case uint16_t(MozOp::F64Mod):
CHECK_ASMJS(EmitRem(f, ValType::F64, MIRType::Double, /* isUnsigned = */ false));
case uint16_t(MozOp::F64Sin):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::SinD, ValType::F64));
case uint16_t(MozOp::F64Cos):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::CosD, ValType::F64));
case uint16_t(MozOp::F64Tan):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::TanD, ValType::F64));
case uint16_t(MozOp::F64Asin):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ASinD, ValType::F64));
case uint16_t(MozOp::F64Acos):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ACosD, ValType::F64));
case uint16_t(MozOp::F64Atan):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ATanD, ValType::F64));
case uint16_t(MozOp::F64Exp):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::ExpD, ValType::F64));
case uint16_t(MozOp::F64Log):
CHECK_ASMJS(EmitUnaryMathBuiltinCall(f, SymbolicAddress::LogD, ValType::F64));
case uint16_t(MozOp::F64Pow):
CHECK_ASMJS(EmitBinaryMathBuiltinCall(f, SymbolicAddress::PowD, ValType::F64));
case uint16_t(MozOp::F64Atan2):
CHECK_ASMJS(EmitBinaryMathBuiltinCall(f, SymbolicAddress::ATan2D, ValType::F64));
case uint16_t(MozOp::OldCallIndirect):
CHECK_ASMJS(EmitCallIndirect(f, /* oldStyle = */ true));
// Atomics
case uint16_t(Op::I32AtomicsLoad):
CHECK_ASMJS(EmitAtomicsLoad(f));
case uint16_t(Op::I32AtomicsStore):
CHECK_ASMJS(EmitAtomicsStore(f));
case uint16_t(Op::I32AtomicsBinOp):
CHECK_ASMJS(EmitAtomicsBinOp(f));
case uint16_t(Op::I32AtomicsCompareExchange):
CHECK_ASMJS(EmitAtomicsCompareExchange(f));
case uint16_t(Op::I32AtomicsExchange):
CHECK_ASMJS(EmitAtomicsExchange(f));
// Atomics
case uint16_t(MozOp::I32AtomicsLoad):
CHECK_ASMJS(EmitAtomicsLoad(f));
case uint16_t(MozOp::I32AtomicsStore):
CHECK_ASMJS(EmitAtomicsStore(f));
case uint16_t(MozOp::I32AtomicsBinOp):
CHECK_ASMJS(EmitAtomicsBinOp(f));
case uint16_t(MozOp::I32AtomicsCompareExchange):
CHECK_ASMJS(EmitAtomicsCompareExchange(f));
case uint16_t(MozOp::I32AtomicsExchange):
CHECK_ASMJS(EmitAtomicsExchange(f));
// SIMD
#define CASE(TYPE, OP, SIGN) \
case uint16_t(Op::TYPE##OP): \
CHECK_ASMJS(EmitSimdOp(f, ValType::TYPE, SimdOperation::Fn_##OP, SIGN));
// SIMD
#define CASE(TYPE, OP, SIGN) \
case uint16_t(MozOp::TYPE##OP): \
CHECK_ASMJS(EmitSimdOp(f, ValType::TYPE, SimdOperation::Fn_##OP, SIGN));
#define I8x16CASE(OP) CASE(I8x16, OP, SimdSign::Signed)
#define I16x8CASE(OP) CASE(I16x8, OP, SimdSign::Signed)
#define I32x4CASE(OP) CASE(I32x4, OP, SimdSign::Signed)
@ -3684,18 +3686,18 @@ EmitBodyExprs(FunctionCompiler& f)
#define B8x16CASE(OP) CASE(B8x16, OP, SimdSign::NotApplicable)
#define B16x8CASE(OP) CASE(B16x8, OP, SimdSign::NotApplicable)
#define B32x4CASE(OP) CASE(B32x4, OP, SimdSign::NotApplicable)
#define ENUMERATE(TYPE, FORALL, DO) \
case uint16_t(Op::TYPE##Constructor): \
CHECK_ASMJS(EmitSimdOp(f, ValType::TYPE, SimdOperation::Constructor, SimdSign::NotApplicable)); \
FORALL(DO)
#define ENUMERATE(TYPE, FORALL, DO) \
case uint16_t(MozOp::TYPE##Constructor): \
CHECK_ASMJS(EmitSimdOp(f, ValType::TYPE, SimdOperation::Constructor, SimdSign::NotApplicable)); \
FORALL(DO)
ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
#undef CASE
#undef I8x16CASE
@ -3707,70 +3709,76 @@ EmitBodyExprs(FunctionCompiler& f)
#undef B32x4CASE
#undef ENUMERATE
case uint16_t(Op::I8x16Const):
CHECK_ASMJS(EmitI8x16Const(f));
case uint16_t(Op::I16x8Const):
CHECK_ASMJS(EmitI16x8Const(f));
case uint16_t(Op::I32x4Const):
CHECK_ASMJS(EmitI32x4Const(f));
case uint16_t(Op::F32x4Const):
CHECK_ASMJS(EmitF32x4Const(f));
case uint16_t(Op::B8x16Const):
CHECK_ASMJS(EmitB8x16Const(f));
case uint16_t(Op::B16x8Const):
CHECK_ASMJS(EmitB16x8Const(f));
case uint16_t(Op::B32x4Const):
CHECK_ASMJS(EmitB32x4Const(f));
case uint16_t(MozOp::I8x16Const):
CHECK_ASMJS(EmitI8x16Const(f));
case uint16_t(MozOp::I16x8Const):
CHECK_ASMJS(EmitI16x8Const(f));
case uint16_t(MozOp::I32x4Const):
CHECK_ASMJS(EmitI32x4Const(f));
case uint16_t(MozOp::F32x4Const):
CHECK_ASMJS(EmitF32x4Const(f));
case uint16_t(MozOp::B8x16Const):
CHECK_ASMJS(EmitB8x16Const(f));
case uint16_t(MozOp::B16x8Const):
CHECK_ASMJS(EmitB16x8Const(f));
case uint16_t(MozOp::B32x4Const):
CHECK_ASMJS(EmitB32x4Const(f));
case uint16_t(Op::I8x16addSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_addSaturate, SimdSign::Unsigned));
case uint16_t(Op::I8x16subSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_subSaturate, SimdSign::Unsigned));
case uint16_t(Op::I8x16shiftRightByScalarU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
case uint16_t(Op::I8x16lessThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
case uint16_t(Op::I8x16lessThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
case uint16_t(Op::I8x16greaterThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
case uint16_t(Op::I8x16greaterThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
case uint16_t(Op::I8x16extractLaneU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_extractLane, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16addSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_addSaturate, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16subSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_subSaturate, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16shiftRightByScalarU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16lessThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16lessThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16greaterThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16greaterThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
case uint16_t(MozOp::I8x16extractLaneU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_extractLane, SimdSign::Unsigned));
case uint16_t(Op::I16x8addSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_addSaturate, SimdSign::Unsigned));
case uint16_t(Op::I16x8subSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_subSaturate, SimdSign::Unsigned));
case uint16_t(Op::I16x8shiftRightByScalarU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
case uint16_t(Op::I16x8lessThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
case uint16_t(Op::I16x8lessThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
case uint16_t(Op::I16x8greaterThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
case uint16_t(Op::I16x8greaterThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
case uint16_t(Op::I16x8extractLaneU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_extractLane, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8addSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_addSaturate, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8subSaturateU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_subSaturate, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8shiftRightByScalarU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8lessThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8lessThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8greaterThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8greaterThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
case uint16_t(MozOp::I16x8extractLaneU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_extractLane, SimdSign::Unsigned));
case uint16_t(Op::I32x4shiftRightByScalarU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
case uint16_t(Op::I32x4lessThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
case uint16_t(Op::I32x4lessThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
case uint16_t(Op::I32x4greaterThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
case uint16_t(Op::I32x4greaterThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
case uint16_t(Op::I32x4fromFloat32x4U):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_fromFloat32x4, SimdSign::Unsigned));
case uint16_t(MozOp::I32x4shiftRightByScalarU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned));
case uint16_t(MozOp::I32x4lessThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThan, SimdSign::Unsigned));
case uint16_t(MozOp::I32x4lessThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned));
case uint16_t(MozOp::I32x4greaterThanU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThan, SimdSign::Unsigned));
case uint16_t(MozOp::I32x4greaterThanOrEqualU):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned));
case uint16_t(MozOp::I32x4fromFloat32x4U):
CHECK_ASMJS(EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_fromFloat32x4, SimdSign::Unsigned));
default:
return f.iter().unrecognizedOpcode(&op);
}
break;
}
default:
return f.iter().unrecognizedOpcode(op);
return f.iter().unrecognizedOpcode(&op);
}
}

Просмотреть файл

@ -350,13 +350,13 @@ DecodeFunctionBodyExprs(const ModuleEnvironment& env, const Sig& sig, const ValT
#define CHECK(c) if (!(c)) return false; break
while (true) {
uint16_t op;
OpBytes op;
if (!iter.readOp(&op))
return false;
Nothing nothing;
switch (op) {
switch (op.b0) {
case uint16_t(Op::End): {
LabelKind unusedKind;
ExprType unusedType;
@ -681,7 +681,7 @@ DecodeFunctionBodyExprs(const ModuleEnvironment& env, const Sig& sig, const ValT
case uint16_t(Op::Unreachable):
CHECK(iter.readUnreachable());
default:
return iter.unrecognizedOpcode(op);
return iter.unrecognizedOpcode(&op);
}
}
@ -1143,11 +1143,11 @@ static bool
DecodeInitializerExpression(Decoder& d, const GlobalDescVector& globals, ValType expected,
InitExpr* init)
{
uint16_t op;
OpBytes op;
if (!d.readOp(&op))
return d.fail("failed to read initializer type");
switch (op) {
switch (op.b0) {
case uint16_t(Op::I32Const): {
int32_t i32;
if (!d.readVarS32(&i32))
@ -1195,8 +1195,8 @@ DecodeInitializerExpression(Decoder& d, const GlobalDescVector& globals, ValType
if (expected != init->type())
return d.fail("type mismatch: initializer type and expected type don't match");
uint16_t end;
if (!d.readOp(&end) || end != uint16_t(Op::End))
OpBytes end;
if (!d.readOp(&end) || end.b0 != uint16_t(Op::End))
return d.fail("failed to read end of initializer expression");
return true;

Просмотреть файл

@ -242,12 +242,15 @@ class Encoder
return writeFixedU8(uint8_t(type));
}
MOZ_MUST_USE bool writeOp(Op op) {
static_assert(size_t(Op::Limit) <= 2 * UINT8_MAX, "fits");
static_assert(size_t(Op::Limit) == 256, "fits");
MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
if (size_t(op) < UINT8_MAX)
return writeFixedU8(uint8_t(op));
return writeFixedU8(UINT8_MAX) &&
writeFixedU8(size_t(op) - UINT8_MAX);
return writeFixedU8(uint8_t(op));
}
MOZ_MUST_USE bool writeOp(MozOp op) {
static_assert(size_t(MozOp::Limit) <= 256, "fits");
MOZ_ASSERT(size_t(op) < size_t(MozOp::Limit));
return writeFixedU8(uint8_t(Op::MozPrefix)) &&
writeFixedU8(uint8_t(op));
}
// Fixed-length encodings that allow back-patching.
@ -496,18 +499,19 @@ class Decoder
static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
return readFixedU8(type);
}
MOZ_MUST_USE bool readOp(uint16_t* op) {
static_assert(size_t(Op::Limit) <= 2 * UINT8_MAX, "fits");
MOZ_MUST_USE bool readOp(OpBytes* op) {
static_assert(size_t(Op::Limit) == 256, "fits");
uint8_t u8;
if (!readFixedU8(&u8))
return false;
if (MOZ_LIKELY(u8 != UINT8_MAX)) {
*op = u8;
op->b0 = u8;
if (MOZ_LIKELY(!IsPrefixByte(u8)))
return true;
}
if (!readFixedU8(&u8))
if (!readFixedU8(&u8)) {
op->b1 = 0; // Make it sane
return false;
*op = uint16_t(u8) + UINT8_MAX;
}
op->b1 = u8;
return true;
}
@ -615,7 +619,7 @@ class Decoder
return (ValType)uncheckedReadFixedU8();
}
Op uncheckedReadOp() {
static_assert(size_t(Op::Limit) <= 2 * UINT8_MAX, "fits");
static_assert(size_t(Op::Limit) == 256, "fits");
uint8_t u8 = uncheckedReadFixedU8();
return u8 != UINT8_MAX
? Op(u8)