Bug 1253137 - Baldr: switch from LEB128 to prefix-based scheme to match BinaryEncoding.md (r=sunfish)

MozReview-Commit-ID: HwLEk9HKW50

--HG--
extra : rebase_source : f2cc1884affc2a2b45631ac7dc5e28961c6755b9
Luke Wagner 2016-03-09 13:14:14 -06:00
Parent 994ae222fd
Commit ee4712ee86
5 changed files with 174 additions and 184 deletions
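For context on the diffs below: the old encoding wrote expression opcodes and type codes as LEB128 varints (writeEnum/readEnum over writeVarU32), while the new scheme in WasmBinary.h emits a single byte for opcodes below 0xFF and uses 0xFF as an escape prefix for the rest, giving a limit of 2 * UINT8_MAX - 1 (509) opcodes. A minimal standalone sketch of the write side, assuming a plain byte vector rather than the real Encoder (encodeExpr is a hypothetical helper name):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t ExprLimit = 2 * UINT8_MAX - 1;  // 509 encodable opcodes

// Prefix-based scheme from this patch: values below 0xFF take one byte,
// larger values are escaped with a leading 0xFF followed by (value - 0xFF).
void encodeExpr(std::vector<uint8_t>& bytes, size_t expr) {
    assert(expr < ExprLimit);
    if (expr < UINT8_MAX) {
        bytes.push_back(uint8_t(expr));              // common case: one byte
    } else {
        bytes.push_back(UINT8_MAX);                  // escape prefix
        bytes.push_back(uint8_t(expr - UINT8_MAX));  // never 0xFF, since expr < ExprLimit
    }
}

Relative to LEB128 this costs at most two bytes, encodes opcodes 128-254 in one byte instead of two, and makes the patchable placeholder a single reserved 0xFF byte, which is what the writePatchableOneByteExpr/patchOneByteExpr pairs below rely on.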

@ -2857,7 +2857,7 @@ class MOZ_STACK_CLASS FunctionValidator
fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
}
MOZ_WARN_UNUSED_RESULT bool patchableCall(ParseNode* pn, size_t* offset) {
return encoder().writePatchableExpr(offset) &&
return encoder().writePatchableOneByteExpr(offset) &&
fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
}
MOZ_WARN_UNUSED_RESULT bool writeSimdOp(SimdType simdType, SimdOperation op) {
@ -3701,7 +3701,7 @@ CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode*
bool isSimd, Scalar::Type* viewType)
{
size_t flagsAt;
if (!f.encoder().writePatchableU8(&flagsAt))
if (!f.encoder().writePatchableFixedU8(&flagsAt))
return false;
// asm.js doesn't have constant offsets, so just encode a 0.
@ -3709,7 +3709,7 @@ CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode*
return false;
size_t prepareAt;
if (!f.encoder().writePatchableExpr(&prepareAt))
if (!f.encoder().writePatchableOneByteExpr(&prepareAt))
return false;
int32_t mask;
@ -3719,16 +3719,16 @@ CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode*
// asm.js only has naturally-aligned accesses.
size_t align = TypedArrayElemSize(*viewType);
MOZ_ASSERT(IsPowerOfTwo(align));
f.encoder().patchVarU8(flagsAt, CeilingLog2(align));
f.encoder().patchFixedU8(flagsAt, CeilingLog2(align));
// Don't generate the mask op if there is no need for it which could happen for
// a shift of zero or a SIMD access.
if (mask != NoMask) {
f.encoder().patchExpr(prepareAt, Expr::I32And);
f.encoder().patchOneByteExpr(prepareAt, Expr::I32And);
return f.writeInt32Lit(mask);
}
f.encoder().patchExpr(prepareAt, Expr::Id);
f.encoder().patchOneByteExpr(prepareAt, Expr::Id);
return true;
}
@ -3738,21 +3738,21 @@ CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type)
Scalar::Type viewType;
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), NoSimd, &viewType))
return false;
switch (viewType) {
case Scalar::Int8: f.encoder().patchExpr(opcodeAt, Expr::I32Load8S); break;
case Scalar::Uint8: f.encoder().patchExpr(opcodeAt, Expr::I32Load8U); break;
case Scalar::Int16: f.encoder().patchExpr(opcodeAt, Expr::I32Load16S); break;
case Scalar::Uint16: f.encoder().patchExpr(opcodeAt, Expr::I32Load16U); break;
case Scalar::Int8: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load8S); break;
case Scalar::Uint8: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load8U); break;
case Scalar::Int16: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load16S); break;
case Scalar::Uint16: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load16U); break;
case Scalar::Uint32:
case Scalar::Int32: f.encoder().patchExpr(opcodeAt, Expr::I32Load); break;
case Scalar::Float32: f.encoder().patchExpr(opcodeAt, Expr::F32Load); break;
case Scalar::Float64: f.encoder().patchExpr(opcodeAt, Expr::F64Load); break;
case Scalar::Int32: f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Load); break;
case Scalar::Float32: f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Load); break;
case Scalar::Float64: f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Load); break;
default: MOZ_CRASH("unexpected scalar type");
}
@ -3781,7 +3781,7 @@ static bool
CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
{
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Scalar::Type viewType;
@ -3817,27 +3817,27 @@ CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type
switch (viewType) {
case Scalar::Int8:
case Scalar::Uint8:
f.encoder().patchExpr(opcodeAt, Expr::I32Store8);
f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Store8);
break;
case Scalar::Int16:
case Scalar::Uint16:
f.encoder().patchExpr(opcodeAt, Expr::I32Store16);
f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Store16);
break;
case Scalar::Int32:
case Scalar::Uint32:
f.encoder().patchExpr(opcodeAt, Expr::I32Store);
f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Store);
break;
case Scalar::Float32:
if (rhsType.isFloatish())
f.encoder().patchExpr(opcodeAt, Expr::F32Store);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Store);
else
f.encoder().patchExpr(opcodeAt, Expr::F64StoreF32);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F64StoreF32);
break;
case Scalar::Float64:
if (rhsType.isFloatish())
f.encoder().patchExpr(opcodeAt, Expr::F32StoreF64);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32StoreF64);
else
f.encoder().patchExpr(opcodeAt, Expr::F64Store);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Store);
break;
default: MOZ_CRASH("unexpected scalar type");
}
@ -3969,7 +3969,7 @@ CheckMathAbs(FunctionValidator& f, ParseNode* call, Type* type)
ParseNode* arg = CallArgList(call);
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type argType;
@ -3977,19 +3977,19 @@ CheckMathAbs(FunctionValidator& f, ParseNode* call, Type* type)
return false;
if (argType.isSigned()) {
f.encoder().patchExpr(opcodeAt, Expr::I32Abs);
f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Abs);
*type = Type::Unsigned;
return true;
}
if (argType.isMaybeDouble()) {
f.encoder().patchExpr(opcodeAt, Expr::F64Abs);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Abs);
*type = Type::Double;
return true;
}
if (argType.isMaybeFloat()) {
f.encoder().patchExpr(opcodeAt, Expr::F32Abs);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Abs);
*type = Type::Floatish;
return true;
}
@ -4006,7 +4006,7 @@ CheckMathSqrt(FunctionValidator& f, ParseNode* call, Type* type)
ParseNode* arg = CallArgList(call);
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type argType;
@ -4014,13 +4014,13 @@ CheckMathSqrt(FunctionValidator& f, ParseNode* call, Type* type)
return false;
if (argType.isMaybeDouble()) {
f.encoder().patchExpr(opcodeAt, Expr::F64Sqrt);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Sqrt);
*type = Type::Double;
return true;
}
if (argType.isMaybeFloat()) {
f.encoder().patchExpr(opcodeAt, Expr::F32Sqrt);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Sqrt);
*type = Type::Floatish;
return true;
}
@ -4035,7 +4035,7 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ
return f.fail(callNode, "Math.min/max must be passed at least 2 arguments");
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
ParseNode* firstArg = CallArgList(callNode);
@ -4060,7 +4060,7 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ
return f.failf(firstArg, "%s is not a subtype of double?, float? or signed",
firstType.toChars());
}
f.encoder().patchExpr(opcodeAt, expr);
f.encoder().patchOneByteExpr(opcodeAt, expr);
unsigned numArgs = CallArgListLength(callNode);
ParseNode* nextArg = NextNode(firstArg);
@ -4121,7 +4121,7 @@ static bool
WriteAtomicOperator(FunctionValidator& f, Expr opcode, size_t* viewTypeAt)
{
return f.encoder().writeExpr(opcode) &&
f.encoder().writePatchableU8(viewTypeAt);
f.encoder().writePatchableFixedU8(viewTypeAt);
}
static bool
@ -4141,7 +4141,7 @@ CheckAtomicsLoad(FunctionValidator& f, ParseNode* call, Type* type)
if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
return false;
f.encoder().patchU8(viewTypeAt, uint8_t(viewType));
f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType));
*type = Type::Int;
return true;
@ -4172,7 +4172,7 @@ CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type)
if (!rhsType.isIntish())
return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());
f.encoder().patchU8(viewTypeAt, uint8_t(viewType));
f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType));
*type = rhsType;
return true;
@ -4191,7 +4191,7 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op
size_t viewTypeAt;
if (!WriteAtomicOperator(f, Expr::I32AtomicsBinOp, &viewTypeAt))
return false;
if (!f.encoder().writeU8(uint8_t(op)))
if (!f.encoder().writeFixedU8(uint8_t(op)))
return false;
Scalar::Type viewType;
@ -4205,7 +4205,7 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op
if (!valueArgType.isIntish())
return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
f.encoder().patchU8(viewTypeAt, uint8_t(viewType));
f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType));
*type = Type::Int;
return true;
@ -4260,7 +4260,7 @@ CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type)
if (!newValueArgType.isIntish())
return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
f.encoder().patchU8(viewTypeAt, uint8_t(viewType));
f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType));
*type = Type::Int;
return true;
@ -4291,7 +4291,7 @@ CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type)
if (!valueArgType.isIntish())
return f.failf(arrayArg, "%s is not a subtype of intish", valueArgType.toChars());
f.encoder().patchU8(viewTypeAt, uint8_t(viewType));
f.encoder().patchFixedU8(viewTypeAt, uint8_t(viewType));
*type = Type::Int;
return true;
@ -4568,19 +4568,19 @@ CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType
size_t opcodeAt)
{
if (inputType.isMaybeDouble()) {
f.encoder().patchExpr(opcodeAt, Expr::F32DemoteF64);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32DemoteF64);
return true;
}
if (inputType.isSigned()) {
f.encoder().patchExpr(opcodeAt, Expr::F32ConvertSI32);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32ConvertSI32);
return true;
}
if (inputType.isUnsigned()) {
f.encoder().patchExpr(opcodeAt, Expr::F32ConvertUI32);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32ConvertUI32);
return true;
}
if (inputType.isFloatish()) {
f.encoder().patchExpr(opcodeAt, Expr::Id);
f.encoder().patchOneByteExpr(opcodeAt, Expr::Id);
return true;
}
@ -4600,7 +4600,7 @@ CheckCoercionArg(FunctionValidator& f, ParseNode* arg, Type expected, Type* type
return CheckCoercedCall(f, arg, expected, type);
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type argType;
@ -4613,7 +4613,7 @@ CheckCoercionArg(FunctionValidator& f, ParseNode* arg, Type expected, Type* type
} else if (expected.isSimd()) {
if (!(argType <= expected))
return f.fail(arg, "argument to SIMD coercion isn't from the correct SIMD type");
f.encoder().patchExpr(opcodeAt, Expr::Id);
f.encoder().patchOneByteExpr(opcodeAt, Expr::Id);
} else {
MOZ_CRASH("not call coercions");
}
@ -4689,9 +4689,9 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin
return f.fail(callNode, "math builtin cannot be used as float");
if (opIsDouble)
f.encoder().patchExpr(opcodeAt, f64);
f.encoder().patchOneByteExpr(opcodeAt, f64);
else
f.encoder().patchExpr(opcodeAt, f32);
f.encoder().patchOneByteExpr(opcodeAt, f32);
if (arity == 2) {
Type secondType;
@ -4748,7 +4748,7 @@ CheckSimdCallArgsPatchable(FunctionValidator& f, ParseNode* call, unsigned expec
MOZ_ASSERT(!!arg);
Type argType;
size_t patchAt;
if (!f.encoder().writePatchableExpr(&patchAt))
if (!f.encoder().writePatchableOneByteExpr(&patchAt))
return false;
if (!CheckExpr(f, arg, &argType))
return false;
@ -4817,14 +4817,14 @@ class CheckSimdScalarArgs
// We emitted a double literal and actually want a float32.
MOZ_ASSERT(patchAt != size_t(-1));
f.encoder().patchExpr(patchAt, Expr::F32DemoteF64);
f.encoder().patchOneByteExpr(patchAt, Expr::F32DemoteF64);
return true;
}
if (patchAt == size_t(-1))
return true;
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
return true;
}
};
@ -4874,7 +4874,7 @@ class CheckSimdVectorScalarArgs
if (patchAt == size_t(-1))
return true;
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
return true;
}
@ -4931,7 +4931,7 @@ class CheckSimdReplaceLaneArgs
return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
Type(formalSimdType_).toChars());
}
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
return true;
case 1:
// Second argument is the lane (< vector length).
@ -4939,7 +4939,7 @@ class CheckSimdReplaceLaneArgs
return f.failf(arg, "lane selector should be a constant integer literal");
if (u32 >= GetSimdLanes(formalSimdType_))
return f.failf(arg, "lane selector should be in bounds");
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
return true;
case 2:
// Third argument is the scalar
@ -5081,7 +5081,7 @@ CheckSimdSwizzle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t
return false;
for (unsigned i = 0; i < 4; i++) {
if (!f.encoder().writeU8(uint8_t(lanes[i])))
if (!f.encoder().writeFixedU8(uint8_t(lanes[i])))
return false;
}
@ -5114,7 +5114,7 @@ CheckSimdShuffle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* t
return false;
for (unsigned i = 0; i < 4; i++) {
if (!f.encoder().writeU8(uint8_t(lanes[i])))
if (!f.encoder().writeFixedU8(uint8_t(lanes[i])))
return false;
}
@ -5378,12 +5378,12 @@ CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual,
// | patchAt | the thing we wanted to coerce | current position |>
switch (expected.which()) {
case Type::Void:
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
break;
case Type::Int:
if (!actual.isIntish())
return f.failf(expr, "%s is not a subtype of intish", actual.toChars());
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
break;
case Type::Float:
if (!CheckFloatCoercionArg(f, expr, actual, patchAt))
@ -5391,13 +5391,13 @@ CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual,
break;
case Type::Double:
if (actual.isMaybeDouble())
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
else if (actual.isMaybeFloat())
f.encoder().patchExpr(patchAt, Expr::F64PromoteF32);
f.encoder().patchOneByteExpr(patchAt, Expr::F64PromoteF32);
else if (actual.isSigned())
f.encoder().patchExpr(patchAt, Expr::F64ConvertSI32);
f.encoder().patchOneByteExpr(patchAt, Expr::F64ConvertSI32);
else if (actual.isUnsigned())
f.encoder().patchExpr(patchAt, Expr::F64ConvertUI32);
f.encoder().patchOneByteExpr(patchAt, Expr::F64ConvertUI32);
else
return f.failf(expr, "%s is not a subtype of double?, float?, signed or unsigned", actual.toChars());
break;
@ -5405,7 +5405,7 @@ CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual,
MOZ_ASSERT(expected.isSimd(), "Incomplete switch");
if (actual != expected)
return f.failf(expr, "got type %s, expected %s", actual.toChars(), expected.toChars());
f.encoder().patchExpr(patchAt, Expr::Id);
f.encoder().patchOneByteExpr(patchAt, Expr::Id);
break;
}
@ -5418,7 +5418,7 @@ CheckCoercedMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMath
Type ret, Type* type)
{
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type actual;
if (!CheckMathBuiltinCall(f, callNode, func, &actual))
@ -5433,7 +5433,7 @@ CheckCoercedSimdCall(FunctionValidator& f, ParseNode* call, const ModuleValidato
MOZ_ASSERT(ret.isCanonical());
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type actual;
@ -5457,7 +5457,7 @@ CheckCoercedAtomicsBuiltinCall(FunctionValidator& f, ParseNode* callNode,
MOZ_ASSERT(ret.isCanonical());
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type actual;
if (!CheckAtomicsBuiltinCall(f, callNode, func, &actual))
@ -5474,7 +5474,7 @@ CheckCoercedCall(FunctionValidator& f, ParseNode* call, Type ret, Type* type)
if (IsNumericLiteral(f.m(), call)) {
size_t coerceOp;
if (!f.encoder().writePatchableExpr(&coerceOp))
if (!f.encoder().writePatchableOneByteExpr(&coerceOp))
return false;
NumLit lit = ExtractNumericLiteral(f.m(), call);
if (!f.writeConstExpr(lit))
@ -5528,7 +5528,7 @@ CheckPos(FunctionValidator& f, ParseNode* pos, Type* type)
return CheckCoercedCall(f, operand, Type::Double, type);
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type actual;
@ -5565,7 +5565,7 @@ CheckNeg(FunctionValidator& f, ParseNode* expr, Type* type)
ParseNode* operand = UnaryKid(expr);
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type operandType;
@ -5573,19 +5573,19 @@ CheckNeg(FunctionValidator& f, ParseNode* expr, Type* type)
return false;
if (operandType.isInt()) {
f.encoder().patchExpr(opcodeAt, Expr::I32Neg);
f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Neg);
*type = Type::Intish;
return true;
}
if (operandType.isMaybeDouble()) {
f.encoder().patchExpr(opcodeAt, Expr::F64Neg);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Neg);
*type = Type::Double;
return true;
}
if (operandType.isMaybeFloat()) {
f.encoder().patchExpr(opcodeAt, Expr::F32Neg);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Neg);
*type = Type::Floatish;
return true;
}
@ -5600,7 +5600,7 @@ CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type)
ParseNode* operand = UnaryKid(expr);
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type operandType;
@ -5609,7 +5609,7 @@ CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type)
if (operandType.isMaybeDouble() || operandType.isMaybeFloat()) {
Expr opcode = operandType.isMaybeDouble() ? Expr::I32TruncSF64 : Expr::I32TruncSF32;
f.encoder().patchExpr(opcodeAt, opcode);
f.encoder().patchOneByteExpr(opcodeAt, opcode);
*type = Type::Signed;
return true;
}
@ -5617,7 +5617,7 @@ CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type)
if (!operandType.isIntish())
return f.failf(operand, "%s is not a subtype of double?, float? or intish", operandType.toChars());
f.encoder().patchExpr(opcodeAt, Expr::Id);
f.encoder().patchOneByteExpr(opcodeAt, Expr::Id);
*type = Type::Signed;
return true;
}
@ -5749,7 +5749,7 @@ CheckMultiply(FunctionValidator& f, ParseNode* star, Type* type)
ParseNode* rhs = MultiplyRight(star);
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
Type lhsType;
@ -5763,19 +5763,19 @@ CheckMultiply(FunctionValidator& f, ParseNode* star, Type* type)
if (lhsType.isInt() && rhsType.isInt()) {
if (!IsValidIntMultiplyConstant(f.m(), lhs) && !IsValidIntMultiplyConstant(f.m(), rhs))
return f.fail(star, "one arg to int multiply must be a small (-2^20, 2^20) int literal");
f.encoder().patchExpr(opcodeAt, Expr::I32Mul);
f.encoder().patchOneByteExpr(opcodeAt, Expr::I32Mul);
*type = Type::Intish;
return true;
}
if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
f.encoder().patchExpr(opcodeAt, Expr::F64Mul);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F64Mul);
*type = Type::Double;
return true;
}
if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
f.encoder().patchExpr(opcodeAt, Expr::F32Mul);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Mul);
*type = Type::Floatish;
return true;
}
@ -5796,7 +5796,7 @@ CheckAddOrSub(FunctionValidator& f, ParseNode* expr, Type* type, unsigned* numAd
unsigned lhsNumAddOrSub, rhsNumAddOrSub;
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
if (lhs->isKind(PNK_ADD) || lhs->isKind(PNK_SUB)) {
@ -5826,13 +5826,13 @@ CheckAddOrSub(FunctionValidator& f, ParseNode* expr, Type* type, unsigned* numAd
return f.fail(expr, "too many + or - without intervening coercion");
if (lhsType.isInt() && rhsType.isInt()) {
f.encoder().patchExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::I32Add : Expr::I32Sub);
f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::I32Add : Expr::I32Sub);
*type = Type::Intish;
} else if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
f.encoder().patchExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::F64Add : Expr::F64Sub);
f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::F64Add : Expr::F64Sub);
*type = Type::Double;
} else if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
f.encoder().patchExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::F32Add : Expr::F32Sub);
f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_ADD) ? Expr::F32Add : Expr::F32Sub);
*type = Type::Floatish;
} else {
return f.failf(expr, "operands to + or - must both be int, float? or double?, got %s and %s",
@ -5850,7 +5850,7 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type)
MOZ_ASSERT(expr->isKind(PNK_DIV) || expr->isKind(PNK_MOD));
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
ParseNode* lhs = DivOrModLeft(expr);
@ -5863,14 +5863,14 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type)
return false;
if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
f.encoder().patchExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::F64Div : Expr::F64Mod);
f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::F64Div : Expr::F64Mod);
*type = Type::Double;
return true;
}
if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
if (expr->isKind(PNK_DIV))
f.encoder().patchExpr(opcodeAt, Expr::F32Div);
f.encoder().patchOneByteExpr(opcodeAt, Expr::F32Div);
else
return f.fail(expr, "modulo cannot receive float arguments");
*type = Type::Floatish;
@ -5878,13 +5878,13 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type)
}
if (lhsType.isSigned() && rhsType.isSigned()) {
f.encoder().patchExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::I32DivS : Expr::I32RemS);
f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::I32DivS : Expr::I32RemS);
*type = Type::Intish;
return true;
}
if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
f.encoder().patchExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::I32DivU : Expr::I32RemU);
f.encoder().patchOneByteExpr(opcodeAt, expr->isKind(PNK_DIV) ? Expr::I32DivU : Expr::I32RemU);
*type = Type::Intish;
return true;
}
@ -5900,7 +5900,7 @@ CheckComparison(FunctionValidator& f, ParseNode* comp, Type* type)
comp->isKind(PNK_GE) || comp->isKind(PNK_EQ) || comp->isKind(PNK_NE));
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
ParseNode* lhs = ComparisonLeft(comp);
@ -5966,7 +5966,7 @@ CheckComparison(FunctionValidator& f, ParseNode* comp, Type* type)
MOZ_CRASH("unexpected type");
}
f.encoder().patchExpr(opcodeAt, stmt);
f.encoder().patchOneByteExpr(opcodeAt, stmt);
*type = Type::Int;
return true;
}
@ -6322,7 +6322,7 @@ CheckIf(FunctionValidator& f, ParseNode* ifStmt)
{
recurse:
size_t opcodeAt;
if (!f.encoder().writePatchableExpr(&opcodeAt))
if (!f.encoder().writePatchableOneByteExpr(&opcodeAt))
return false;
MOZ_ASSERT(ifStmt->isKind(PNK_IF));
@ -6340,9 +6340,9 @@ CheckIf(FunctionValidator& f, ParseNode* ifStmt)
return false;
if (!elseStmt) {
f.encoder().patchExpr(opcodeAt, Expr::If);
f.encoder().patchOneByteExpr(opcodeAt, Expr::If);
} else {
f.encoder().patchExpr(opcodeAt, Expr::IfElse);
f.encoder().patchOneByteExpr(opcodeAt, Expr::IfElse);
if (elseStmt->isKind(PNK_IF)) {
ifStmt = elseStmt;
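The AsmJS.cpp changes above are mechanical renames, but they all follow the same back-patching pattern: reserve a one-byte opcode slot before the operand expressions are validated, then patch in the concrete opcode once the operand types are known. A simplified sketch of that flow, with stand-in types for the real Encoder and Expr (emitNegation is a hypothetical caller modeled loosely on CheckNeg):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-ins for the real Encoder and Expr; illustration only.
enum class Expr : uint16_t { I32Neg = 1, F32Neg = 2, F64Neg = 3 };

struct Encoder {
    std::vector<uint8_t> bytes;

    bool writePatchableOneByteExpr(size_t* offset) {
        *offset = bytes.size();
        bytes.push_back(0xff);             // reserved placeholder byte
        return true;
    }
    void patchOneByteExpr(size_t offset, Expr expr) {
        assert(size_t(expr) < UINT8_MAX);  // must fit the one-byte form
        assert(bytes[offset] == 0xff);
        bytes[offset] = uint8_t(expr);
    }
};

// Reserve the opcode slot first, validate/emit the operand expression,
// then patch the concrete opcode once the operand type is known.
bool emitNegation(Encoder& e, bool operandIsInt) {
    size_t opcodeAt;
    if (!e.writePatchableOneByteExpr(&opcodeAt))
        return false;
    // ... operand expression would be validated and emitted here ...
    e.patchOneByteExpr(opcodeAt, operandIsInt ? Expr::I32Neg : Expr::F32Neg);
    return true;
}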

@ -162,7 +162,7 @@ CheckValType(JSContext* cx, Decoder& d, ValType type)
break;
}
return Fail(cx, d, "bad value type");
return Fail(cx, d, "bad type");
}
static bool

@ -367,13 +367,6 @@ class Encoder
return true;
}
template <class T>
MOZ_WARN_UNUSED_RESULT bool writeEnum(T v) {
static_assert(uint32_t(T::Limit) <= UINT32_MAX, "fits");
MOZ_ASSERT(uint32_t(v) < uint32_t(T::Limit));
return writeVarU32(uint32_t(v));
}
void patchVarU32(size_t offset, uint32_t patchBits, uint32_t assertBits) {
do {
uint8_t assertByte = assertBits & 0x7f;
@ -397,17 +390,7 @@ class Encoder
return offset - start + 1;
}
template <class T>
MOZ_WARN_UNUSED_RESULT bool writePatchableEnum(size_t* offset) {
*offset = bytes_.length();
return writeVarU32(uint32_t(T::Limit));
}
template <class T>
void patchEnum(size_t offset, T v) {
MOZ_ASSERT(uint32_t(v) < uint32_t(T::Limit));
return patchVarU32(offset, uint32_t(v), uint32_t(T::Limit));
}
static const size_t ExprLimit = 2 * UINT8_MAX - 1;
public:
explicit Encoder(Bytes& bytes)
@ -422,6 +405,9 @@ class Encoder
// Fixed-size encoding operations simply copy the literal bytes (without
// attempting to align).
MOZ_WARN_UNUSED_RESULT bool writeFixedU8(uint8_t i) {
return write<uint8_t>(i);
}
MOZ_WARN_UNUSED_RESULT bool writeFixedU32(uint32_t i) {
return write<uint32_t>(i);
}
@ -452,18 +438,33 @@ class Encoder
MOZ_WARN_UNUSED_RESULT bool writeVarS64(int64_t i) {
return writeVarS<int64_t>(i);
}
MOZ_WARN_UNUSED_RESULT bool writeExpr(Expr expr) {
return writeEnum(expr);
}
MOZ_WARN_UNUSED_RESULT bool writeValType(ValType type) {
return writeEnum(type);
static_assert(size_t(ValType::Limit) <= INT8_MAX, "fits");
return writeFixedU8(size_t(type));
}
MOZ_WARN_UNUSED_RESULT bool writeExprType(ExprType type) {
return writeEnum(type);
static_assert(size_t(ExprType::Limit) <= INT8_MAX, "fits");
return writeFixedU8(uint8_t(type));
}
MOZ_WARN_UNUSED_RESULT bool writeExpr(Expr expr) {
static_assert(size_t(Expr::Limit) <= ExprLimit, "fits");
if (size_t(expr) < UINT8_MAX)
return writeFixedU8(uint8_t(expr));
return writeFixedU8(UINT8_MAX) &&
writeFixedU8(size_t(expr) - UINT8_MAX);
}
// Variable-length encodings that allow back-patching.
MOZ_WARN_UNUSED_RESULT bool writePatchableFixedU8(size_t* offset) {
*offset = bytes_.length();
return bytes_.append(0xff);
}
void patchFixedU8(size_t offset, uint8_t i) {
MOZ_ASSERT(bytes_[offset] == 0xff);
bytes_[offset] = i;
}
MOZ_WARN_UNUSED_RESULT bool writePatchableVarU32(size_t* offset) {
*offset = bytes_.length();
return writeVarU32(UINT32_MAX);
@ -472,20 +473,14 @@ class Encoder
return patchVarU32(offset, patchBits, UINT32_MAX);
}
MOZ_WARN_UNUSED_RESULT bool writePatchableVarU8(size_t* offset) {
MOZ_WARN_UNUSED_RESULT bool writePatchableOneByteExpr(size_t* offset) {
*offset = bytes_.length();
return writeU8(UINT8_MAX);
return writeFixedU8(0xff);
}
void patchVarU8(size_t offset, uint8_t patchBits) {
MOZ_ASSERT(patchBits < 0x80);
return patchU8(offset, patchBits);
}
MOZ_WARN_UNUSED_RESULT bool writePatchableExpr(size_t* offset) {
return writePatchableEnum<Expr>(offset);
}
void patchExpr(size_t offset, Expr expr) {
patchEnum(offset, expr);
void patchOneByteExpr(size_t offset, Expr expr) {
MOZ_ASSERT(size_t(expr) < UINT8_MAX);
MOZ_ASSERT(bytes_[offset] == 0xff);
bytes_[offset] = uint8_t(expr);
}
// Byte ranges start with an LEB128 length followed by an arbitrary sequence
@ -513,21 +508,6 @@ class Encoder
void finishSection(size_t offset) {
return patchVarU32(offset, bytes_.length() - offset - varU32ByteLength(offset));
}
// Temporary encoding forms which should be removed as part of the
// conversion to wasm:
MOZ_WARN_UNUSED_RESULT bool writeU8(uint8_t i) {
return write<uint8_t>(i);
}
MOZ_WARN_UNUSED_RESULT bool writePatchableU8(size_t* offset) {
*offset = bytes_.length();
return bytes_.append(0xff);
}
void patchU8(size_t offset, uint8_t i) {
MOZ_ASSERT(bytes_[offset] == 0xff);
bytes_[offset] = i;
}
};
// The Decoder class decodes the bytes in the range it is given during
@ -549,16 +529,6 @@ class Decoder
return true;
}
template <class T>
MOZ_WARN_UNUSED_RESULT bool readEnum(T* out) {
static_assert(uint32_t(T::Limit) <= UINT32_MAX, "fits");
uint32_t u32;
if (!readVarU32(&u32) || u32 >= uint32_t(T::Limit))
return false;
*out = T(u32);
return true;
}
template <class T>
T uncheckedRead() {
MOZ_ASSERT(bytesRemain() >= sizeof(T));
@ -568,12 +538,6 @@ class Decoder
return ret;
}
template <class T>
T uncheckedReadEnum() {
static_assert(uint32_t(T::Limit) <= UINT32_MAX, "fits");
return (T)uncheckedReadVarU32();
}
template <typename UInt>
MOZ_WARN_UNUSED_RESULT bool readVarU(UInt* out) {
const unsigned numBits = sizeof(UInt) * CHAR_BIT;
@ -627,6 +591,8 @@ class Decoder
return true;
}
static const size_t ExprLimit = 2 * UINT8_MAX - 1;
public:
Decoder(const uint8_t* begin, const uint8_t* end)
: beg_(begin),
@ -660,6 +626,9 @@ class Decoder
// Fixed-size encoding operations simply copy the literal bytes (without
// attempting to align).
MOZ_WARN_UNUSED_RESULT bool readFixedU8(uint8_t* i) {
return read<uint8_t>(i);
}
MOZ_WARN_UNUSED_RESULT bool readFixedU32(uint32_t* u) {
return read<uint32_t>(u);
}
@ -690,14 +659,37 @@ class Decoder
MOZ_WARN_UNUSED_RESULT bool readVarS64(int64_t* out) {
return readVarS<int64_t>(out);
}
MOZ_WARN_UNUSED_RESULT bool readExpr(Expr* expr) {
return readEnum(expr);
}
MOZ_WARN_UNUSED_RESULT bool readValType(ValType* type) {
return readEnum(type);
static_assert(uint8_t(ValType::Limit) <= INT8_MAX, "fits");
uint8_t u8;
if (!readFixedU8(&u8))
return false;
*type = (ValType)u8;
return true;
}
MOZ_WARN_UNUSED_RESULT bool readExprType(ExprType* type) {
return readEnum(type);
static_assert(uint8_t(ExprType::Limit) <= INT8_MAX, "fits");
uint8_t u8;
if (!readFixedU8(&u8))
return false;
*type = (ExprType)u8;
return true;
}
MOZ_WARN_UNUSED_RESULT bool readExpr(Expr* expr) {
static_assert(size_t(Expr::Limit) <= ExprLimit, "fits");
uint8_t u8;
if (!readFixedU8(&u8))
return false;
if (u8 != UINT8_MAX) {
*expr = Expr(u8);
return true;
}
if (!readFixedU8(&u8))
return false;
if (u8 == UINT8_MAX)
return false;
*expr = Expr(u8 + UINT8_MAX);
return true;
}
// See writeBytes comment.
@ -778,6 +770,9 @@ class Decoder
// sure that the bytes are well-formed (by construction or due to previous
// validation).
uint8_t uncheckedReadFixedU8() {
return uncheckedRead<uint8_t>();
}
uint32_t uncheckedReadFixedU32() {
return uncheckedRead<uint32_t>();
}
@ -821,27 +816,22 @@ class Decoder
MOZ_ALWAYS_TRUE(readVarS64(&i64));
return i64;
}
ValType uncheckedReadValType() {
return (ValType)uncheckedReadFixedU8();
}
Expr uncheckedReadExpr() {
return uncheckedReadEnum<Expr>();
static_assert(size_t(Expr::Limit) <= ExprLimit, "fits");
uint8_t u8 = uncheckedReadFixedU8();
return u8 != UINT8_MAX
? Expr(u8)
: Expr(uncheckedReadFixedU8() + UINT8_MAX);
}
Expr uncheckedPeekExpr() {
const uint8_t* before = cur_;
Expr ret = uncheckedReadEnum<Expr>();
cur_ = before;
return ret;
}
ValType uncheckedReadValType() {
return uncheckedReadEnum<ValType>();
}
// Temporary encoding forms which should be removed as part of the
// conversion to wasm:
MOZ_WARN_UNUSED_RESULT bool readFixedU8(uint8_t* i) {
return read<uint8_t>(i);
}
uint8_t uncheckedReadFixedU8() {
return uncheckedRead<uint8_t>();
static_assert(size_t(Expr::Limit) <= ExprLimit, "fits");
uint8_t u8 = cur_[0];
return u8 != UINT8_MAX
? Expr(u8)
: Expr(cur_[1] + UINT8_MAX);
}
};
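The decode side mirrors the writer: a byte below 0xFF is the opcode itself, 0xFF announces a second byte holding (opcode - 0xFF), and a second byte of 0xFF is rejected so every opcode has exactly one encoding. A standalone sketch of that rule, assuming raw begin/end pointers rather than the real Decoder (decodeExpr is a hypothetical helper name):

#include <cstddef>
#include <cstdint>

// Decode one prefix-encoded opcode starting at p; fails on a truncated
// or redundant (0xFF 0xFF) encoding.
bool decodeExpr(const uint8_t* p, const uint8_t* end, uint16_t* expr, size_t* length) {
    if (p == end)
        return false;
    if (p[0] != UINT8_MAX) {
        *expr = p[0];                        // one-byte form: opcodes 0..254
        *length = 1;
        return true;
    }
    if (end - p < 2 || p[1] == UINT8_MAX)
        return false;                        // truncated, or the reserved 0xFF 0xFF form
    *expr = uint16_t(p[1]) + UINT8_MAX;      // escaped form: opcodes 255..509
    *length = 2;
    return true;
}

uncheckedReadExpr and uncheckedPeekExpr above apply the same rule but skip the bounds and validity checks, since they are only used on bytes that have already been validated.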

@ -3854,7 +3854,7 @@ EncodeMemory(Encoder& e, WasmAstModule& module)
}
}
if (!e.writeU8(exported))
if (!e.writeFixedU8(exported))
return false;
e.finishSection(offset);

@ -178,8 +178,8 @@ wasmEval(moduleWithSections([sigSection([v2vSig])]));
wasmEval(moduleWithSections([sigSection([i2vSig])]));
wasmEval(moduleWithSections([sigSection([v2vSig, i2vSig])]));
assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([{args:[], ret:100}])])), TypeError, /expression type/);
assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([{args:[100], ret:VoidCode}])])), TypeError, /value type/);
assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([{args:[], ret:100}])])), TypeError, /bad type/);
assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([{args:[100], ret:VoidCode}])])), TypeError, /bad type/);
assertThrowsInstanceOf(() => wasmEval(moduleWithSections([sigSection([]), declSection([0])])), TypeError, /signature index out of range/);
assertThrowsInstanceOf(() => wasmEval(moduleWithSections([sigSection([v2vSig]), declSection([1])])), TypeError, /signature index out of range/);