Backed out 6 changesets (bug 1794841) for causing build bustages CLOSED TREE

Backed out changeset fba05a94b614 (bug 1794841)
Backed out changeset 83fa047989c7 (bug 1794841)
Backed out changeset 2a4c88ddd548 (bug 1794841)
Backed out changeset 9fa0cc213d62 (bug 1794841)
Backed out changeset b3722221b4be (bug 1794841)
Backed out changeset 2daef03ea536 (bug 1794841)
Cristian Tuns 2022-10-17 19:11:19 -04:00
Parent f5b8a2c623
Commit 73d2d7042e
34 changed files with 356 additions and 202 deletions

View file

@ -9525,7 +9525,7 @@ AttachDecision CallIRGenerator::tryAttachWasmCall(HandleFunction calleeFunc) {
#endif
ABIArgGenerator abi;
for (const auto& valType : sig.args()) {
MIRType mirType = valType.toMIRType();
MIRType mirType = ToMIRType(valType);
ABIArg abiArg = abi.next(mirType);
if (mirType != MIRType::Int64) {
continue;
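
The member call valType.toMIRType() from the backed-out patch reverts to the free function ToMIRType(valType), whose definition is restored in WasmValType.h later in this diff. A minimal sketch of the equivalence, assuming the wasm::ValType and jit::MIRType types from this tree:

    // Both spellings compute the same mapping (I32 -> Int32, Ref -> RefOrNull, ...).
    MIRType mirType = ToMIRType(valType);      // free function, restored here
    // MIRType mirType = valType.toMIRType(); // member form being backed out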

View file

@ -17300,7 +17300,7 @@ void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
case wasm::ValType::I64:
case wasm::ValType::F32:
case wasm::ValType::F64:
argMir = sig.args()[i].toMIRType();
argMir = ToMIRType(sig.args()[i]);
break;
case wasm::ValType::V128:
MOZ_CRASH("unexpected argument type when calling from ion to wasm");
@ -17308,7 +17308,7 @@ void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
switch (sig.args()[i].refTypeKind()) {
case wasm::RefType::Extern:
// AnyRef is boxed on the JS side, so passed as a pointer here.
argMir = sig.args()[i].toMIRType();
argMir = ToMIRType(sig.args()[i]);
break;
case wasm::RefType::Func:
case wasm::RefType::Eq:

View file

@ -6772,7 +6772,7 @@ MIonToWasmCall* MIonToWasmCall::New(TempAllocator& alloc,
if (results.length() > 0 && !results[0].isEncodedAsJSValueOnEscape()) {
MOZ_ASSERT(results.length() == 1,
"multiple returns not implemented for inlined Wasm calls");
resultType = results[0].toMIRType();
resultType = ToMIRType(results[0]);
}
auto* ins = new (alloc) MIonToWasmCall(instanceObj, resultType, funcExport);

View file

@ -1159,6 +1159,8 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
uint32_t mask_;
bool defined_;
Table(Table&& rhs) = delete;
public:
Table(uint32_t sigIndex, TaggedParserAtomIndex name, uint32_t firstUse,
uint32_t mask)
@ -1168,8 +1170,6 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
mask_(mask),
defined_(false) {}
Table(Table&& rhs) = delete;
uint32_t sigIndex() const { return sigIndex_; }
TaggedParserAtomIndex name() const { return name_; }
uint32_t firstUse() const { return firstUse_; }
@ -1413,7 +1413,8 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
sigSet_(cx),
funcImportMap_(cx),
arrayViews_(cx),
compilerEnv_(CompileMode::Once, Tier::Optimized, DebugEnabled::False),
compilerEnv_(CompileMode::Once, Tier::Optimized, OptimizedBackend::Ion,
DebugEnabled::False),
moduleEnv_(FeatureArgs(), ModuleKind::AsmJS) {
compilerEnv_.computeParameters();
memory_.minLength = RoundUpToNextValidAsmJSHeapLength(0);
@ -6951,7 +6952,7 @@ static bool TryInstantiate(JSContext* cx, CallArgs args, const Module& module,
}
imports.get().memory =
WasmMemoryObject::create(cx, buffer, /* isHuge= */ false, nullptr);
WasmMemoryObject::create(cx, buffer, /* hugeMemory= */ false, nullptr);
if (!imports.get().memory) {
return false;
}

View file

@ -44,6 +44,13 @@ const Local& BaseCompiler::localFromSlot(uint32_t slot, MIRType type) {
return localInfo_[slot];
}
uint32_t BaseCompiler::readCallSiteLineOrBytecode() {
if (!func_.callSiteLineNums.empty()) {
return func_.callSiteLineNums[lastReadCallSite_++];
}
return iter_.lastOpcodeOffset();
}
BytecodeOffset BaseCompiler::bytecodeOffset() const {
return iter_.bytecodeOffset();
}
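
readCallSiteLineOrBytecode returns an asm.js source line when the function carries callSiteLineNums, and the last opcode's bytecode offset otherwise. The restored convention is to read it once at the top of each emitter and thread it into the eventual call; a sketch, where emitSomething and SASigSomething are hypothetical names:

    bool BaseCompiler::emitSomething() {
      uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
      // ... decode immediates and operands via iter_ ...
      if (deadCode_) {
        return true;  // nothing to emit in a dead-code region
      }
      return emitInstanceCall(lineOrBytecode, SASigSomething);
    }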

View file

@ -128,8 +128,9 @@ struct AccessCheck {
// Encapsulate all the information about a function call.
struct FunctionCall {
FunctionCall()
: restoreRegisterStateAndRealm(false),
explicit FunctionCall(uint32_t lineOrBytecode)
: lineOrBytecode(lineOrBytecode),
restoreRegisterStateAndRealm(false),
usesSystemAbi(false),
#ifdef JS_CODEGEN_ARM
hardFP(true),
@ -138,6 +139,7 @@ struct FunctionCall {
stackArgAreaSize(0) {
}
uint32_t lineOrBytecode;
WasmABIArgGenerator abi;
bool restoreRegisterStateAndRealm;
bool usesSystemAbi;
@ -274,6 +276,10 @@ struct BaseCompiler final {
// Flag indicating that the compiler is currently in a dead code region.
bool deadCode_;
// Running count of call sites, used only to assert that the compiler is in a
// sensible state once compilation has completed.
size_t lastReadCallSite_;
///////////////////////////////////////////////////////////////////////////
//
// State for bounds check elimination.
@ -1190,7 +1196,7 @@ struct BaseCompiler final {
template <typename RegIndexType>
void atomicRMW64(MemoryAccessDesc* access, ValType type, AtomicOp op);
void atomicXchg(MemoryAccessDesc* access, ValType type);
void atomicXchg(MemoryAccessDesc* desc, ValType type);
template <typename RegIndexType>
void atomicXchg64(MemoryAccessDesc* access, WantResult wantResult);
template <typename RegIndexType>
@ -1237,7 +1243,10 @@ struct BaseCompiler final {
//
// Sundry helpers.
// Retrieve the current bytecodeOffset.
// Get the line number or bytecode offset, depending on what's available.
inline uint32_t readCallSiteLineOrBytecode();
// Retrieve the current bytecodeOffset
inline BytecodeOffset bytecodeOffset() const;
// Generate a trap instruction for the current bytecodeOffset.
@ -1245,7 +1254,7 @@ struct BaseCompiler final {
// Abstracted helper for throwing, used for throw, rethrow, and rethrowing
// at the end of a series of catch blocks (if none matched the exception).
[[nodiscard]] bool throwFrom(RegRef exn);
[[nodiscard]] bool throwFrom(RegRef exn, uint32_t lineOrBytecode);
// Load the specified tag object from the Instance.
void loadTag(RegPtr instanceData, uint32_t tagIndex, RegRef tagDst);
@ -1565,7 +1574,8 @@ struct BaseCompiler final {
//
// (see WasmBuiltins.cpp). In short, the most recently pushed value is the
// rightmost argument to the function.
[[nodiscard]] bool emitInstanceCall(const SymbolicAddressSignature& builtin);
[[nodiscard]] bool emitInstanceCall(uint32_t lineOrBytecode,
const SymbolicAddressSignature& builtin);
[[nodiscard]] bool emitMemoryGrow();
[[nodiscard]] bool emitMemorySize();
@ -1586,19 +1596,21 @@ struct BaseCompiler final {
AtomicOp op);
[[nodiscard]] bool emitAtomicStore(ValType type, Scalar::Type viewType);
[[nodiscard]] bool emitWait(ValType type, uint32_t byteSize);
[[nodiscard]] bool atomicWait(ValType type, MemoryAccessDesc* access);
[[nodiscard]] bool atomicWait(ValType type, MemoryAccessDesc* access,
uint32_t lineOrBytecode);
[[nodiscard]] bool emitWake();
[[nodiscard]] bool atomicWake(MemoryAccessDesc* access);
[[nodiscard]] bool atomicWake(MemoryAccessDesc* access,
uint32_t lineOrBytecode);
[[nodiscard]] bool emitFence();
[[nodiscard]] bool emitAtomicXchg(ValType type, Scalar::Type viewType);
[[nodiscard]] bool emitMemInit();
[[nodiscard]] bool emitMemCopy();
[[nodiscard]] bool memCopyCall();
[[nodiscard]] bool memCopyCall(uint32_t lineOrBytecode);
void memCopyInlineM32();
[[nodiscard]] bool emitTableCopy();
[[nodiscard]] bool emitDataOrElemDrop(bool isData);
[[nodiscard]] bool emitMemFill();
[[nodiscard]] bool memFillCall();
[[nodiscard]] bool memFillCall(uint32_t lineOrBytecode);
void memFillInlineM32();
[[nodiscard]] bool emitTableInit();
[[nodiscard]] bool emitTableFill();
@ -1634,7 +1646,7 @@ struct BaseCompiler final {
void emitGcNullCheck(RegRef rp);
RegPtr emitGcArrayGetData(RegRef rp);
RegI32 emitGcArrayGetNumElements(RegRef rp);
void emitGcArrayBoundsCheck(RegI32 index, RegI32 numElements);
void emitGcArrayBoundsCheck(RegI32 index, RegI32 length);
template <typename T>
void emitGcGet(FieldType type, FieldExtension extension, const T& src);
template <typename T>

View file

@ -478,18 +478,20 @@ void BaseCompiler::emitBinop(void (*op)(CompilerType1& compiler, RegType rs,
template <typename R>
bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
R reader) {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
if (!reader()) {
return false;
}
if (deadCode_) {
return true;
}
return emitInstanceCall(fn);
return emitInstanceCall(lineOrBytecode, fn);
}
template <typename A1, typename R>
bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
R reader) {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
A1 arg = 0;
if (!reader(&arg)) {
return false;
@ -498,12 +500,13 @@ bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
return true;
}
push(arg);
return emitInstanceCall(fn);
return emitInstanceCall(lineOrBytecode, fn);
}
template <typename A1, typename A2, typename R>
bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
R reader) {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
A1 arg1 = 0;
A2 arg2 = 0;
if (!reader(&arg1, &arg2)) {
@ -515,7 +518,7 @@ bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
// Note: the order of arguments must be the same as for the reader.
push(arg1);
push(arg2);
return emitInstanceCall(fn);
return emitInstanceCall(lineOrBytecode, fn);
}
} // namespace wasm
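
These helpers show why FunctionCall regains its lineOrBytecode field: the value read before decoding is stored in the call state and later stamped into the CallSiteDesc of the emitted call. A condensed sketch of the flow, using the types from this patch:

    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
    FunctionCall baselineCall(lineOrBytecode);  // restored constructor stores it
    beginCall(baselineCall, UseABI::Wasm, RestoreRegisterStateAndRealm::False);
    // ... push arguments ...
    CallSiteDesc desc(baselineCall.lineOrBytecode, CallSiteDesc::Func);
    masm.call(desc, funcIndex);  // the call site records the line/offset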

View file

@ -114,7 +114,7 @@ void BaseLocalIter::settle() {
// TODO/AnyRef-boxing: With boxed immediates and strings, the
// debugger must be made aware that AnyRef != Pointer.
ASSERT_ANYREF_IS_JSOBJECT;
mirType_ = locals_[index_].toMIRType();
mirType_ = ToMIRType(locals_[index_]);
frameOffset_ = pushLocal(MIRTypeToSize(mirType_));
break;
default:

View file

@ -2121,7 +2121,8 @@ void BaseCompiler::atomicCmpXchg64(MemoryAccessDesc* access, ValType type) {
//
// Synchronization.
bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access) {
bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access,
uint32_t lineOrBytecode) {
switch (type.kind()) {
case ValType::I32: {
RegI64 timeout = popI64();
@ -2140,7 +2141,8 @@ bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access) {
pushI32(val);
pushI64(timeout);
if (!emitInstanceCall(isMem32() ? SASigWaitI32M32 : SASigWaitI32M64)) {
if (!emitInstanceCall(lineOrBytecode,
isMem32() ? SASigWaitI32M32 : SASigWaitI32M64)) {
return false;
}
break;
@ -2176,7 +2178,8 @@ bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access) {
pushI64(val);
pushI64(timeout);
if (!emitInstanceCall(isMem32() ? SASigWaitI64M32 : SASigWaitI64M64)) {
if (!emitInstanceCall(lineOrBytecode,
isMem32() ? SASigWaitI64M32 : SASigWaitI64M64)) {
return false;
}
break;
@ -2188,7 +2191,8 @@ bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access) {
return true;
}
bool BaseCompiler::atomicWake(MemoryAccessDesc* access) {
bool BaseCompiler::atomicWake(MemoryAccessDesc* access,
uint32_t lineOrBytecode) {
RegI32 count = popI32();
if (isMem32()) {
@ -2202,7 +2206,8 @@ bool BaseCompiler::atomicWake(MemoryAccessDesc* access) {
}
pushI32(count);
return emitInstanceCall(isMem32() ? SASigWakeM32 : SASigWakeM64);
return emitInstanceCall(lineOrBytecode,
isMem32() ? SASigWakeM32 : SASigWakeM64);
}
//////////////////////////////////////////////////////////////////////////////

View file

@ -1544,13 +1544,13 @@ void BaseCompiler::passArg(ValType type, const Stk& arg, FunctionCall* call) {
CodeOffset BaseCompiler::callDefinition(uint32_t funcIndex,
const FunctionCall& call) {
CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Func);
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
return masm.call(desc, funcIndex);
}
CodeOffset BaseCompiler::callSymbolic(SymbolicAddress callee,
const FunctionCall& call) {
CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Symbolic);
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
return masm.call(desc, callee);
}
@ -1581,7 +1581,7 @@ bool BaseCompiler::callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
loadI32(indexVal, RegI32(WasmTableCallIndexReg));
CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Indirect);
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Indirect);
CalleeDesc callee = CalleeDesc::wasmTable(table, funcTypeId);
OutOfLineCode* oob = addOutOfLineCode(
new (alloc_) OutOfLineAbortingTrap(Trap::OutOfBounds, bytecodeOffset()));
@ -1606,7 +1606,7 @@ bool BaseCompiler::callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
void BaseCompiler::callRef(const Stk& calleeRef, const FunctionCall& call,
CodeOffset* fastCallOffset,
CodeOffset* slowCallOffset) {
CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::FuncRef);
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::FuncRef);
CalleeDesc callee = CalleeDesc::wasmFuncRef();
loadRef(calleeRef, RegRef(WasmCallRefReg));
@ -1619,7 +1619,7 @@ void BaseCompiler::callRef(const Stk& calleeRef, const FunctionCall& call,
CodeOffset BaseCompiler::callImport(unsigned globalDataOffset,
const FunctionCall& call) {
CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Import);
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Import);
CalleeDesc callee = CalleeDesc::import(globalDataOffset);
return masm.wasmCallImport(desc, callee);
}
@ -1636,7 +1636,7 @@ CodeOffset BaseCompiler::builtinInstanceMethodCall(
// Builtin method calls assume the instance register has been set.
fr.loadInstancePtr(InstanceReg);
#endif
CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Symbolic);
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
return masm.wasmCallBuiltinInstanceMethod(desc, instanceArg, builtin.identity,
builtin.failureMode);
}
@ -1660,11 +1660,11 @@ bool BaseCompiler::pushCallResults(const FunctionCall& call, ResultType type,
// Abstracted helper for throwing, used for throw, rethrow, and rethrowing
// at the end of a series of catch blocks (if none matched the exception).
bool BaseCompiler::throwFrom(RegRef exn) {
bool BaseCompiler::throwFrom(RegRef exn, uint32_t lineOrBytecode) {
pushRef(exn);
// ThrowException invokes a trap, and the rest is dead code.
return emitInstanceCall(SASigThrowException);
return emitInstanceCall(lineOrBytecode, SASigThrowException);
}
void BaseCompiler::loadTag(RegPtr instance, uint32_t tagIndex, RegRef tagDst) {
@ -4124,7 +4124,11 @@ bool BaseCompiler::emitCatchAll() {
captureResultRegisters(exnResult);
// This reference is pushed onto the stack because a potential rethrow
// may need to access it. It is always popped at the end of the block.
return pushBlockResults(exnResult);
if (!pushBlockResults(exnResult)) {
return false;
}
return true;
}
bool BaseCompiler::emitBodyDelegateThrowPad() {
@ -4132,6 +4136,7 @@ bool BaseCompiler::emitBodyDelegateThrowPad() {
// Only emit a landing pad if a `delegate` has generated a jump to here.
if (block.otherLabel.used()) {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
StackHeight savedHeight = fr.stackHeight();
fr.setStackHeight(block.stackHeight);
masm.bind(&block.otherLabel);
@ -4142,7 +4147,7 @@ bool BaseCompiler::emitBodyDelegateThrowPad() {
RegRef tag;
consumePendingException(&exn, &tag);
freeRef(tag);
if (!throwFrom(exn)) {
if (!throwFrom(exn, lineOrBytecode)) {
return false;
}
fr.setStackHeight(savedHeight);
@ -4228,6 +4233,8 @@ bool BaseCompiler::emitDelegate() {
}
bool BaseCompiler::endTryCatch(ResultType type) {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
Control& tryCatch = controlItem();
LabelKind tryKind = controlKind(0);
@ -4322,7 +4329,7 @@ bool BaseCompiler::endTryCatch(ResultType type) {
// then we rethrow the exception.
if (!hasCatchAll) {
captureResultRegisters(exnResult);
if (!pushBlockResults(exnResult) || !throwFrom(popRef())) {
if (!pushBlockResults(exnResult) || !throwFrom(popRef(), lineOrBytecode)) {
return false;
}
}
@ -4343,6 +4350,7 @@ bool BaseCompiler::endTryCatch(ResultType type) {
}
bool BaseCompiler::emitThrow() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t tagIndex;
BaseNothingVector unused_argValues{};
@ -4373,7 +4381,7 @@ bool BaseCompiler::emitThrow() {
// Create the new exception object that we will throw.
pushRef(tag);
if (!emitInstanceCall(SASigExceptionNew)) {
if (!emitInstanceCall(lineOrBytecode, SASigExceptionNew)) {
return false;
}
@ -4443,10 +4451,12 @@ bool BaseCompiler::emitThrow() {
deadCode_ = true;
return throwFrom(exn);
return throwFrom(exn, lineOrBytecode);
}
bool BaseCompiler::emitRethrow() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t relativeDepth;
if (!iter_.readRethrow(&relativeDepth)) {
return false;
@ -4462,7 +4472,7 @@ bool BaseCompiler::emitRethrow() {
deadCode_ = true;
return throwFrom(exn);
return throwFrom(exn, lineOrBytecode);
}
bool BaseCompiler::emitDrop() {
@ -4670,6 +4680,8 @@ void BaseCompiler::popStackResultsAfterCall(const StackResultsLoc& results,
// simpler.
bool BaseCompiler::emitCall() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t funcIndex;
BaseNothingVector args_{};
if (!iter_.readCall(&funcIndex, &args_)) {
@ -4694,7 +4706,7 @@ bool BaseCompiler::emitCall() {
return false;
}
FunctionCall baselineCall{};
FunctionCall baselineCall(lineOrBytecode);
beginCall(baselineCall, UseABI::Wasm,
import ? RestoreRegisterStateAndRealm::True
: RestoreRegisterStateAndRealm::False);
@ -4727,6 +4739,8 @@ bool BaseCompiler::emitCall() {
}
bool BaseCompiler::emitCallIndirect() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t funcTypeIndex;
uint32_t tableIndex;
Nothing callee_;
@ -4754,7 +4768,7 @@ bool BaseCompiler::emitCallIndirect() {
return false;
}
FunctionCall baselineCall{};
FunctionCall baselineCall(lineOrBytecode);
// State and realm are restored as needed by callIndirect (really by
// MacroAssembler::wasmCallIndirect).
beginCall(baselineCall, UseABI::Wasm, RestoreRegisterStateAndRealm::False);
@ -4790,6 +4804,8 @@ bool BaseCompiler::emitCallIndirect() {
#ifdef ENABLE_WASM_FUNCTION_REFERENCES
bool BaseCompiler::emitCallRef() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
const FuncType* funcType;
Nothing unused_callee;
BaseNothingVector unused_args{};
@ -4814,7 +4830,7 @@ bool BaseCompiler::emitCallRef() {
return false;
}
FunctionCall baselineCall{};
FunctionCall baselineCall(lineOrBytecode);
// State and realm are restored as needed by callRef (really by
// MacroAssembler::wasmCallRef).
beginCall(baselineCall, UseABI::Wasm, RestoreRegisterStateAndRealm::False);
@ -4862,6 +4878,8 @@ void BaseCompiler::emitRound(RoundingMode roundingMode, ValType operandType) {
bool BaseCompiler::emitUnaryMathBuiltinCall(SymbolicAddress callee,
ValType operandType) {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
Nothing operand_;
if (!iter_.readUnary(operandType, &operand_)) {
return false;
@ -4886,7 +4904,7 @@ bool BaseCompiler::emitUnaryMathBuiltinCall(SymbolicAddress callee,
size_t stackSpace = stackConsumed(numArgs);
StackResultsLoc noStackResults;
FunctionCall baselineCall{};
FunctionCall baselineCall(lineOrBytecode);
beginCall(baselineCall, UseABI::Builtin, RestoreRegisterStateAndRealm::False);
if (!emitCallArgs(signature, noStackResults, &baselineCall,
@ -4903,7 +4921,7 @@ bool BaseCompiler::emitUnaryMathBuiltinCall(SymbolicAddress callee,
popValueStackBy(numArgs);
pushReturnValueOfCall(baselineCall, retType.toMIRType());
pushReturnValueOfCall(baselineCall, ToMIRType(retType));
return true;
}
@ -5623,7 +5641,8 @@ void BaseCompiler::emitCompareRef(Assembler::Condition compareOp,
pushI32(rd);
}
bool BaseCompiler::emitInstanceCall(const SymbolicAddressSignature& builtin) {
bool BaseCompiler::emitInstanceCall(uint32_t lineOrBytecode,
const SymbolicAddressSignature& builtin) {
// See declaration (WasmBCClass.h) for info on the relationship between the
// compiler's value stack and the argument order for the to-be-called
// function.
@ -5635,7 +5654,7 @@ bool BaseCompiler::emitInstanceCall(const SymbolicAddressSignature& builtin) {
uint32_t numNonInstanceArgs = builtin.numArgs - 1 /* instance */;
size_t stackSpace = stackConsumed(numNonInstanceArgs);
FunctionCall baselineCall{};
FunctionCall baselineCall(lineOrBytecode);
beginCall(baselineCall, UseABI::System, RestoreRegisterStateAndRealm::True);
ABIArg instanceArg = reservePointerArgument(&baselineCall);
@ -5842,6 +5861,7 @@ bool BaseCompiler::emitAtomicXchg(ValType type, Scalar::Type viewType) {
}
bool BaseCompiler::emitWait(ValType type, uint32_t byteSize) {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
Nothing nothing;
LinearMemoryAddress<Nothing> addr;
if (!iter_.readWait(&addr, type, byteSize, &nothing, &nothing)) {
@ -5853,10 +5873,11 @@ bool BaseCompiler::emitWait(ValType type, uint32_t byteSize) {
MemoryAccessDesc access(
type.kind() == ValType::I32 ? Scalar::Int32 : Scalar::Int64, addr.align,
addr.offset, bytecodeOffset());
return atomicWait(type, &access);
return atomicWait(type, &access, lineOrBytecode);
}
bool BaseCompiler::emitWake() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
Nothing nothing;
LinearMemoryAddress<Nothing> addr;
if (!iter_.readWake(&addr, &nothing)) {
@ -5867,7 +5888,7 @@ bool BaseCompiler::emitWake() {
}
MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
bytecodeOffset());
return atomicWake(&access);
return atomicWake(&access, lineOrBytecode);
}
bool BaseCompiler::emitFence() {
@ -5901,6 +5922,7 @@ bool BaseCompiler::emitMemorySize() {
}
bool BaseCompiler::emitMemCopy() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t dstMemOrTableIndex = 0;
uint32_t srcMemOrTableIndex = 0;
Nothing nothing;
@ -5921,18 +5943,20 @@ bool BaseCompiler::emitMemCopy() {
}
}
return memCopyCall();
return memCopyCall(lineOrBytecode);
}
bool BaseCompiler::memCopyCall() {
bool BaseCompiler::memCopyCall(uint32_t lineOrBytecode) {
pushHeapBase();
return emitInstanceCall(
lineOrBytecode,
usesSharedMemory()
? (isMem32() ? SASigMemCopySharedM32 : SASigMemCopySharedM64)
: (isMem32() ? SASigMemCopyM32 : SASigMemCopyM64));
}
bool BaseCompiler::emitMemFill() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
Nothing nothing;
if (!iter_.readMemFill(&nothing, &nothing, &nothing)) {
return false;
@ -5950,12 +5974,13 @@ bool BaseCompiler::emitMemFill() {
return true;
}
}
return memFillCall();
return memFillCall(lineOrBytecode);
}
bool BaseCompiler::memFillCall() {
bool BaseCompiler::memFillCall(uint32_t lineOrBytecode) {
pushHeapBase();
return emitInstanceCall(
lineOrBytecode,
usesSharedMemory()
? (isMem32() ? SASigMemFillSharedM32 : SASigMemFillSharedM64)
: (isMem32() ? SASigMemFillM32 : SASigMemFillM64));
@ -5979,6 +6004,8 @@ bool BaseCompiler::emitMemInit() {
// Bulk table operations.
bool BaseCompiler::emitTableCopy() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t dstMemOrTableIndex = 0;
uint32_t srcMemOrTableIndex = 0;
Nothing nothing;
@ -5993,7 +6020,7 @@ bool BaseCompiler::emitTableCopy() {
pushI32(dstMemOrTableIndex);
pushI32(srcMemOrTableIndex);
return emitInstanceCall(SASigTableCopy);
return emitInstanceCall(lineOrBytecode, SASigTableCopy);
}
bool BaseCompiler::emitTableInit() {
@ -6017,6 +6044,7 @@ bool BaseCompiler::emitTableFill() {
}
bool BaseCompiler::emitTableGet() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t tableIndex;
Nothing nothing;
if (!iter_.readTableGet(&tableIndex, &nothing)) {
@ -6030,7 +6058,7 @@ bool BaseCompiler::emitTableGet() {
}
pushI32(tableIndex);
// get(index:u32, table:u32) -> AnyRef
return emitInstanceCall(SASigTableGet);
return emitInstanceCall(lineOrBytecode, SASigTableGet);
}
bool BaseCompiler::emitTableGrow() {
@ -6043,6 +6071,7 @@ bool BaseCompiler::emitTableGrow() {
}
bool BaseCompiler::emitTableSet() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t tableIndex;
Nothing nothing;
if (!iter_.readTableSet(&tableIndex, &nothing, &nothing)) {
@ -6056,7 +6085,7 @@ bool BaseCompiler::emitTableSet() {
}
pushI32(tableIndex);
// set(index:u32, value:ref, table:u32) -> void
return emitInstanceCall(SASigTableSet);
return emitInstanceCall(lineOrBytecode, SASigTableSet);
}
bool BaseCompiler::emitTableSize() {
@ -6197,6 +6226,8 @@ void BaseCompiler::emitPreBarrier(RegPtr valueAddr) {
bool BaseCompiler::emitPostBarrierImprecise(const Maybe<RegRef>& object,
RegPtr valueAddr, RegRef value) {
uint32_t bytecodeOffset = iter_.lastOpcodeOffset();
// We must force a sync before the guard so that locals are in a consistent
// location whether or not the post-barrier call is taken.
sync();
@ -6217,7 +6248,7 @@ bool BaseCompiler::emitPostBarrierImprecise(const Maybe<RegRef>& object,
// instance area, and we are careful so that the GC will not run while the
// post-barrier call is active, so push a uintptr_t value.
pushPtr(valueAddr);
if (!emitInstanceCall(SASigPostBarrier)) {
if (!emitInstanceCall(bytecodeOffset, SASigPostBarrier)) {
return false;
}
@ -6234,6 +6265,8 @@ bool BaseCompiler::emitPostBarrierImprecise(const Maybe<RegRef>& object,
bool BaseCompiler::emitPostBarrierPrecise(const Maybe<RegRef>& object,
RegPtr valueAddr, RegRef prevValue,
RegRef value) {
uint32_t bytecodeOffset = iter_.lastOpcodeOffset();
// Push `object` and `value` to preserve them across the call.
if (object) {
pushRef(*object);
@ -6243,7 +6276,7 @@ bool BaseCompiler::emitPostBarrierPrecise(const Maybe<RegRef>& object,
// Push the arguments and call the precise post-barrier
pushPtr(valueAddr);
pushRef(prevValue);
if (!emitInstanceCall(SASigPostBarrierPrecise)) {
if (!emitInstanceCall(bytecodeOffset, SASigPostBarrierPrecise)) {
return false;
}
@ -6539,6 +6572,8 @@ bool BaseCompiler::emitGcArraySet(RegRef object, RegPtr data, RegI32 index,
}
bool BaseCompiler::emitStructNew() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t typeIndex;
BaseNothingVector args{};
if (!iter_.readStructNew(&typeIndex, &args)) {
@ -6554,7 +6589,7 @@ bool BaseCompiler::emitStructNew() {
// Allocate a default initialized struct. This requires the rtt value for the
// struct to be pushed on the stack. This will trap on OOM.
emitGcCanon(typeIndex);
if (!emitInstanceCall(SASigStructNew)) {
if (!emitInstanceCall(lineOrBytecode, SASigStructNew)) {
return false;
}
@ -6629,6 +6664,8 @@ bool BaseCompiler::emitStructNew() {
}
bool BaseCompiler::emitStructNewDefault() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t typeIndex;
if (!iter_.readStructNewDefault(&typeIndex)) {
return false;
@ -6640,7 +6677,7 @@ bool BaseCompiler::emitStructNewDefault() {
// Allocate a default initialized struct. This requires the rtt value for the
// struct to be pushed on the stack. This will trap on OOM.
emitGcCanon(typeIndex);
return emitInstanceCall(SASigStructNew);
return emitInstanceCall(lineOrBytecode, SASigStructNew);
}
bool BaseCompiler::emitStructGet(FieldExtension extension) {
@ -6751,6 +6788,8 @@ bool BaseCompiler::emitStructSet() {
}
bool BaseCompiler::emitArrayNew() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t typeIndex;
Nothing nothing;
if (!iter_.readArrayNew(&typeIndex, &nothing, &nothing)) {
@ -6766,7 +6805,7 @@ bool BaseCompiler::emitArrayNew() {
// Allocate a default initialized array. This requires the rtt value for the
// array to be pushed on the stack. This will trap on OOM.
emitGcCanon(typeIndex);
if (!emitInstanceCall(SASigArrayNew)) {
if (!emitInstanceCall(lineOrBytecode, SASigArrayNew)) {
return false;
}
@ -6819,6 +6858,8 @@ bool BaseCompiler::emitArrayNew() {
}
bool BaseCompiler::emitArrayNewFixed() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t typeIndex, numElements;
if (!iter_.readArrayNewFixed(&typeIndex, &numElements)) {
return false;
@ -6836,7 +6877,7 @@ bool BaseCompiler::emitArrayNewFixed() {
// to SASigArrayNew will use them.
pushI32(numElements);
emitGcCanon(typeIndex);
if (!emitInstanceCall(SASigArrayNew)) {
if (!emitInstanceCall(lineOrBytecode, SASigArrayNew)) {
return false;
}
@ -6885,6 +6926,8 @@ bool BaseCompiler::emitArrayNewFixed() {
}
bool BaseCompiler::emitArrayNewDefault() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t typeIndex;
Nothing nothing;
if (!iter_.readArrayNewDefault(&typeIndex, &nothing)) {
@ -6898,10 +6941,12 @@ bool BaseCompiler::emitArrayNewDefault() {
// Allocate a default initialized array. This requires the rtt value for the
// array to be pushed on the stack. This will trap on OOM.
emitGcCanon(typeIndex);
return emitInstanceCall(SASigArrayNew);
return emitInstanceCall(lineOrBytecode, SASigArrayNew);
}
bool BaseCompiler::emitArrayNewData() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t typeIndex, segIndex;
Nothing nothing;
if (!iter_.readArrayNewData(&typeIndex, &segIndex, &nothing, &nothing)) {
@ -6918,10 +6963,12 @@ bool BaseCompiler::emitArrayNewData() {
// The call removes 4 items from the stack: the segment byte offset and
// number of elements (operands to array.new_data), and the type index and
// seg index as pushed above.
return emitInstanceCall(SASigArrayNewData);
return emitInstanceCall(lineOrBytecode, SASigArrayNewData);
}
bool BaseCompiler::emitArrayNewElem() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t typeIndex, segIndex;
Nothing nothing;
if (!iter_.readArrayNewElem(&typeIndex, &segIndex, &nothing, &nothing)) {
@ -6938,7 +6985,7 @@ bool BaseCompiler::emitArrayNewElem() {
// The call removes 4 items from the stack: the segment element offset and
// number of elements (operands to array.new_elem), and the type index and
// seg index as pushed above.
return emitInstanceCall(SASigArrayNewElem);
return emitInstanceCall(lineOrBytecode, SASigArrayNewElem);
}
bool BaseCompiler::emitArrayGet(FieldExtension extension) {
@ -7070,6 +7117,8 @@ bool BaseCompiler::emitArrayLen() {
}
bool BaseCompiler::emitArrayCopy() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
int32_t elemSize;
bool elemsAreRefTyped;
Nothing nothing;
@ -7116,10 +7165,12 @@ bool BaseCompiler::emitArrayCopy() {
freePtr(RegPtr(PreBarrierReg));
}
return emitInstanceCall(SASigArrayCopy);
return emitInstanceCall(lineOrBytecode, SASigArrayCopy);
}
bool BaseCompiler::emitRefTest() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
Nothing nothing;
uint32_t typeIndex;
if (!iter_.readRefTest(&typeIndex, &nothing)) {
@ -7131,10 +7182,12 @@ bool BaseCompiler::emitRefTest() {
}
emitGcCanon(typeIndex);
return emitInstanceCall(SASigRefTest);
return emitInstanceCall(lineOrBytecode, SASigRefTest);
}
bool BaseCompiler::emitRefCast() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
Nothing nothing;
uint32_t typeIndex;
if (!iter_.readRefCast(&typeIndex, &nothing)) {
@ -7155,7 +7208,7 @@ bool BaseCompiler::emitRefCast() {
emitGcCanon(typeIndex);
// 2. ref.test : [ref, rtt] -> [i32]
if (!emitInstanceCall(SASigRefTest)) {
if (!emitInstanceCall(lineOrBytecode, SASigRefTest)) {
return false;
}
@ -7173,6 +7226,7 @@ bool BaseCompiler::emitRefCast() {
bool BaseCompiler::emitBrOnCast() {
MOZ_ASSERT(!hasLatentOp());
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t relativeDepth;
BaseNothingVector unused_values{};
uint32_t typeIndex;
@ -7199,7 +7253,7 @@ bool BaseCompiler::emitBrOnCast() {
emitGcCanon(typeIndex);
// 2. ref.test : [ref, rtt] -> [i32]
if (!emitInstanceCall(SASigRefTest)) {
if (!emitInstanceCall(lineOrBytecode, SASigRefTest)) {
return false;
}
@ -8475,6 +8529,7 @@ bool BaseCompiler::emitVectorLaneSelect() {
// "Intrinsics" - magically imported functions for internal use.
bool BaseCompiler::emitIntrinsic() {
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
const Intrinsic* intrinsic;
BaseNothingVector params;
@ -8490,7 +8545,7 @@ bool BaseCompiler::emitIntrinsic() {
pushHeapBase();
// Call the intrinsic
return emitInstanceCall(intrinsic->signature);
return emitInstanceCall(lineOrBytecode, intrinsic->signature);
}
//////////////////////////////////////////////////////////////////////////////
@ -10505,6 +10560,7 @@ BaseCompiler::BaseCompiler(const ModuleEnvironment& moduleEnv,
stackMapGenerator_(stackMaps, trapExitLayout, trapExitLayoutNumWords,
*masm),
deadCode_(false),
lastReadCallSite_(0),
bceSafe_(0),
latentOp_(LatentOp::None),
latentType_(ValType::I32),
@ -10534,10 +10590,6 @@ BaseCompiler::~BaseCompiler() {
bool BaseCompiler::init() {
// We may lift this restriction in the future.
MOZ_ASSERT_IF(usesMemory() && isMem64(), !moduleEnv_.hugeMemoryEnabled());
// asm.js is not supported in baseline
MOZ_ASSERT(!moduleEnv_.isAsmJS());
// Only asm.js modules have call site line numbers
MOZ_ASSERT(func_.callSiteLineNums.empty());
ra.init(this);
@ -10555,6 +10607,8 @@ bool BaseCompiler::init() {
FuncOffsets BaseCompiler::finish() {
MOZ_ASSERT(iter_.done(), "all bytes must be consumed");
MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
MOZ_ASSERT(stk_.empty());
MOZ_ASSERT(stackMapGenerator_.memRefsOnStk == 0);

View file

@ -887,7 +887,7 @@ static int64_t UDivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
MOZ_ASSERT(y != 0);
return int64_t(x / y);
return x / y;
}
static int64_t ModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
@ -904,14 +904,14 @@ static int64_t UModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
MOZ_ASSERT(y != 0);
return int64_t(x % y);
return x % y;
}
static int64_t TruncateDoubleToInt64(double input) {
// Note: INT64_MAX is not representable in double; double(INT64_MAX) is
// actually INT64_MAX + 1, so that boundary also takes the failure path.
if (input >= double(INT64_MAX) || input < double(INT64_MIN) || IsNaN(input)) {
return int64_t(0x8000000000000000);
return 0x8000000000000000;
}
return int64_t(input);
}
@ -920,7 +920,7 @@ static uint64_t TruncateDoubleToUint64(double input) {
// Note: UINT64_MAX is not representable in double; double(UINT64_MAX) is
// actually UINT64_MAX + 1, so that boundary also takes the failure path.
if (input >= double(UINT64_MAX) || input <= -1.0 || IsNaN(input)) {
return int64_t(0x8000000000000000);
return 0x8000000000000000;
}
return uint64_t(input);
}
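
Both division builtins reassemble a 64-bit operand from 32-bit halves, and both truncation builtins share 0x8000000000000000 as the failure value; double(INT64_MAX) rounds up to 2^63 exactly, which is why the upper bound is checked with >=. A self-contained sketch of the same arithmetic:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Rebuild a 64-bit value from 32-bit halves, as UDivI64/UModI64 do.
    static uint64_t Join64(uint32_t hi, uint32_t lo) {
      return (uint64_t(hi) << 32) + lo;
    }

    int main() {
      assert(Join64(0x1, 0x2) == 0x100000002ull);
      // double(INT64_MAX) == 2^63, so `input >= double(INT64_MAX)` also
      // rejects inputs that rounded up to 2^63.
      double input = 9.3e18;  // larger than INT64_MAX (~9.22e18)
      bool fails = input >= double(INT64_MAX) || input < double(INT64_MIN) ||
                   std::isnan(input);
      assert(fails);  // TruncateDoubleToInt64 would return 0x8000000000000000
      return 0;
    }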

View file

@ -371,7 +371,7 @@ struct MetadataCacheablePod {
WASM_DECLARE_CACHEABLE_POD(MetadataCacheablePod)
using ModuleHash = uint8_t[8];
typedef uint8_t ModuleHash[8];
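
Both spellings declare the same eight-byte array type, so the swap back to the typedef form is purely stylistic; for instance:

    static_assert(sizeof(ModuleHash) == 8, "ModuleHash is eight bytes");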
struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
TypeDefVector types;

View file

@ -105,7 +105,7 @@ class ArgTypeVector {
if (isSyntheticStackResultPointerArg(i)) {
return jit::MIRType::StackResults;
}
return args_[naturalIndex(i)].toMIRType();
return ToMIRType(args_[naturalIndex(i)]);
}
};
@ -426,11 +426,6 @@ class CallSiteDesc {
MOZ_ASSERT(kind == Kind(kind_));
MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
}
CallSiteDesc(BytecodeOffset bytecodeOffset, Kind kind)
: lineOrBytecode_(bytecodeOffset.offset()), kind_(kind) {
MOZ_ASSERT(kind == Kind(kind_));
MOZ_ASSERT(bytecodeOffset.offset() == lineOrBytecode_);
}
uint32_t lineOrBytecode() const { return lineOrBytecode_; }
Kind kind() const { return Kind(kind_); }
bool isImportCall() const { return kind() == CallSiteDesc::Import; }
@ -457,7 +452,7 @@ class CallSite : public CallSiteDesc {
CallSite(CallSiteDesc desc, uint32_t returnAddressOffset)
: CallSiteDesc(desc), returnAddressOffset_(returnAddressOffset) {}
void offsetBy(uint32_t delta) { returnAddressOffset_ += delta; }
void offsetBy(int32_t delta) { returnAddressOffset_ += delta; }
uint32_t returnAddressOffset() const { return returnAddressOffset_; }
};
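
With the BytecodeOffset convenience constructor gone, every call site is described from the raw uint32_t lineOrBytecode carried by the call state, and offsetBy takes a signed delta again. A sketch of the restored usage, with illustrative offset values:

    CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
    CallSite site(desc, /* returnAddressOffset = */ 0x40);
    site.offsetBy(-16);  // int32_t delta after the backout
    MOZ_ASSERT(site.returnAddressOffset() == 0x30);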

View file

@ -573,10 +573,12 @@ CompilerEnvironment::CompilerEnvironment(const CompileArgs& args)
: state_(InitialWithArgs), args_(&args) {}
CompilerEnvironment::CompilerEnvironment(CompileMode mode, Tier tier,
OptimizedBackend optimizedBackend,
DebugEnabled debugEnabled)
: state_(InitialWithModeTierDebug),
mode_(mode),
tier_(tier),
optimizedBackend_(optimizedBackend),
debug_(debugEnabled) {}
void CompilerEnvironment::computeParameters() {
@ -622,6 +624,8 @@ void CompilerEnvironment::computeParameters(Decoder& d) {
tier_ = hasSecondTier ? Tier::Optimized : Tier::Baseline;
}
optimizedBackend_ = OptimizedBackend::Ion;
debug_ = debugEnabled ? DebugEnabled::True : DebugEnabled::False;
state_ = Computed;
@ -721,12 +725,14 @@ bool wasm::CompileTier2(const CompileArgs& args, const Bytes& bytecode,
UniqueCharsVector* warnings, Atomic<bool>* cancelled) {
Decoder d(bytecode, 0, error);
OptimizedBackend optimizedBackend = OptimizedBackend::Ion;
ModuleEnvironment moduleEnv(args.features);
if (!DecodeModuleEnvironment(d, &moduleEnv)) {
return false;
}
CompilerEnvironment compilerEnv(CompileMode::Tier2, Tier::Optimized,
DebugEnabled::False);
optimizedBackend, DebugEnabled::False);
compilerEnv.computeParameters(d);
ModuleGenerator mg(args, &moduleEnv, &compilerEnv, cancelled, error,

View file

@ -191,6 +191,7 @@ struct CompilerEnvironment {
struct {
CompileMode mode_;
Tier tier_;
OptimizedBackend optimizedBackend_;
DebugEnabled debug_;
};
};
@ -203,7 +204,9 @@ struct CompilerEnvironment {
// Save the provided values for mode, tier, and debug, and the initial value
// for gc/refTypes. A subsequent computeParameters() will compute the
// final value of gc/refTypes.
CompilerEnvironment(CompileMode mode, Tier tier, DebugEnabled debugEnabled);
CompilerEnvironment(CompileMode mode, Tier tier,
OptimizedBackend optimizedBackend,
DebugEnabled debugEnabled);
// Compute any remaining compilation parameters.
void computeParameters(Decoder& d);
@ -222,6 +225,10 @@ struct CompilerEnvironment {
MOZ_ASSERT(isComputed());
return tier_;
}
OptimizedBackend optimizedBackend() const {
MOZ_ASSERT(isComputed());
return optimizedBackend_;
}
DebugEnabled debug() const {
MOZ_ASSERT(isComputed());
return debug_;

View file

@ -1040,6 +1040,12 @@ static const unsigned MaxFrameSize = 512 * 1024;
static const unsigned MaxVarU32DecodedBytes = 5;
// Which backend to use in the case of the optimized tier.
enum class OptimizedBackend {
Ion,
};
// The CompileMode controls how compilation of a module is performed (notably,
// how many times we compile it).
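
OptimizedBackend has a single member for now, but restoring it keeps the backend an explicit compilation parameter. Construction then matches the call sites elsewhere in this diff:

    CompilerEnvironment compilerEnv(CompileMode::Once, Tier::Optimized,
                                    OptimizedBackend::Ion, DebugEnabled::False);
    compilerEnv.computeParameters();
    MOZ_ASSERT(compilerEnv.optimizedBackend() == OptimizedBackend::Ion);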

View file

@ -164,7 +164,6 @@ class ResultType {
}
}
bool valid() const { return kind() != InvalidKind; }
bool empty() const { return kind() == EmptyKind; }
size_t length() const {

View file

@ -540,7 +540,7 @@ static void GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed,
ClearExitFP(masm, ABINonArgReturnVolatileReg);
}
DebugOnly<uint32_t> poppedFP{};
DebugOnly<uint32_t> poppedFP;
#if defined(JS_CODEGEN_MIPS64)
@ -869,7 +869,7 @@ void wasm::GenerateJitEntryPrologue(MacroAssembler& masm,
void wasm::GenerateJitEntryEpilogue(MacroAssembler& masm,
CallableOffsets* offsets) {
DebugOnly<uint32_t> poppedFP{};
DebugOnly<uint32_t> poppedFP;
#ifdef JS_CODEGEN_ARM64
RegisterOrSP sp = masm.getStackPointer();
AutoForbidPoolsAndNops afp(&masm,

View file

@ -63,7 +63,7 @@ static const JSClassOps RttValueClassOps = {
RttValue::trace, // trace
};
RttValue* RttValue::create(JSContext* cx, const TypeHandle& handle) {
RttValue* RttValue::create(JSContext* cx, TypeHandle handle) {
Rooted<RttValue*> rtt(cx,
NewTenuredObjectWithGivenProto<RttValue>(cx, nullptr));
if (!rtt) {
@ -96,7 +96,7 @@ const JSClass js::RttValue::class_ = {
JSCLASS_HAS_RESERVED_SLOTS(RttValue::SlotCount),
&RttValueClassOps};
RttValue* RttValue::rttCanon(JSContext* cx, const TypeHandle& handle) {
RttValue* RttValue::rttCanon(JSContext* cx, TypeHandle handle) {
return RttValue::create(cx, handle);
}
@ -412,13 +412,11 @@ bool WasmGcObject::loadValue(JSContext* cx, const RttValue::PropOffset& offset,
if (type.isTypeIndex()) {
type = RefType::fromTypeCode(TypeCode::EqRef, true);
}
if (!type.isExposable()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_BAD_VAL_TYPE);
return false;
}
if (is<WasmStructObject>()) {
// `offset` is the field offset, without regard to the in/out-line split.
// That is handled by the call to `fieldOffsetToAddress`.
@ -430,20 +428,20 @@ bool WasmGcObject::loadValue(JSContext* cx, const RttValue::PropOffset& offset,
rtt.typeDef().structType().size_);
return ToJSValue(cx, structObj.fieldOffsetToAddress(type, offset.get()),
type, vp);
} else {
MOZ_ASSERT(is<WasmArrayObject>());
WasmArrayObject& arrayObj = as<WasmArrayObject>();
if (offset.get() == UINT32_MAX) {
// This denotes "length"
uint32_t numElements = arrayObj.numElements_;
// We can't use `ToJSValue(.., ValType::I32, ..)` here since it will
// treat the integer as signed, which it isn't. `vp.set(..)` will
// coerce correctly to a JS::Value, though.
vp.set(NumberValue(numElements));
return true;
}
return ToJSValue(cx, arrayObj.data_ + offset.get(), type, vp);
}
MOZ_ASSERT(is<WasmArrayObject>());
WasmArrayObject& arrayObj = as<WasmArrayObject>();
if (offset.get() == UINT32_MAX) {
// This denotes "length"
uint32_t numElements = arrayObj.numElements_;
// We can't use `ToJSValue(.., ValType::I32, ..)` here since it will
// treat the integer as signed, which it isn't. `vp.set(..)` will
// coerce correctly to a JS::Value, though.
vp.set(NumberValue(numElements));
return true;
}
return ToJSValue(cx, arrayObj.data_ + offset.get(), type, vp);
}
bool WasmGcObject::isRuntimeSubtype(Handle<RttValue*> rtt) const {
@ -487,7 +485,7 @@ bool WasmGcObject::obj_newEnumerate(JSContext* cx, HandleObject obj,
}
RootedId id(cx);
for (size_t index = 0; index < indexCount; index++) {
id = PropertyKey::Int(int32_t(index));
id = PropertyKey::Int(index);
properties.infallibleAppend(id);
}

View file

@ -28,7 +28,7 @@ class WasmGcObject;
class RttValue : public NativeObject {
private:
static RttValue* create(JSContext* cx, const wasm::TypeHandle& handle);
static RttValue* create(JSContext* cx, wasm::TypeHandle handle);
public:
static const JSClass class_;
@ -42,7 +42,7 @@ class RttValue : public NativeObject {
SlotCount = 4,
};
static RttValue* rttCanon(JSContext* cx, const wasm::TypeHandle& handle);
static RttValue* rttCanon(JSContext* cx, wasm::TypeHandle handle);
static RttValue* rttSub(JSContext* cx, js::Handle<RttValue*> parent,
js::Handle<RttValue*> subCanon);

View file

@ -215,7 +215,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata) {
moduleEnv_->codeSection ? moduleEnv_->codeSection->size : 0;
size_t estimatedCodeSize =
size_t(1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize));
1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize);
(void)masm_.reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess));
(void)metadataTier_->codeRanges.reserve(2 * moduleEnv_->numFuncDefs());
@ -256,7 +256,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata) {
}
for (TagDesc& tag : moduleEnv_->tags) {
if (!allocateGlobalBytes(sizeof(void*), sizeof(void*),
if (!allocateGlobalBytes(sizeof(WasmTagObject*), sizeof(void*),
&tag.globalDataOffset)) {
return false;
}
@ -309,7 +309,8 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata) {
continue;
}
uint32_t width = global.isIndirect() ? sizeof(void*) : global.type().size();
uint32_t width =
global.isIndirect() ? sizeof(void*) : SizeOf(global.type());
uint32_t globalDataOffset;
if (!allocateGlobalBytes(width, width, &globalDataOffset)) {
@ -677,8 +678,12 @@ bool ModuleGenerator::linkCompiledCode(CompiledCode& code) {
return tn->hasTryBody();
};
auto tryNoteOp = [=](uint32_t, TryNote* tn) { tn->offsetBy(offsetInModule); };
return AppendForEach(&metadataTier_->tryNotes, code.tryNotes, tryNoteFilter,
tryNoteOp);
if (!AppendForEach(&metadataTier_->tryNotes, code.tryNotes, tryNoteFilter,
tryNoteOp)) {
return false;
}
return true;
}
static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
@ -687,9 +692,14 @@ static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
switch (task->compilerEnv.tier()) {
case Tier::Optimized:
if (!IonCompileFunctions(task->moduleEnv, task->compilerEnv, task->lifo,
task->inputs, &task->output, error)) {
return false;
switch (task->compilerEnv.optimizedBackend()) {
case OptimizedBackend::Ion:
if (!IonCompileFunctions(task->moduleEnv, task->compilerEnv,
task->lifo, task->inputs, &task->output,
error)) {
return false;
}
break;
}
break;
case Tier::Baseline:
@ -831,7 +841,13 @@ bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
threshold = JitOptions.wasmBatchBaselineThreshold;
break;
case Tier::Optimized:
threshold = JitOptions.wasmBatchIonThreshold;
switch (compilerEnv_->optimizedBackend()) {
case OptimizedBackend::Ion:
threshold = JitOptions.wasmBatchIonThreshold;
break;
default:
MOZ_CRASH("Invalid optimizedBackend value");
}
break;
default:
MOZ_CRASH("Invalid tier value");

View file

@ -352,7 +352,7 @@ static int32_t PerformWait(Instance* instance, PtrT byteOffset, ValT value,
mozilla::Maybe<mozilla::TimeDuration> timeout;
if (timeout_ns >= 0) {
timeout = mozilla::Some(
mozilla::TimeDuration::FromMicroseconds(double(timeout_ns) / 1000));
mozilla::TimeDuration::FromMicroseconds(timeout_ns / 1000));
}
MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
@ -1564,7 +1564,7 @@ RttValue* Instance::rttCanon(uint32_t typeIndex) const {
// Instance creation and related.
Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
const SharedCode& code, Handle<WasmMemoryObject*> memory,
SharedCode code, Handle<WasmMemoryObject*> memory,
SharedTableVector&& tables, UniqueDebugState maybeDebug)
: realm_(cx->realm()),
jsJitArgsRectifier_(
@ -1587,7 +1587,7 @@ Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
}
Instance* Instance::create(JSContext* cx, Handle<WasmInstanceObject*> object,
const SharedCode& code, uint32_t globalDataLength,
SharedCode code, uint32_t globalDataLength,
Handle<WasmMemoryObject*> memory,
SharedTableVector&& tables,
UniqueDebugState maybeDebug) {
@ -2599,7 +2599,7 @@ JSString* Instance::createDisplayURL(JSContext* cx) {
const ModuleHash& hash = metadata().debugHash;
for (unsigned char byte : hash) {
unsigned char digit1 = byte / 16, digit2 = byte % 16;
char digit1 = byte / 16, digit2 = byte % 16;
if (!result.append(
(char)(digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10))) {
return nullptr;
@ -2679,4 +2679,5 @@ void wasm::ReportTrapError(JSContext* cx, unsigned errorNumber) {
MOZ_ASSERT(exn.isObject() && exn.toObject().is<ErrorObject>());
exn.toObject().as<ErrorObject>().setFromWasmTrap();
return;
}
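
The createDisplayURL loop above hand-rolls lowercase hex: each byte splits into high and low nibbles with /16 and %16, and each nibble maps 0-9 to '0'-'9' and 10-15 to 'a'-'f'. Worked example for the byte 0xAB:

    char digit1 = 0xAB / 16;  // 10
    char digit2 = 0xAB % 16;  // 11
    char c1 = digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10;  // 'a'
    char c2 = digit2 < 10 ? digit2 + '0' : digit2 + 'a' - 10;  // 'b'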

View file

@ -199,14 +199,14 @@ class alignas(16) Instance {
bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc,
uint64_t* argv);
Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
const SharedCode& code, Handle<WasmMemoryObject*> memory,
SharedTableVector&& tables, UniqueDebugState maybeDebug);
Instance(JSContext* cx, Handle<WasmInstanceObject*> object, SharedCode code,
Handle<WasmMemoryObject*> memory, SharedTableVector&& tables,
UniqueDebugState maybeDebug);
~Instance();
public:
static Instance* create(JSContext* cx, Handle<WasmInstanceObject*> object,
const SharedCode& code, uint32_t globalDataLength,
SharedCode code, uint32_t globalDataLength,
Handle<WasmMemoryObject*> memory,
SharedTableVector&& tables,
UniqueDebugState maybeDebug);
@ -460,8 +460,7 @@ class alignas(16) Instance {
uint32_t numElements, void* arrayDescr,
uint32_t segIndex);
static void* arrayNewElem(Instance* instance, uint32_t segElemIndex,
uint32_t numElements, void* arrayDescr,
uint32_t segIndex);
uint32_t size, void* arrayDescr, uint32_t segIndex);
static int32_t arrayCopy(Instance* instance, void* dstArray,
uint32_t dstIndex, void* srcArray, uint32_t srcIndex,
uint32_t numElements, uint32_t elementSize);

View file

@ -105,7 +105,7 @@ bool wasm::CompileIntrinsicModule(JSContext* cx,
}
CompilerEnvironment compilerEnv(
CompileMode::Once, IonAvailable(cx) ? Tier::Optimized : Tier::Baseline,
DebugEnabled::False);
OptimizedBackend::Ion, DebugEnabled::False);
compilerEnv.computeParameters();
// Build a module environment

View file

@ -1012,7 +1012,7 @@ class FunctionCompiler {
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins =
MWasmReduceSimd128::New(alloc(), src, op, outType.toMIRType(), imm);
MWasmReduceSimd128::New(alloc(), src, op, ToMIRType(outType), imm);
curBlock_->add(ins);
return ins;
}
@ -1360,8 +1360,8 @@ class FunctionCompiler {
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
load = MWasmLoad::New(alloc(), memoryBase, base, *access,
result.toMIRType());
load =
MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
}
if (!load) {
return nullptr;
@ -1809,7 +1809,10 @@ class FunctionCompiler {
return false;
}
finishCall(&args);
return builtinInstanceMethodCall(callee, lineOrBytecode, args);
if (!builtinInstanceMethodCall(callee, lineOrBytecode, args)) {
return false;
}
return true;
}
/***************************************************************** Calls */
@ -1896,7 +1899,7 @@ class FunctionCompiler {
if (inDeadCode()) {
return true;
}
return passArgWorker(argDef, type.toMIRType(), call);
return passArgWorker(argDef, ToMIRType(type), call);
}
// If the call returns results on the stack, prepare a stack area to receive
@ -1925,7 +1928,7 @@ class FunctionCompiler {
}
for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
iter.cur().type().toMIRType());
ToMIRType(iter.cur().type()));
stackResultArea->initResult(iter.index() - base, loc);
}
curBlock_->add(stackResultArea);
@ -2087,7 +2090,10 @@ class FunctionCompiler {
}
curBlock_->add(ins);
return finishTryCall(&tryDesc);
if (!finishTryCall(&tryDesc)) {
return false;
}
return true;
}
bool callDirect(const FuncType& funcType, uint32_t funcIndex,
@ -3096,7 +3102,7 @@ class FunctionCompiler {
// Load each value from the data pointer
for (size_t i = 0; i < params.length(); i++) {
auto* load = MWasmLoadObjectDataField::New(
alloc(), exception, data, offsets[i], params[i].toMIRType());
alloc(), exception, data, offsets[i], ToMIRType(params[i]));
if (!load || !values->append(load)) {
return false;
}
@ -3238,7 +3244,7 @@ class FunctionCompiler {
// Load the previous value
auto* prevValue = MWasmLoadObjectDataField::New(alloc(), exception, data,
offset, type.toMIRType());
offset, ToMIRType(type));
if (!prevValue) {
return false;
}
@ -3983,12 +3989,12 @@ static bool EmitGetGlobal(FunctionCompiler& f) {
if (!global.isConstant()) {
f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
global.isIndirect(),
global.type().toMIRType()));
ToMIRType(global.type())));
return true;
}
LitVal value = global.constantValue();
MIRType mirType = value.type().toMIRType();
MIRType mirType = ToMIRType(value.type());
MDefinition* result;
switch (value.type().kind()) {
@ -4221,7 +4227,7 @@ static bool EmitRotate(FunctionCompiler& f, ValType type, bool isLeftRotation) {
return false;
}
MDefinition* result = f.rotate(lhs, rhs, type.toMIRType(), isLeftRotation);
MDefinition* result = f.rotate(lhs, rhs, ToMIRType(type), isLeftRotation);
f.iter().setResult(result);
return true;
}
@ -4330,7 +4336,7 @@ static bool EmitCopySign(FunctionCompiler& f, ValType operandType) {
return false;
}
f.iter().setResult(f.binary<MCopySign>(lhs, rhs, operandType.toMIRType()));
f.iter().setResult(f.binary<MCopySign>(lhs, rhs, ToMIRType(operandType)));
return true;
}
@ -4462,7 +4468,7 @@ static bool EmitUnaryMathBuiltinCall(FunctionCompiler& f,
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
MDefinition* input;
if (!f.iter().readUnary(ValType::fromMIRType(callee.argTypes[0]), &input)) {
if (!f.iter().readUnary(ValType(callee.argTypes[0]), &input)) {
return false;
}
@ -4499,8 +4505,7 @@ static bool EmitBinaryMathBuiltinCall(FunctionCompiler& f,
MDefinition* lhs;
MDefinition* rhs;
// This call to readBinary assumes both operands have the same type.
if (!f.iter().readBinary(ValType::fromMIRType(callee.argTypes[0]), &lhs,
&rhs)) {
if (!f.iter().readBinary(ValType(callee.argTypes[0]), &lhs, &rhs)) {
return false;
}
@ -4656,7 +4661,7 @@ static bool EmitAtomicStore(FunctionCompiler& f, ValType type,
static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
MOZ_ASSERT(type == ValType::I32 || type == ValType::I64);
MOZ_ASSERT(type.size() == byteSize);
MOZ_ASSERT(SizeOf(type) == byteSize);
uint32_t bytecodeOffset = f.readBytecodeOffset();
@ -4687,7 +4692,7 @@ static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
return false;
}
MOZ_ASSERT(type.toMIRType() == callee.argTypes[2]);
MOZ_ASSERT(ToMIRType(type) == callee.argTypes[2]);
if (!f.passArg(expected, callee.argTypes[2], &args)) {
return false;
}
@ -7030,6 +7035,7 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
CompiledCode* code, UniqueChars* error) {
MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
MOZ_ASSERT(compilerEnv.optimizedBackend() == OptimizedBackend::Ion);
TempAllocator alloc(&lifo);
JitContext jitContext;

View file

@ -1048,14 +1048,14 @@ static JSObject* TableTypeToObject(JSContext* cx, RefType type,
if (maximum.isSome()) {
if (!props.append(IdValuePair(NameToId(cx->names().maximum),
NumberValue(maximum.value())))) {
Int32Value(maximum.value())))) {
ReportOutOfMemory(cx);
return nullptr;
}
}
if (!props.append(
IdValuePair(NameToId(cx->names().minimum), NumberValue(initial)))) {
IdValuePair(NameToId(cx->names().minimum), Int32Value(initial)))) {
ReportOutOfMemory(cx);
return nullptr;
}
@ -1863,8 +1863,7 @@ void WasmInstanceObject::trace(JSTracer* trc, JSObject* obj) {
/* static */
WasmInstanceObject* WasmInstanceObject::create(
JSContext* cx, const SharedCode& code,
const DataSegmentVector& dataSegments,
JSContext* cx, SharedCode code, const DataSegmentVector& dataSegments,
const ElemSegmentVector& elemSegments, uint32_t globalDataLength,
Handle<WasmMemoryObject*> memory, SharedTableVector&& tables,
const JSFunctionVector& funcImports, const GlobalDescVector& globals,
@ -2655,7 +2654,7 @@ bool WasmMemoryObject::growImpl(JSContext* cx, const CallArgs& args) {
return false;
}
args.rval().setInt32(int32_t(ret));
args.rval().setInt32(ret);
return true;
}
@ -3259,7 +3258,7 @@ bool WasmTableObject::growImpl(JSContext* cx, const CallArgs& args) {
}
#endif
args.rval().setInt32(int32_t(oldLength));
args.rval().setInt32(oldLength);
return true;
}
@ -4187,7 +4186,7 @@ JSFunction* WasmFunctionCreate(JSContext* cx, HandleFunction func,
ModuleEnvironment moduleEnv(compileArgs->features);
CompilerEnvironment compilerEnv(CompileMode::Once, Tier::Optimized,
DebugEnabled::False);
OptimizedBackend::Ion, DebugEnabled::False);
compilerEnv.computeParameters();
// Initialize the type section
@ -5391,7 +5390,10 @@ static bool WebAssemblyDefineConstructor(JSContext* cx,
}
id.set(AtomToId(className));
return DefineDataProperty(cx, wasm, id, ctorValue, 0);
if (!DefineDataProperty(cx, wasm, id, ctorValue, 0)) {
return false;
}
return true;
}
static bool WebAssemblyClassFinish(JSContext* cx, HandleObject object,

View file

@ -325,7 +325,7 @@ class WasmInstanceObject : public NativeObject {
static bool construct(JSContext*, unsigned, Value*);
static WasmInstanceObject* create(
JSContext* cx, const RefPtr<const wasm::Code>& code,
JSContext* cx, RefPtr<const wasm::Code> code,
const wasm::DataSegmentVector& dataSegments,
const wasm::ElemSegmentVector& elemSegments, uint32_t globalDataLength,
Handle<WasmMemoryObject*> memory,

View file

@ -342,7 +342,7 @@ using GlobalDescVector = Vector<GlobalDesc, 0, SystemAllocPolicy>;
// The TagOffsetVector represents the offsets in the layout of the
// data buffer stored in a Wasm exception.
using TagOffsetVector = Vector<uint32_t, 2, SystemAllocPolicy>;
using TagOffsetVector = Vector<uint32_t, 0, SystemAllocPolicy>;
struct TagType : AtomicRefCounted<TagType> {
ValTypeVector argTypes_;

View file

@ -162,6 +162,11 @@ enum class OpKind {
AtomicStore,
AtomicBinOp,
AtomicCompareExchange,
OldAtomicLoad,
OldAtomicStore,
OldAtomicBinOp,
OldAtomicCompareExchange,
OldAtomicExchange,
MemOrTableCopy,
DataOrElemDrop,
MemFill,
@ -307,8 +312,6 @@ class UnsetLocalsState {
uint32_t firstNonDefaultLocal_;
public:
UnsetLocalsState() : firstNonDefaultLocal_(UINT32_MAX) {}
[[nodiscard]] bool init(const ValTypeVector& locals, size_t numParams);
inline bool isUnset(uint32_t id) const {
@ -1505,7 +1508,7 @@ inline bool OpIter<Policy>::checkBrTableEntryAndPush(
*type = block->branchTargetType();
if (prevBranchType.valid()) {
if (prevBranchType != ResultType()) {
if (prevBranchType.length() != type->length()) {
return fail("br_table targets must all have the same arity");
}
@ -1557,7 +1560,7 @@ inline bool OpIter<Policy>::readBrTable(Uint32Vector* depths,
return false;
}
MOZ_ASSERT(defaultBranchType->valid());
MOZ_ASSERT(*defaultBranchType != ResultType());
afterUnconditionalBranch();
return true;
@ -2381,7 +2384,7 @@ inline bool OpIter<Policy>::popCallArgs(const ValTypeVector& expectedTypes,
return false;
}
for (int32_t i = int32_t(expectedTypes.length()) - 1; i >= 0; i--) {
for (int32_t i = expectedTypes.length() - 1; i >= 0; i--) {
if (!popWithType(expectedTypes[i], &(*values)[i])) {
return false;
}
@ -2849,7 +2852,12 @@ inline bool OpIter<Policy>::readMemOrTableInit(bool isMem, uint32_t* segIndex,
}
ValType ptrType = isMem ? ToValType(env_.memory->indexType()) : ValType::I32;
return popWithType(ptrType, dst);
if (!popWithType(ptrType, dst)) {
return false;
}
return true;
}
template <typename Policy>

View file

@ -80,7 +80,7 @@ namespace wasm {
struct OutOfMemory {};
// The result of serialization, either OK or OOM
using CoderResult = mozilla::Result<mozilla::Ok, OutOfMemory>;
typedef mozilla::Result<mozilla::Ok, OutOfMemory> CoderResult;
// CoderMode parameterizes the coding functions
enum CoderMode {

View file

@ -81,6 +81,11 @@ using MutableElemSegment = RefPtr<ElemSegment>;
using SharedElemSegment = RefPtr<const ElemSegment>;
using ElemSegmentVector = Vector<SharedElemSegment, 0, SystemAllocPolicy>;
struct ExceptionTag;
using SharedExceptionTag = RefPtr<ExceptionTag>;
using SharedExceptionTagVector =
Vector<SharedExceptionTag, 0, SystemAllocPolicy>;
class Val;
using ValVector = GCVector<Val, 0, SystemAllocPolicy>;
@ -90,6 +95,8 @@ using ValVector = GCVector<Val, 0, SystemAllocPolicy>;
using Uint32Vector = Vector<uint32_t, 8, SystemAllocPolicy>;
using Bytes = Vector<uint8_t, 0, SystemAllocPolicy>;
using UniqueBytes = UniquePtr<Bytes>;
using UniqueConstBytes = UniquePtr<const Bytes>;
using UTF8Bytes = Vector<char, 0, SystemAllocPolicy>;
using InstanceVector = Vector<Instance*, 0, SystemAllocPolicy>;
using UniqueCharsVector = Vector<UniqueChars, 0, SystemAllocPolicy>;

View file

@ -229,6 +229,7 @@ class StructType {
};
using StructTypeVector = Vector<StructType, 0, SystemAllocPolicy>;
using StructTypePtrVector = Vector<const StructType*, 0, SystemAllocPolicy>;
// Utility for computing field offset and alignments, and total size for
// structs and tags. This is complicated by the fact that a WasmStructObject has
@ -292,6 +293,7 @@ class ArrayType {
WASM_DECLARE_CACHEABLE_POD(ArrayType);
using ArrayTypeVector = Vector<ArrayType, 0, SystemAllocPolicy>;
using ArrayTypePtrVector = Vector<const ArrayType*, 0, SystemAllocPolicy>;
// A tagged container for the various types that can be present in a wasm
// module's type section.

View file

@ -441,27 +441,25 @@ class PackedType : public T {
explicit PackedType(PackedTypeCode ptc) : tc_(ptc) { MOZ_ASSERT(isValid()); }
static PackedType fromMIRType(jit::MIRType mty) {
explicit PackedType(jit::MIRType mty) {
switch (mty) {
case jit::MIRType::Int32:
return PackedType::I32;
tc_ = PackedTypeCode::pack(TypeCode::I32);
break;
case jit::MIRType::Int64:
return PackedType::I64;
tc_ = PackedTypeCode::pack(TypeCode::I64);
break;
case jit::MIRType::Float32:
return PackedType::F32;
tc_ = PackedTypeCode::pack(TypeCode::F32);
break;
case jit::MIRType::Double:
return PackedType::F64;
tc_ = PackedTypeCode::pack(TypeCode::F64);
break;
case jit::MIRType::Simd128:
return PackedType::V128;
tc_ = PackedTypeCode::pack(TypeCode::V128);
break;
case jit::MIRType::RefOrNull:
return PackedType::Ref;
default:
MOZ_CRASH("fromMIRType: unexpected type");
MOZ_CRASH("PackedType(MIRType): unexpected type");
}
}
@ -667,29 +665,6 @@ class PackedType : public T {
return PackedType<ValTypeTraits>(tc_);
}
// Note, ToMIRType is only correct within Wasm, where an AnyRef is represented
// as a pointer. At the JS/wasm boundary, an AnyRef can be represented as a
// JS::Value, and the type translation may have to be handled specially and on
// a case-by-case basis.
jit::MIRType toMIRType() const {
switch (tc_.typeCode()) {
case TypeCode::I32:
return jit::MIRType::Int32;
case TypeCode::I64:
return jit::MIRType::Int64;
case TypeCode::F32:
return jit::MIRType::Float32;
case TypeCode::F64:
return jit::MIRType::Double;
case TypeCode::V128:
return jit::MIRType::Simd128;
case AbstractReferenceTypeCode:
return jit::MIRType::RefOrNull;
default:
MOZ_CRASH("bad type");
}
}
bool isValType() const {
switch (tc_.typeCode()) {
case TypeCode::I8:
@ -737,6 +712,51 @@ using ValTypeVector = Vector<ValType, 16, SystemAllocPolicy>;
// ValType utilities
static inline unsigned SizeOf(ValType vt) {
switch (vt.kind()) {
case ValType::I32:
case ValType::F32:
return 4;
case ValType::I64:
case ValType::F64:
return 8;
case ValType::V128:
return 16;
case ValType::Ref:
return sizeof(intptr_t);
}
MOZ_CRASH("Invalid ValType");
}
// Note, ToMIRType is only correct within Wasm, where an AnyRef is represented
// as a pointer. At the JS/wasm boundary, an AnyRef can be represented as a
// JS::Value, and the type translation may have to be handled specially and on a
// case-by-case basis.
static inline jit::MIRType ToMIRType(ValType vt) {
switch (vt.kind()) {
case ValType::I32:
return jit::MIRType::Int32;
case ValType::I64:
return jit::MIRType::Int64;
case ValType::F32:
return jit::MIRType::Float32;
case ValType::F64:
return jit::MIRType::Double;
case ValType::V128:
return jit::MIRType::Simd128;
case ValType::Ref:
return jit::MIRType::RefOrNull;
}
MOZ_CRASH("bad type");
}
static inline bool IsNumberType(ValType vt) { return !vt.isRefType(); }
static inline jit::MIRType ToMIRType(const Maybe<ValType>& t) {
return t ? ToMIRType(ValType(t.ref())) : jit::MIRType::None;
}
extern bool ToValType(JSContext* cx, HandleValue v, ValType* out);
extern bool ToRefType(JSContext* cx, JSLinearString* typeLinearStr,
RefType* out);
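
Usage sketch of the restored free helpers, with values read off the switches above:

    ValType t = ValType::F64;
    MOZ_ASSERT(SizeOf(t) == 8);
    MOZ_ASSERT(ToMIRType(t) == jit::MIRType::Double);
    MOZ_ASSERT(IsNumberType(t));  // true for everything but reference types
    // The Maybe overload treats Nothing as "no result":
    MOZ_ASSERT(ToMIRType(Maybe<ValType>()) == jit::MIRType::None);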

View file

@ -267,8 +267,8 @@ Value UnboxFuncRef(FuncRef val);
class LitVal {
public:
union Cell {
uint32_t i32_;
uint64_t i64_;
int32_t i32_;
int64_t i64_;
float f32_;
double f64_;
wasm::V128 v128_;