Bug 1668373 - wasm: Rename future ambiguous uses of 'env_' to 'moduleEnv_'. r=lth

The next commit will move CompilerEnvironment out of ModuleEnvironment and pass it
through the compiler pipeline. The compiler pipeline typically uses 'env_' to refer
to the module environment. With a 'compilerEnv_' being used as well, I think we should
rename 'env_' to 'moduleEnv_'.

The one exception is within validation/decoding, where I think using just 'env_' to
refer to the module environment is fine.

Differential Revision: https://phabricator.services.mozilla.com/D91994
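
For context, here is a minimal illustrative sketch of the naming convention this patch adopts. The types and the class are simplified stand-ins for illustration, not the actual SpiderMonkey declarations:

// Simplified stand-ins for the real types (illustrative only).
struct ModuleEnvironment {};    // per-module state: types, globals, tables, ...
struct CompilerEnvironment {};  // compilation parameters: tier, mode, debug, ...

// Once a compiler-pipeline class holds both environments, a bare 'env_'
// is ambiguous, so the module environment is spelled 'moduleEnv_' to
// match the existing 'compilerEnv_'.
class ExampleCompiler {
  const ModuleEnvironment& moduleEnv_;      // previously named 'env_'
  const CompilerEnvironment& compilerEnv_;  // passed separately after the next commit

 public:
  ExampleCompiler(const ModuleEnvironment& moduleEnv,
                  const CompilerEnvironment& compilerEnv)
      : moduleEnv_(moduleEnv), compilerEnv_(compilerEnv) {}
};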
Ryan Hunt 2020-10-05 17:01:32 +00:00
Parent 59dd6d410d
Commit ab7788915e
10 changed files with 269 additions and 253 deletions

View file

@ -1334,7 +1334,7 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
// State used to build the AsmJSModule in finish():
CompilerEnvironment compilerEnv_;
ModuleEnvironment env_;
ModuleEnvironment moduleEnv_;
MutableAsmJSMetadata asmJSMetadata_;
// Error reporting:
@ -1359,9 +1359,9 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
arrayViews_(cx),
compilerEnv_(CompileMode::Once, Tier::Optimized, OptimizedBackend::Ion,
DebugEnabled::False),
env_(&compilerEnv_, FeatureArgs(), ModuleKind::AsmJS) {
moduleEnv_(&compilerEnv_, FeatureArgs(), ModuleKind::AsmJS) {
compilerEnv_.computeParameters();
env_.minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
moduleEnv_.minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
}
protected:
@ -1440,9 +1440,9 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
const ParserName* globalArgumentName() const { return globalArgumentName_; }
const ParserName* importArgumentName() const { return importArgumentName_; }
const ParserName* bufferArgumentName() const { return bufferArgumentName_; }
const ModuleEnvironment& env() { return env_; }
const ModuleEnvironment& env() { return moduleEnv_; }
uint64_t minMemoryLength() const { return env_.minMemoryLength; }
uint64_t minMemoryLength() const { return moduleEnv_.minMemoryLength; }
void initModuleFunctionName(const ParserName* name) {
MOZ_ASSERT(!moduleFunctionName_);
@ -1483,9 +1483,9 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
MOZ_ASSERT(type.isGlobalVarType());
MOZ_ASSERT(type == Type::canonicalize(Type::lit(lit)));
uint32_t index = env_.globals.length();
if (!env_.globals.emplaceBack(type.canonicalToValType(), !isConst, index,
ModuleKind::AsmJS)) {
uint32_t index = moduleEnv_.globals.length();
if (!moduleEnv_.globals.emplaceBack(type.canonicalToValType(), !isConst,
index, ModuleKind::AsmJS)) {
return false;
}
@ -1517,10 +1517,10 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
return false;
}
uint32_t index = env_.globals.length();
uint32_t index = moduleEnv_.globals.length();
ValType valType = type.canonicalToValType();
if (!env_.globals.emplaceBack(valType, !isConst, index,
ModuleKind::AsmJS)) {
if (!moduleEnv_.globals.emplaceBack(valType, !isConst, index,
ModuleKind::AsmJS)) {
return false;
}
@ -1690,8 +1690,8 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
// Declare which function is exported which gives us an index into the
// module ExportVector.
uint32_t funcIndex = funcImportMap_.count() + func.funcDefIndex();
if (!env_.exports.emplaceBack(std::move(fieldChars), funcIndex,
DefinitionKind::Function)) {
if (!moduleEnv_.exports.emplaceBack(std::move(fieldChars), funcIndex,
DefinitionKind::Function)) {
return false;
}
@ -1722,7 +1722,7 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
seg->tableIndex = tableIndex;
seg->offsetIfActive = Some(InitExpr::fromConstant(LitVal(uint32_t(0))));
seg->elemFuncIndices = std::move(elems);
return env_.elemSegments.append(std::move(seg));
return moduleEnv_.elemSegments.append(std::move(seg));
}
bool tryConstantAccess(uint64_t start, uint64_t width) {
@ -1732,8 +1732,8 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
return false;
}
len = RoundUpToNextValidAsmJSHeapLength(len);
if (len > env_.minMemoryLength) {
env_.minMemoryLength = len;
if (len > moduleEnv_.minMemoryLength) {
moduleEnv_.minMemoryLength = len;
}
return true;
}
@ -1835,9 +1835,9 @@ class MOZ_STACK_CLASS ModuleValidatorShared {
bool startFunctionBodies() {
if (!arrayViews_.empty()) {
env_.memoryUsage = MemoryUsage::Unshared;
moduleEnv_.memoryUsage = MemoryUsage::Unshared;
} else {
env_.memoryUsage = MemoryUsage::None;
moduleEnv_.memoryUsage = MemoryUsage::None;
}
return true;
}
@ -1872,23 +1872,23 @@ class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
private:
// Helpers:
bool newSig(FuncType&& sig, uint32_t* sigIndex) {
if (env_.types.length() >= MaxTypes) {
if (moduleEnv_.types.length() >= MaxTypes) {
return failCurrentOffset("too many signatures");
}
*sigIndex = env_.types.length();
return env_.types.append(std::move(sig));
*sigIndex = moduleEnv_.types.length();
return moduleEnv_.types.append(std::move(sig));
}
bool declareSig(FuncType&& sig, uint32_t* sigIndex) {
SigSet::AddPtr p = sigSet_.lookupForAdd(sig);
if (p) {
*sigIndex = p->sigIndex();
MOZ_ASSERT(env_.types[*sigIndex].funcType() == sig);
MOZ_ASSERT(moduleEnv_.types[*sigIndex].funcType() == sig);
return true;
}
return newSig(std::move(sig), sigIndex) &&
sigSet_.add(p, HashableSig(*sigIndex, env_.types));
sigSet_.add(p, HashableSig(*sigIndex, moduleEnv_.types));
}
private:
@ -1978,22 +1978,22 @@ class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
return failCurrentOffset("function pointer table too big");
}
MOZ_ASSERT(env_.tables.length() == tables_.length());
*tableIndex = env_.tables.length();
MOZ_ASSERT(moduleEnv_.tables.length() == tables_.length());
*tableIndex = moduleEnv_.tables.length();
uint32_t sigIndex;
if (!newSig(std::move(sig), &sigIndex)) {
return false;
}
MOZ_ASSERT(sigIndex >= env_.asmJSSigToTableIndex.length());
if (!env_.asmJSSigToTableIndex.resize(sigIndex + 1)) {
MOZ_ASSERT(sigIndex >= moduleEnv_.asmJSSigToTableIndex.length());
if (!moduleEnv_.asmJSSigToTableIndex.resize(sigIndex + 1)) {
return false;
}
env_.asmJSSigToTableIndex[sigIndex] = env_.tables.length();
if (!env_.tables.emplaceBack(RefType::func(), mask + 1, Nothing(),
/*isAsmJS*/ true)) {
moduleEnv_.asmJSSigToTableIndex[sigIndex] = moduleEnv_.tables.length();
if (!moduleEnv_.tables.emplaceBack(RefType::func(), mask + 1, Nothing(),
/*isAsmJS*/ true)) {
return false;
}
@ -2035,7 +2035,7 @@ class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
return false;
}
return funcImportMap_.add(p, NamedSig(name, sigIndex, env_.types),
return funcImportMap_.add(p, NamedSig(name, sigIndex, moduleEnv_.types),
*importIndex);
}
@ -2046,24 +2046,27 @@ class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
}
SharedModule finish() {
MOZ_ASSERT(env_.funcTypes.empty());
if (!env_.funcTypes.resize(funcImportMap_.count() + funcDefs_.length())) {
MOZ_ASSERT(moduleEnv_.funcTypes.empty());
if (!moduleEnv_.funcTypes.resize(funcImportMap_.count() +
funcDefs_.length())) {
return nullptr;
}
for (FuncImportMap::Range r = funcImportMap_.all(); !r.empty();
r.popFront()) {
uint32_t funcIndex = r.front().value();
MOZ_ASSERT(!env_.funcTypes[funcIndex]);
env_.funcTypes[funcIndex] =
&env_.types[r.front().key().sigIndex()].funcType();
MOZ_ASSERT(!moduleEnv_.funcTypes[funcIndex]);
moduleEnv_.funcTypes[funcIndex] =
&moduleEnv_.types[r.front().key().sigIndex()].funcType();
}
for (const Func& func : funcDefs_) {
uint32_t funcIndex = funcImportMap_.count() + func.funcDefIndex();
MOZ_ASSERT(!env_.funcTypes[funcIndex]);
env_.funcTypes[funcIndex] = &env_.types[func.sigIndex()].funcType();
MOZ_ASSERT(!moduleEnv_.funcTypes[funcIndex]);
moduleEnv_.funcTypes[funcIndex] =
&moduleEnv_.types[func.sigIndex()].funcType();
}
if (!env_.funcImportGlobalDataOffsets.resize(funcImportMap_.count())) {
if (!moduleEnv_.funcImportGlobalDataOffsets.resize(
funcImportMap_.count())) {
return nullptr;
}
@ -2109,9 +2112,9 @@ class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
codeSectionSize += func.bytes().length();
}
env_.codeSection.emplace();
env_.codeSection->start = 0;
env_.codeSection->size = codeSectionSize;
moduleEnv_.codeSection.emplace();
moduleEnv_.codeSection->start = 0;
moduleEnv_.codeSection->size = codeSectionSize;
// asm.js does not have any wasm bytecode to save; view-source is
// provided through the ScriptSource.
@ -2120,7 +2123,7 @@ class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
return nullptr;
}
ModuleGenerator mg(*args, &env_, nullptr, nullptr);
ModuleGenerator mg(*args, &moduleEnv_, nullptr, nullptr);
if (!mg.init(asmJSMetadata_.get())) {
return nullptr;
}

View file

@ -3220,7 +3220,7 @@ class BaseCompiler final : public BaseCompilerInterface {
bool onlyPointerAlignment;
};
const ModuleEnvironment& env_;
const ModuleEnvironment& moduleEnv_;
BaseOpIter iter_;
const FuncCompileInput& func_;
size_t lastReadCallSite_;
@ -3258,8 +3258,9 @@ class BaseCompiler final : public BaseCompilerInterface {
// There are more members scattered throughout.
public:
BaseCompiler(const ModuleEnvironment& env, const FuncCompileInput& input,
const ValTypeVector& locals, const MachineState& trapExitLayout,
BaseCompiler(const ModuleEnvironment& moduleEnv,
const FuncCompileInput& input, const ValTypeVector& locals,
const MachineState& trapExitLayout,
size_t trapExitLayoutNumWords, Decoder& decoder,
StkVector& stkSource, TempAllocator* alloc, MacroAssembler* masm,
StackMaps* stackMaps);
@ -3273,14 +3274,14 @@ class BaseCompiler final : public BaseCompilerInterface {
void emitInitStackLocals();
const FuncTypeWithId& funcType() const {
return *env_.funcTypes[func_.index];
return *moduleEnv_.funcTypes[func_.index];
}
// Used by some of the ScratchRegister implementations.
operator MacroAssembler&() const { return masm; }
operator BaseRegAlloc&() { return ra; }
bool usesSharedMemory() const { return env_.usesSharedMemory(); }
bool usesSharedMemory() const { return moduleEnv_.usesSharedMemory(); }
private:
////////////////////////////////////////////////////////////
@ -4168,7 +4169,7 @@ class BaseCompiler final : public BaseCompilerInterface {
const ExitStubMapVector& extras,
uint32_t assemblerOffset) {
auto debugFrame =
env_.debugEnabled() ? HasDebugFrame::Yes : HasDebugFrame::No;
moduleEnv_.debugEnabled() ? HasDebugFrame::Yes : HasDebugFrame::No;
return stackMapGenerator_.createStackMap(who, extras, assemblerOffset,
debugFrame, stk_);
}
@ -5277,8 +5278,8 @@ class BaseCompiler final : public BaseCompilerInterface {
}
GenerateFunctionPrologue(
masm, env_.funcTypes[func_.index]->id,
env_.mode() == CompileMode::Tier1 ? Some(func_.index) : Nothing(),
masm, moduleEnv_.funcTypes[func_.index]->id,
moduleEnv_.mode() == CompileMode::Tier1 ? Some(func_.index) : Nothing(),
&offsets_);
// GenerateFunctionPrologue pushes exactly one wasm::Frame's worth of
@ -5291,7 +5292,7 @@ class BaseCompiler final : public BaseCompilerInterface {
// Initialize DebugFrame fields before the stack overflow trap so that
// we have the invariant that all observable Frames in a debugEnabled
// Module have valid DebugFrames.
if (env_.debugEnabled()) {
if (moduleEnv_.debugEnabled()) {
#ifdef JS_CODEGEN_ARM64
static_assert(DebugFrame::offsetOfFrame() % WasmStackAlignment == 0,
"aligned");
@ -5358,7 +5359,7 @@ class BaseCompiler final : public BaseCompilerInterface {
}
// If we're in a debug frame, copy the stack result pointer arg
// to a well-known place.
if (env_.debugEnabled()) {
if (moduleEnv_.debugEnabled()) {
Register target = ABINonArgReturnReg0;
fr.loadIncomingStackResultAreaPtr(RegPtr(target));
size_t debugFrameOffset =
@ -5409,7 +5410,7 @@ class BaseCompiler final : public BaseCompilerInterface {
fr.zeroLocals(&ra);
fr.storeTlsPtr(WasmTlsReg);
if (env_.debugEnabled()) {
if (moduleEnv_.debugEnabled()) {
insertBreakablePoint(CallSiteDesc::EnterFrame);
if (!createStackMap("debug: breakable point")) {
return false;
@ -5437,7 +5438,7 @@ class BaseCompiler final : public BaseCompilerInterface {
}
void saveRegisterReturnValues(const ResultType& resultType) {
MOZ_ASSERT(env_.debugEnabled());
MOZ_ASSERT(moduleEnv_.debugEnabled());
size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
size_t registerResultIdx = 0;
for (ABIResultIter i(resultType); !i.done(); i.next()) {
@ -5490,7 +5491,7 @@ class BaseCompiler final : public BaseCompilerInterface {
}
void restoreRegisterReturnValues(const ResultType& resultType) {
MOZ_ASSERT(env_.debugEnabled());
MOZ_ASSERT(moduleEnv_.debugEnabled());
size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
size_t registerResultIdx = 0;
for (ABIResultIter i(resultType); !i.done(); i.next()) {
@ -5556,7 +5557,7 @@ class BaseCompiler final : public BaseCompilerInterface {
popStackReturnValues(resultType);
if (env_.debugEnabled()) {
if (moduleEnv_.debugEnabled()) {
// Store and reload the return value from DebugFrame::return so that
// it can be clobbered, and/or modified by the debug trap.
saveRegisterReturnValues(resultType);
@ -5891,10 +5892,10 @@ class BaseCompiler final : public BaseCompilerInterface {
CodeOffset callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
const Stk& indexVal, const FunctionCall& call) {
const FuncTypeWithId& funcType = env_.types[funcTypeIndex].funcType();
const FuncTypeWithId& funcType = moduleEnv_.types[funcTypeIndex].funcType();
MOZ_ASSERT(funcType.id.kind() != FuncTypeIdDescKind::None);
const TableDesc& table = env_.tables[tableIndex];
const TableDesc& table = moduleEnv_.tables[tableIndex];
loadI32(indexVal, RegI32(WasmTableCallIndexReg));
@ -6532,7 +6533,8 @@ class BaseCompiler final : public BaseCompilerInterface {
return;
}
uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
uint32_t offsetGuardLimit =
GetOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
if ((bceSafe_ & (BCESet(1) << local)) &&
access->offset() < offsetGuardLimit) {
@ -6553,7 +6555,8 @@ class BaseCompiler final : public BaseCompilerInterface {
void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check,
RegI32 tls, RegI32 ptr) {
uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
uint32_t offsetGuardLimit =
GetOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
// Fold offset if necessary for further computations.
if (access->offset() >= offsetGuardLimit ||
@ -6582,7 +6585,7 @@ class BaseCompiler final : public BaseCompilerInterface {
// Ensure no tls if we don't need it.
if (env_.hugeMemoryEnabled()) {
if (moduleEnv_.hugeMemoryEnabled()) {
// We have HeapReg and no bounds checking and need load neither
// memoryBase nor boundsCheckLimit from tls.
MOZ_ASSERT_IF(check->omitBoundsCheck, tls.isInvalid());
@ -6594,7 +6597,7 @@ class BaseCompiler final : public BaseCompilerInterface {
// Bounds check if required.
if (!env_.hugeMemoryEnabled() && !check->omitBoundsCheck) {
if (!moduleEnv_.hugeMemoryEnabled() && !check->omitBoundsCheck) {
Label ok;
masm.wasmBoundsCheck(Assembler::Below, ptr,
Address(tls, offsetof(TlsData, boundsCheckLimit)),
@ -6660,7 +6663,7 @@ class BaseCompiler final : public BaseCompilerInterface {
// x86 requires Tls for memory base
return true;
#else
return !env_.hugeMemoryEnabled() && !check.omitBoundsCheck;
return !moduleEnv_.hugeMemoryEnabled() && !check.omitBoundsCheck;
#endif
}
@ -10266,8 +10269,8 @@ bool BaseCompiler::emitCall() {
sync();
const FuncType& funcType = *env_.funcTypes[funcIndex];
bool import = env_.funcIsImport(funcIndex);
const FuncType& funcType = *moduleEnv_.funcTypes[funcIndex];
bool import = moduleEnv_.funcIsImport(funcIndex);
uint32_t numArgs = funcType.args().length();
size_t stackArgBytes = stackConsumed(numArgs);
@ -10289,8 +10292,8 @@ bool BaseCompiler::emitCall() {
CodeOffset raOffset;
if (import) {
raOffset =
callImport(env_.funcImportGlobalDataOffsets[funcIndex], baselineCall);
raOffset = callImport(moduleEnv_.funcImportGlobalDataOffsets[funcIndex],
baselineCall);
} else {
raOffset = callDefinition(funcIndex, baselineCall);
}
@ -10328,7 +10331,7 @@ bool BaseCompiler::emitCallIndirect() {
sync();
const FuncTypeWithId& funcType = env_.types[funcTypeIndex].funcType();
const FuncTypeWithId& funcType = moduleEnv_.types[funcTypeIndex].funcType();
// Stack: ... arg1 .. argn callee
@ -10734,7 +10737,7 @@ bool BaseCompiler::emitGetGlobal() {
return true;
}
const GlobalDesc& global = env_.globals[id];
const GlobalDesc& global = moduleEnv_.globals[id];
if (global.isConstant()) {
LitVal value = global.constantValue();
@ -10828,7 +10831,7 @@ bool BaseCompiler::emitSetGlobal() {
return true;
}
const GlobalDesc& global = env_.globals[id];
const GlobalDesc& global = moduleEnv_.globals[id];
switch (global.type().kind()) {
case ValType::I32: {
@ -10941,7 +10944,7 @@ bool BaseCompiler::emitSetGlobal() {
//
// Finally, when the debugger allows locals to be mutated we must disable BCE
// for references via a local, by returning immediately from bceCheckLocal if
// env_.debugEnabled() is true.
// moduleEnv_.debugEnabled() is true.
//
//
// Alignment check elimination.
@ -10966,10 +10969,11 @@ RegI32 BaseCompiler::popMemoryAccess(MemoryAccessDesc* access,
if (popConstI32(&addrTemp)) {
uint32_t addr = addrTemp;
uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
uint32_t offsetGuardLimit =
GetOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
uint64_t ea = uint64_t(addr) + uint64_t(access->offset());
uint64_t limit = env_.minMemoryLength + offsetGuardLimit;
uint64_t limit = moduleEnv_.minMemoryLength + offsetGuardLimit;
check->omitBoundsCheck = ea < limit;
check->omitAlignmentCheck = (ea & (access->byteSize() - 1)) == 0;
@ -12491,7 +12495,7 @@ bool BaseCompiler::emitStructNew() {
//
// Returns null on OOM.
const StructType& structType = env_.types[typeIndex].structType();
const StructType& structType = moduleEnv_.types[typeIndex].structType();
pushI32(structType.moduleIndex_);
if (!emitInstanceCall(lineOrBytecode, SASigStructNew)) {
@ -12619,7 +12623,7 @@ bool BaseCompiler::emitStructGet() {
return true;
}
const StructType& structType = env_.types[typeIndex].structType();
const StructType& structType = moduleEnv_.types[typeIndex].structType();
RegPtr rp = popRef();
@ -12686,7 +12690,7 @@ bool BaseCompiler::emitStructSet() {
return true;
}
const StructType& structType = env_.types[typeIndex].structType();
const StructType& structType = moduleEnv_.types[typeIndex].structType();
RegI32 ri;
RegI64 rl;
@ -12796,8 +12800,8 @@ bool BaseCompiler::emitStructNarrow() {
// struct.narrow validation ensures that these hold.
MOZ_ASSERT(inputType.isExternRef() || env_.isStructType(inputType));
MOZ_ASSERT(outputType.isExternRef() || env_.isStructType(outputType));
MOZ_ASSERT(inputType.isExternRef() || moduleEnv_.isStructType(inputType));
MOZ_ASSERT(outputType.isExternRef() || moduleEnv_.isStructType(outputType));
MOZ_ASSERT_IF(outputType.isExternRef(), inputType.isExternRef());
// AnyRef -> AnyRef is a no-op, just leave the value on the stack.
@ -12814,7 +12818,7 @@ bool BaseCompiler::emitStructNarrow() {
// Dynamic downcast (optref T) -> (optref U), leaves rp or null
const StructType& outputStruct =
env_.types[outputType.refType().typeIndex()].structType();
moduleEnv_.types[outputType.refType().typeIndex()].structType();
pushI32(mustUnboxAnyref);
pushI32(outputStruct.moduleIndex_);
@ -14039,11 +14043,12 @@ bool BaseCompiler::emitBody() {
OpBytes op;
CHECK(iter_.readOp(&op));
// When env_.debugEnabled(), every operator has breakpoint site but Op::End.
if (env_.debugEnabled() && op.b0 != (uint16_t)Op::End) {
// When moduleEnv_.debugEnabled(), every operator has breakpoint site but
// Op::End.
if (moduleEnv_.debugEnabled() && op.b0 != (uint16_t)Op::End) {
// TODO sync only registers that can be clobbered by the exit
// prologue/epilogue or disable these registers for use in
// baseline compiler when env_.debugEnabled() is set.
// baseline compiler when moduleEnv_.debugEnabled() is set.
sync();
insertBreakablePoint(CallSiteDesc::Breakpoint);
@ -14129,7 +14134,7 @@ bool BaseCompiler::emitBody() {
case uint16_t(Op::SelectNumeric):
CHECK_NEXT(emitSelect(/*typed*/ false));
case uint16_t(Op::SelectTyped):
if (!env_.refTypesEnabled()) {
if (!moduleEnv_.refTypesEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitSelect(/*typed*/ true));
@ -14632,19 +14637,19 @@ bool BaseCompiler::emitBody() {
#ifdef ENABLE_WASM_FUNCTION_REFERENCES
case uint16_t(Op::RefAsNonNull):
if (!env_.functionReferencesEnabled()) {
if (!moduleEnv_.functionReferencesEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitRefAsNonNull());
case uint16_t(Op::BrOnNull):
if (!env_.functionReferencesEnabled()) {
if (!moduleEnv_.functionReferencesEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitBrOnNull());
#endif
#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefEq):
if (!env_.gcTypesEnabled()) {
if (!moduleEnv_.gcTypesEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(dispatchComparison(emitCompareRef, RefType::extern_(),
@ -14665,7 +14670,7 @@ bool BaseCompiler::emitBody() {
#ifdef ENABLE_WASM_GC
// "GC" operations
case uint16_t(Op::GcPrefix): {
if (!env_.gcTypesEnabled()) {
if (!moduleEnv_.gcTypesEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
switch (op.b1) {
@ -14688,7 +14693,7 @@ bool BaseCompiler::emitBody() {
// SIMD operations
case uint16_t(Op::SimdPrefix): {
uint32_t laneIndex;
if (!env_.v128Enabled()) {
if (!moduleEnv_.v128Enabled()) {
return iter_.unrecognizedOpcode(&op);
}
switch (op.b1) {
@ -15219,7 +15224,7 @@ bool BaseCompiler::emitBody() {
// Thread operations
case uint16_t(Op::ThreadPrefix): {
if (env_.sharedMemoryEnabled() == Shareable::False) {
if (moduleEnv_.sharedMemoryEnabled() == Shareable::False) {
return iter_.unrecognizedOpcode(&op);
}
switch (op.b1) {
@ -15459,15 +15464,15 @@ bool BaseCompiler::emitFunction() {
return true;
}
BaseCompiler::BaseCompiler(const ModuleEnvironment& env,
BaseCompiler::BaseCompiler(const ModuleEnvironment& moduleEnv,
const FuncCompileInput& func,
const ValTypeVector& locals,
const MachineState& trapExitLayout,
size_t trapExitLayoutNumWords, Decoder& decoder,
StkVector& stkSource, TempAllocator* alloc,
MacroAssembler* masm, StackMaps* stackMaps)
: env_(env),
iter_(env, decoder),
: moduleEnv_(moduleEnv),
iter_(moduleEnv, decoder),
func_(func),
lastReadCallSite_(0),
alloc_(*alloc),
@ -15515,7 +15520,7 @@ bool BaseCompiler::init() {
}
ArgTypeVector args(funcType());
if (!fr.setupLocals(locals_, args, env_.debugEnabled(), &localInfo_)) {
if (!fr.setupLocals(locals_, args, moduleEnv_.debugEnabled(), &localInfo_)) {
return false;
}
@ -15558,13 +15563,13 @@ bool js::wasm::BaselinePlatformSupport() {
#endif
}
bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& env,
bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& moduleEnv,
LifoAlloc& lifo,
const FuncCompileInputVector& inputs,
CompiledCode* code,
UniqueChars* error) {
MOZ_ASSERT(env.tier() == Tier::Baseline);
MOZ_ASSERT(env.kind == ModuleKind::Wasm);
MOZ_ASSERT(moduleEnv.tier() == Tier::Baseline);
MOZ_ASSERT(moduleEnv.kind == ModuleKind::Wasm);
// The MacroAssembler will sometimes access the jitContext.
@ -15598,17 +15603,18 @@ bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& env,
// Build the local types vector.
ValTypeVector locals;
if (!locals.appendAll(env.funcTypes[func.index]->args())) {
if (!locals.appendAll(moduleEnv.funcTypes[func.index]->args())) {
return false;
}
if (!DecodeLocalEntries(d, env.types, env.features, &locals)) {
if (!DecodeLocalEntries(d, moduleEnv.types, moduleEnv.features, &locals)) {
return false;
}
// One-pass baseline compilation.
BaseCompiler f(env, func, locals, trapExitLayout, trapExitLayoutNumWords, d,
stk, &alloc, &masm, &code->stackMaps);
BaseCompiler f(moduleEnv, func, locals, trapExitLayout,
trapExitLayoutNumWords, d, stk, &alloc, &masm,
&code->stackMaps);
if (!f.init()) {
return false;
}

View file

@ -30,7 +30,7 @@ namespace wasm {
MOZ_MUST_USE bool BaselinePlatformSupport();
// Generate adequate code quickly.
MOZ_MUST_USE bool BaselineCompileFunctions(const ModuleEnvironment& env,
MOZ_MUST_USE bool BaselineCompileFunctions(const ModuleEnvironment& moduleEnv,
LifoAlloc& lifo,
const FuncCompileInputVector& inputs,
CompiledCode* code,

View file

@ -577,21 +577,21 @@ SharedModule wasm::CompileBuffer(const CompileArgs& args,
Decoder d(bytecode.bytes, 0, error, warnings);
CompilerEnvironment compilerEnv(args);
ModuleEnvironment env(&compilerEnv, args.features);
if (!DecodeModuleEnvironment(d, &env)) {
ModuleEnvironment moduleEnv(&compilerEnv, args.features);
if (!DecodeModuleEnvironment(d, &moduleEnv)) {
return nullptr;
}
ModuleGenerator mg(args, &env, nullptr, error);
ModuleGenerator mg(args, &moduleEnv, nullptr, error);
if (!mg.init(nullptr, telemetrySender)) {
return nullptr;
}
if (!DecodeCodeSection(env, d, mg)) {
if (!DecodeCodeSection(moduleEnv, d, mg)) {
return nullptr;
}
if (!DecodeModuleTail(d, &env)) {
if (!DecodeModuleTail(d, &moduleEnv)) {
return nullptr;
}
@ -611,21 +611,21 @@ void wasm::CompileTier2(const CompileArgs& args, const Bytes& bytecode,
CompilerEnvironment compilerEnv(CompileMode::Tier2, Tier::Optimized,
optimizedBackend, DebugEnabled::False);
ModuleEnvironment env(&compilerEnv, args.features);
if (!DecodeModuleEnvironment(d, &env)) {
ModuleEnvironment moduleEnv(&compilerEnv, args.features);
if (!DecodeModuleEnvironment(d, &moduleEnv)) {
return;
}
ModuleGenerator mg(args, &env, cancelled, &error);
ModuleGenerator mg(args, &moduleEnv, cancelled, &error);
if (!mg.init(nullptr, telemetrySender)) {
return;
}
if (!DecodeCodeSection(env, d, mg)) {
if (!DecodeCodeSection(moduleEnv, d, mg)) {
return;
}
if (!DecodeModuleTail(d, &env)) {
if (!DecodeModuleTail(d, &moduleEnv)) {
return;
}
@ -719,34 +719,34 @@ SharedModule wasm::CompileStreaming(
const Atomic<bool>& cancelled, UniqueChars* error,
UniqueCharsVector* warnings, JSTelemetrySender telemetrySender) {
CompilerEnvironment compilerEnv(args);
ModuleEnvironment env(&compilerEnv, args.features);
ModuleEnvironment moduleEnv(&compilerEnv, args.features);
{
Decoder d(envBytes, 0, error, warnings);
if (!DecodeModuleEnvironment(d, &env)) {
if (!DecodeModuleEnvironment(d, &moduleEnv)) {
return nullptr;
}
if (!env.codeSection) {
if (!moduleEnv.codeSection) {
d.fail("unknown section before code section");
return nullptr;
}
MOZ_RELEASE_ASSERT(env.codeSection->size == codeBytes.length());
MOZ_RELEASE_ASSERT(moduleEnv.codeSection->size == codeBytes.length());
MOZ_RELEASE_ASSERT(d.done());
}
ModuleGenerator mg(args, &env, &cancelled, error);
ModuleGenerator mg(args, &moduleEnv, &cancelled, error);
if (!mg.init(nullptr, telemetrySender)) {
return nullptr;
}
{
StreamingDecoder d(env, codeBytes, codeBytesEnd, cancelled, error,
StreamingDecoder d(moduleEnv, codeBytes, codeBytesEnd, cancelled, error,
warnings);
if (!DecodeCodeSection(env, d, mg)) {
if (!DecodeCodeSection(moduleEnv, d, mg)) {
return nullptr;
}
@ -767,9 +767,9 @@ SharedModule wasm::CompileStreaming(
const Bytes& tailBytes = *streamEnd.tailBytes;
{
Decoder d(tailBytes, env.codeSection->end(), error, warnings);
Decoder d(tailBytes, moduleEnv.codeSection->end(), error, warnings);
if (!DecodeModuleTail(d, &env)) {
if (!DecodeModuleTail(d, &moduleEnv)) {
return nullptr;
}

View file

@ -285,15 +285,15 @@ static bool GenerateCraneliftCode(WasmMacroAssembler& masm,
class CraneliftContext {
CraneliftStaticEnvironment staticEnv_;
CraneliftModuleEnvironment env_;
CraneliftModuleEnvironment moduleEnv_;
CraneliftCompiler* compiler_;
public:
explicit CraneliftContext(const ModuleEnvironment& env)
: env_(env), compiler_(nullptr) {
staticEnv_.ref_types_enabled = env.refTypesEnabled();
explicit CraneliftContext(const ModuleEnvironment& moduleEnv)
: moduleEnv_(moduleEnv), compiler_(nullptr) {
staticEnv_.ref_types_enabled = moduleEnv.refTypesEnabled();
#ifdef WASM_SUPPORTS_HUGE_MEMORY
if (env.hugeMemoryEnabled()) {
if (moduleEnv.hugeMemoryEnabled()) {
// In the huge memory configuration, we always reserve the full 4 GB
// index space for a heap.
staticEnv_.static_memory_bound = HugeIndexRange;
@ -306,7 +306,7 @@ class CraneliftContext {
// of TlsData.
}
bool init() {
compiler_ = cranelift_compiler_create(&staticEnv_, &env_);
compiler_ = cranelift_compiler_create(&staticEnv_, &moduleEnv_);
return !!compiler_;
}
~CraneliftContext() {
@ -432,15 +432,15 @@ const GlobalDesc* env_global(const CraneliftModuleEnvironment* wrapper,
return &wrapper->env->globals[globalIndex];
}
bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& env,
bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& moduleEnv,
LifoAlloc& lifo,
const FuncCompileInputVector& inputs,
CompiledCode* code, UniqueChars* error) {
MOZ_RELEASE_ASSERT(CraneliftPlatformSupport());
MOZ_ASSERT(env.tier() == Tier::Optimized);
MOZ_ASSERT(env.optimizedBackend() == OptimizedBackend::Cranelift);
MOZ_ASSERT(!env.isAsmJS());
MOZ_ASSERT(moduleEnv.tier() == Tier::Optimized);
MOZ_ASSERT(moduleEnv.optimizedBackend() == OptimizedBackend::Cranelift);
MOZ_ASSERT(!moduleEnv.isAsmJS());
TempAllocator alloc(&lifo);
JitContext jitContext(&alloc);
@ -456,7 +456,7 @@ bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& env,
}
if (!reusableContext) {
auto context = MakeUnique<CraneliftContext>(env);
auto context = MakeUnique<CraneliftContext>(moduleEnv);
if (!context || !context->init()) {
return false;
}
@ -482,7 +482,7 @@ bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& env,
Decoder d(func.begin, func.end, func.lineOrBytecode, error);
size_t funcBytecodeSize = func.end - func.begin;
if (!ValidateFunctionBody(env, func.index, funcBytecodeSize, d)) {
if (!ValidateFunctionBody(moduleEnv, func.index, funcBytecodeSize, d)) {
return false;
}
@ -499,7 +499,7 @@ bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& env,
}
uint32_t lineOrBytecode = func.lineOrBytecode;
const FuncTypeWithId& funcType = *env.funcTypes[clifInput.index];
const FuncTypeWithId& funcType = *moduleEnv.funcTypes[clifInput.index];
FuncOffsets offsets;
if (!GenerateCraneliftCode(

View file

@ -33,7 +33,7 @@ MOZ_MUST_USE bool CraneliftPlatformSupport();
// Generates code with Cranelift.
MOZ_MUST_USE bool CraneliftCompileFunctions(
const ModuleEnvironment& env, LifoAlloc& lifo,
const ModuleEnvironment& moduleEnv, LifoAlloc& lifo,
const FuncCompileInputVector& inputs, CompiledCode* code,
UniqueChars* error);
@ -42,7 +42,7 @@ void CraneliftFreeReusableData(void* data);
MOZ_MUST_USE inline bool CraneliftPlatformSupport() { return false; }
MOZ_MUST_USE inline bool CraneliftCompileFunctions(
const ModuleEnvironment& env, LifoAlloc& lifo,
const ModuleEnvironment& moduleEnv, LifoAlloc& lifo,
const FuncCompileInputVector& inputs, CompiledCode* code,
UniqueChars* error) {
MOZ_CRASH("Should not happen");

View file

@ -77,13 +77,13 @@ static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
ModuleGenerator::ModuleGenerator(const CompileArgs& args,
ModuleEnvironment* env,
ModuleEnvironment* moduleEnv,
const Atomic<bool>* cancelled,
UniqueChars* error)
: compileArgs_(&args),
error_(error),
cancelled_(cancelled),
env_(env),
moduleEnv_(moduleEnv),
linkData_(nullptr),
metadataTier_(nullptr),
lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
@ -211,7 +211,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
// elements will be initialized by the time module generation is finished.
if (!metadataTier_->funcToCodeRange.appendN(BAD_CODE_RANGE,
env_->funcTypes.length())) {
moduleEnv_->funcTypes.length())) {
return false;
}
@ -221,13 +221,14 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
// actual allocations will succeed, ignore OOM failures. Note,
// shrinkStorageToFit calls at the end will trim off unneeded capacity.
size_t codeSectionSize = env_->codeSection ? env_->codeSection->size : 0;
size_t codeSectionSize =
moduleEnv_->codeSection ? moduleEnv_->codeSection->size : 0;
size_t estimatedCodeSize =
1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize);
Unused << masm_.reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess));
Unused << metadataTier_->codeRanges.reserve(2 * env_->numFuncDefs());
Unused << metadataTier_->codeRanges.reserve(2 * moduleEnv_->numFuncDefs());
const size_t ByteCodesPerCallSite = 50;
Unused << metadataTier_->callSites.reserve(codeSectionSize /
@ -241,17 +242,18 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
MOZ_ASSERT(metadata_->globalDataLength == 0);
for (size_t i = 0; i < env_->funcImportGlobalDataOffsets.length(); i++) {
for (size_t i = 0; i < moduleEnv_->funcImportGlobalDataOffsets.length();
i++) {
uint32_t globalDataOffset;
if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*),
&globalDataOffset)) {
return false;
}
env_->funcImportGlobalDataOffsets[i] = globalDataOffset;
moduleEnv_->funcImportGlobalDataOffsets[i] = globalDataOffset;
FuncType copy;
if (!copy.clone(*env_->funcTypes[i])) {
if (!copy.clone(*moduleEnv_->funcTypes[i])) {
return false;
}
if (!metadataTier_->funcImports.emplaceBack(std::move(copy),
@ -260,7 +262,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
}
}
for (TableDesc& table : env_->tables) {
for (TableDesc& table : moduleEnv_->tables) {
if (!allocateGlobalBytes(sizeof(TableTls), sizeof(void*),
&table.globalDataOffset)) {
return false;
@ -268,7 +270,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
}
if (!isAsmJS()) {
for (TypeDef& td : env_->types) {
for (TypeDef& td : moduleEnv_->types) {
if (!td.isFuncType()) {
continue;
}
@ -297,7 +299,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
}
}
for (GlobalDesc& global : env_->globals) {
for (GlobalDesc& global : moduleEnv_->globals) {
if (global.isConstant()) {
continue;
}
@ -349,7 +351,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
};
Vector<ExportedFunc, 8, SystemAllocPolicy> exportedFuncs;
if (!exportedFuncs.resize(env_->numFuncs())) {
if (!exportedFuncs.resize(moduleEnv_->numFuncs())) {
return false;
}
@ -362,17 +364,17 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
}
};
for (const Export& exp : env_->exports) {
for (const Export& exp : moduleEnv_->exports) {
if (exp.kind() == DefinitionKind::Function) {
addOrMerge(ExportedFunc(exp.funcIndex(), true));
}
}
if (env_->startFuncIndex) {
addOrMerge(ExportedFunc(*env_->startFuncIndex, true));
if (moduleEnv_->startFuncIndex) {
addOrMerge(ExportedFunc(*moduleEnv_->startFuncIndex, true));
}
for (const ElemSegment* seg : env_->elemSegments) {
for (const ElemSegment* seg : moduleEnv_->elemSegments) {
// For now, the segments always carry function indices regardless of the
// segment's declared element type; this works because the only legal
// element types are funcref and externref and the only legal values are
@ -380,7 +382,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
// functions, regardless of the segment's type. In the future, if we make
// the representation of AnyRef segments different, we will have to consider
// function values in those segments specially.
bool isAsmJS = seg->active() && env_->tables[seg->tableIndex].isAsmJS;
bool isAsmJS = seg->active() && moduleEnv_->tables[seg->tableIndex].isAsmJS;
if (!isAsmJS) {
for (uint32_t funcIndex : seg->elemFuncIndices) {
if (funcIndex != NullFuncIndex) {
@ -390,7 +392,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
}
}
for (const GlobalDesc& global : env_->globals) {
for (const GlobalDesc& global : moduleEnv_->globals) {
if (global.isVariable() &&
global.initExpr().kind() == InitExpr::Kind::RefFunc) {
addOrMerge(ExportedFunc(global.initExpr().refFuncIndex(), false));
@ -408,7 +410,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
for (const ExportedFunc& funcIndex : exportedFuncs) {
FuncType funcType;
if (!funcType.clone(*env_->funcTypes[funcIndex.index()])) {
if (!funcType.clone(*moduleEnv_->funcTypes[funcIndex.index()])) {
return false;
}
metadataTier_->funcExports.infallibleEmplaceBack(
@ -433,7 +435,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
return false;
}
for (size_t i = 0; i < numTasks; i++) {
tasks_.infallibleEmplaceBack(*env_, taskState_,
tasks_.infallibleEmplaceBack(*moduleEnv_, taskState_,
COMPILATION_LIFO_DEFAULT_CHUNK_SIZE,
telemetrySender);
}
@ -452,7 +454,7 @@ bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
CompiledCode& importCode = tasks_[0].output;
MOZ_ASSERT(importCode.empty());
if (!GenerateImportFunctions(*env_, metadataTier_->funcImports,
if (!GenerateImportFunctions(*moduleEnv_, metadataTier_->funcImports,
&importCode)) {
return false;
}
@ -737,12 +739,12 @@ static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
int compileTimeTelemetryID;
#endif
switch (task->env.tier()) {
switch (task->moduleEnv.tier()) {
case Tier::Optimized:
switch (task->env.optimizedBackend()) {
switch (task->moduleEnv.optimizedBackend()) {
case OptimizedBackend::Cranelift:
if (!CraneliftCompileFunctions(task->env, task->lifo, task->inputs,
&task->output, error)) {
if (!CraneliftCompileFunctions(task->moduleEnv, task->lifo,
task->inputs, &task->output, error)) {
return false;
}
#ifdef ENABLE_SPIDERMONKEY_TELEMETRY
@ -750,7 +752,7 @@ static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
#endif
break;
case OptimizedBackend::Ion:
if (!IonCompileFunctions(task->env, task->lifo, task->inputs,
if (!IonCompileFunctions(task->moduleEnv, task->lifo, task->inputs,
&task->output, error)) {
return false;
}
@ -761,7 +763,7 @@ static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
}
break;
case Tier::Baseline:
if (!BaselineCompileFunctions(task->env, task->lifo, task->inputs,
if (!BaselineCompileFunctions(task->moduleEnv, task->lifo, task->inputs,
&task->output, error)) {
return false;
}
@ -889,7 +891,7 @@ bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
const uint8_t* begin, const uint8_t* end,
Uint32Vector&& lineNums) {
MOZ_ASSERT(!finishedFuncDefs_);
MOZ_ASSERT(funcIndex < env_->numFuncs());
MOZ_ASSERT(funcIndex < moduleEnv_->numFuncs());
uint32_t threshold;
switch (tier()) {
@ -897,7 +899,7 @@ bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
threshold = JitOptions.wasmBatchBaselineThreshold;
break;
case Tier::Optimized:
switch (env_->optimizedBackend()) {
switch (moduleEnv_->optimizedBackend()) {
case OptimizedBackend::Ion:
threshold = JitOptions.wasmBatchIonThreshold;
break;
@ -1067,7 +1069,7 @@ UniqueCodeTier ModuleGenerator::finishCodeTier() {
CompiledCode& stubCode = tasks_[0].output;
MOZ_ASSERT(stubCode.empty());
if (!GenerateStubs(*env_, metadataTier_->funcImports,
if (!GenerateStubs(*moduleEnv_, metadataTier_->funcImports,
metadataTier_->funcExports, &stubCode)) {
return nullptr;
}
@ -1097,7 +1099,7 @@ UniqueCodeTier ModuleGenerator::finishCodeTier() {
#ifdef DEBUG
// Check that each stack map is associated with a plausible instruction.
for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
MOZ_ASSERT(IsValidStackMapKey(env_->debugEnabled(),
MOZ_ASSERT(IsValidStackMapKey(moduleEnv_->debugEnabled(),
metadataTier_->stackMaps.get(i).nextInsnAddr),
"wasm stack map does not reference a valid insn");
}
@ -1113,24 +1115,24 @@ SharedMetadata ModuleGenerator::finishMetadata(const Bytes& bytecode) {
// Copy over data from the ModuleEnvironment.
metadata_->memoryUsage = env_->memoryUsage;
metadata_->minMemoryLength = env_->minMemoryLength;
metadata_->maxMemoryLength = env_->maxMemoryLength;
metadata_->startFuncIndex = env_->startFuncIndex;
metadata_->tables = std::move(env_->tables);
metadata_->globals = std::move(env_->globals);
metadata_->nameCustomSectionIndex = env_->nameCustomSectionIndex;
metadata_->moduleName = env_->moduleName;
metadata_->funcNames = std::move(env_->funcNames);
metadata_->omitsBoundsChecks = env_->hugeMemoryEnabled();
metadata_->v128Enabled = env_->v128Enabled();
metadata_->memoryUsage = moduleEnv_->memoryUsage;
metadata_->minMemoryLength = moduleEnv_->minMemoryLength;
metadata_->maxMemoryLength = moduleEnv_->maxMemoryLength;
metadata_->startFuncIndex = moduleEnv_->startFuncIndex;
metadata_->tables = std::move(moduleEnv_->tables);
metadata_->globals = std::move(moduleEnv_->globals);
metadata_->nameCustomSectionIndex = moduleEnv_->nameCustomSectionIndex;
metadata_->moduleName = moduleEnv_->moduleName;
metadata_->funcNames = std::move(moduleEnv_->funcNames);
metadata_->omitsBoundsChecks = moduleEnv_->hugeMemoryEnabled();
metadata_->v128Enabled = moduleEnv_->v128Enabled();
// Copy over additional debug information.
if (env_->debugEnabled()) {
if (moduleEnv_->debugEnabled()) {
metadata_->debugEnabled = true;
const size_t numFuncTypes = env_->funcTypes.length();
const size_t numFuncTypes = moduleEnv_->funcTypes.length();
if (!metadata_->debugFuncArgTypes.resize(numFuncTypes)) {
return nullptr;
}
@ -1139,11 +1141,11 @@ SharedMetadata ModuleGenerator::finishMetadata(const Bytes& bytecode) {
}
for (size_t i = 0; i < numFuncTypes; i++) {
if (!metadata_->debugFuncArgTypes[i].appendAll(
env_->funcTypes[i]->args())) {
moduleEnv_->funcTypes[i]->args())) {
return nullptr;
}
if (!metadata_->debugFuncReturnTypes[i].appendAll(
env_->funcTypes[i]->results())) {
moduleEnv_->funcTypes[i]->results())) {
return nullptr;
}
}
@ -1157,7 +1159,7 @@ SharedMetadata ModuleGenerator::finishMetadata(const Bytes& bytecode) {
memcpy(metadata_->debugHash, hash, sizeof(ModuleHash));
}
MOZ_ASSERT_IF(env_->nameCustomSectionIndex, !!metadata_->namePayload);
MOZ_ASSERT_IF(moduleEnv_->nameCustomSectionIndex, !!metadata_->namePayload);
// Metadata shouldn't be mutably modified after finishMetadata().
SharedMetadata metadata = metadata_;
@ -1185,10 +1187,10 @@ SharedModule ModuleGenerator::finishModule(
// compilation.
DataSegmentVector dataSegments;
if (!dataSegments.reserve(env_->dataSegments.length())) {
if (!dataSegments.reserve(moduleEnv_->dataSegments.length())) {
return nullptr;
}
for (const DataSegmentEnv& srcSeg : env_->dataSegments) {
for (const DataSegmentEnv& srcSeg : moduleEnv_->dataSegments) {
MutableDataSegment dstSeg = js_new<DataSegment>(srcSeg);
if (!dstSeg) {
return nullptr;
@ -1201,10 +1203,10 @@ SharedModule ModuleGenerator::finishModule(
}
CustomSectionVector customSections;
if (!customSections.reserve(env_->customSections.length())) {
if (!customSections.reserve(moduleEnv_->customSections.length())) {
return nullptr;
}
for (const CustomSectionEnv& srcSec : env_->customSections) {
for (const CustomSectionEnv& srcSec : moduleEnv_->customSections) {
CustomSection sec;
if (!sec.name.append(bytecode.begin() + srcSec.nameOffset,
srcSec.nameLength)) {
@ -1222,9 +1224,9 @@ SharedModule ModuleGenerator::finishModule(
customSections.infallibleAppend(std::move(sec));
}
if (env_->nameCustomSectionIndex) {
if (moduleEnv_->nameCustomSectionIndex) {
metadata_->namePayload =
customSections[*env_->nameCustomSectionIndex].payload;
customSections[*moduleEnv_->nameCustomSectionIndex].payload;
}
SharedMetadata metadata = finishMetadata(bytecode.bytes);
@ -1233,7 +1235,7 @@ SharedModule ModuleGenerator::finishModule(
}
StructTypeVector structTypes;
for (TypeDef& td : env_->types) {
for (TypeDef& td : moduleEnv_->types) {
if (td.isStructType() && !structTypes.append(std::move(td.structType()))) {
return nullptr;
}
@ -1252,7 +1254,7 @@ SharedModule ModuleGenerator::finishModule(
UniqueBytes debugUnlinkedCode;
UniqueLinkData debugLinkData;
const ShareableBytes* debugBytecode = nullptr;
if (env_->debugEnabled()) {
if (moduleEnv_->debugEnabled()) {
MOZ_ASSERT(mode() == CompileMode::Once);
MOZ_ASSERT(tier() == Tier::Debug);
@ -1270,11 +1272,11 @@ SharedModule ModuleGenerator::finishModule(
// All the components are finished, so create the complete Module and start
// tier-2 compilation if requested.
MutableModule module =
js_new<Module>(*code, std::move(env_->imports), std::move(env_->exports),
std::move(dataSegments), std::move(env_->elemSegments),
std::move(customSections), std::move(debugUnlinkedCode),
std::move(debugLinkData), debugBytecode);
MutableModule module = js_new<Module>(
*code, std::move(moduleEnv_->imports), std::move(moduleEnv_->exports),
std::move(dataSegments), std::move(moduleEnv_->elemSegments),
std::move(customSections), std::move(debugUnlinkedCode),
std::move(debugLinkData), debugBytecode);
if (!module) {
return nullptr;
}
@ -1292,7 +1294,7 @@ SharedModule ModuleGenerator::finishModule(
bool ModuleGenerator::finishTier2(const Module& module) {
MOZ_ASSERT(mode() == CompileMode::Tier2);
MOZ_ASSERT(tier() == Tier::Optimized);
MOZ_ASSERT(!env_->debugEnabled());
MOZ_ASSERT(!moduleEnv_->debugEnabled());
if (cancelled_ && *cancelled_) {
return false;

View file

@ -131,16 +131,16 @@ struct CompileTaskState {
// helper thread as well as, eventually, the results of compilation.
struct CompileTask : public HelperThreadTask {
const ModuleEnvironment& env;
const ModuleEnvironment& moduleEnv;
CompileTaskState& state;
LifoAlloc lifo;
FuncCompileInputVector inputs;
CompiledCode output;
JSTelemetrySender telemetrySender;
CompileTask(const ModuleEnvironment& env, CompileTaskState& state,
CompileTask(const ModuleEnvironment& moduleEnv, CompileTaskState& state,
size_t defaultChunkSize, JSTelemetrySender telemetrySender)
: env(env),
: moduleEnv(moduleEnv),
state(state),
lifo(defaultChunkSize),
telemetrySender(telemetrySender) {}
@ -173,7 +173,7 @@ class MOZ_STACK_CLASS ModuleGenerator {
SharedCompileArgs const compileArgs_;
UniqueChars* const error_;
const Atomic<bool>* const cancelled_;
ModuleEnvironment* const env_;
ModuleEnvironment* const moduleEnv_;
JSTelemetrySender telemetrySender_;
// Data that is moved into the result of finish()
@ -223,13 +223,13 @@ class MOZ_STACK_CLASS ModuleGenerator {
UniqueCodeTier finishCodeTier();
SharedMetadata finishMetadata(const Bytes& bytecode);
bool isAsmJS() const { return env_->isAsmJS(); }
Tier tier() const { return env_->tier(); }
CompileMode mode() const { return env_->mode(); }
bool debugEnabled() const { return env_->debugEnabled(); }
bool isAsmJS() const { return moduleEnv_->isAsmJS(); }
Tier tier() const { return moduleEnv_->tier(); }
CompileMode mode() const { return moduleEnv_->mode(); }
bool debugEnabled() const { return moduleEnv_->debugEnabled(); }
public:
ModuleGenerator(const CompileArgs& args, ModuleEnvironment* env,
ModuleGenerator(const CompileArgs& args, ModuleEnvironment* moduleEnv,
const Atomic<bool>* cancelled, UniqueChars* error);
~ModuleGenerator();
MOZ_MUST_USE bool init(

View file

@ -95,7 +95,7 @@ class FunctionCompiler {
typedef Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>
ControlFlowPatchsVector;
const ModuleEnvironment& env_;
const ModuleEnvironment& moduleEnv_;
IonOpIter iter_;
const FuncCompileInput& func_;
const ValTypeVector& locals_;
@ -118,11 +118,11 @@ class FunctionCompiler {
MWasmParameter* stackResultPointer_;
public:
FunctionCompiler(const ModuleEnvironment& env, Decoder& decoder,
FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
const FuncCompileInput& func, const ValTypeVector& locals,
MIRGenerator& mirGen)
: env_(env),
iter_(env, decoder),
: moduleEnv_(moduleEnv),
iter_(moduleEnv, decoder),
func_(func),
locals_(locals),
lastReadCallSite_(0),
@ -137,16 +137,18 @@ class FunctionCompiler {
tlsPointer_(nullptr),
stackResultPointer_(nullptr) {}
const ModuleEnvironment& env() const { return env_; }
const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }
IonOpIter& iter() { return iter_; }
TempAllocator& alloc() const { return alloc_; }
// FIXME(1401675): Replace with BlockType.
uint32_t funcIndex() const { return func_.index; }
const FuncType& funcType() const { return *env_.funcTypes[func_.index]; }
const FuncType& funcType() const {
return *moduleEnv_.funcTypes[func_.index];
}
BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
BytecodeOffset bytecodeIfNotAsmJS() const {
return env_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
}
bool init() {
@ -380,7 +382,7 @@ class FunctionCompiler {
}
bool mustPreserveNaN(MIRType type) {
return IsFloatingPointType(type) && !env().isAsmJS();
return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
}
MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
@ -440,7 +442,7 @@ class FunctionCompiler {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !env().isAsmJS();
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// Enforce the signedness of the operation by coercing the operands
// to signed. Otherwise, operands that "look" unsigned to Ion but
@ -492,7 +494,7 @@ class FunctionCompiler {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !env().isAsmJS();
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// See block comment in div().
auto* lhs2 = createTruncateToInt32(lhs);
@ -885,7 +887,7 @@ class FunctionCompiler {
MWasmLoadTls* maybeLoadMemoryBase() {
MWasmLoadTls* load = nullptr;
#ifdef JS_CODEGEN_X86
AliasSet aliases = env_.maxMemoryLength.isSome()
AliasSet aliases = moduleEnv_.maxMemoryLength.isSome()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
load = MWasmLoadTls::New(alloc(), tlsPointer_,
@ -897,10 +899,10 @@ class FunctionCompiler {
}
MWasmLoadTls* maybeLoadBoundsCheckLimit() {
if (env_.hugeMemoryEnabled()) {
if (moduleEnv_.hugeMemoryEnabled()) {
return nullptr;
}
AliasSet aliases = env_.maxMemoryLength.isSome()
AliasSet aliases = moduleEnv_.maxMemoryLength.isSome()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
auto load = MWasmLoadTls::New(alloc(), tlsPointer_,
@ -913,7 +915,7 @@ class FunctionCompiler {
public:
MWasmHeapBase* memoryBase() {
MWasmHeapBase* base = nullptr;
AliasSet aliases = env_.maxMemoryLength.isSome()
AliasSet aliases = moduleEnv_.maxMemoryLength.isSome()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
base = MWasmHeapBase::New(alloc(), tlsPointer_, aliases);
@ -928,7 +930,7 @@ class FunctionCompiler {
MOZ_ASSERT(!*mustAdd);
// asm.js accesses are always aligned and need no checks.
if (env_.isAsmJS() || !access->isAtomic()) {
if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
return false;
}
@ -948,7 +950,8 @@ class FunctionCompiler {
MDefinition** base) {
MOZ_ASSERT(!inDeadCode());
uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
uint32_t offsetGuardLimit =
GetOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
// Fold a constant base into the offset (so the base is 0 in which case
// the codegen is optimized), if it doesn't wrap or trigger an
@ -1027,7 +1030,7 @@ class FunctionCompiler {
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* load = nullptr;
if (env_.isAsmJS()) {
if (moduleEnv_.isAsmJS()) {
MOZ_ASSERT(access->offset() == 0);
MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
@ -1051,7 +1054,7 @@ class FunctionCompiler {
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* store = nullptr;
if (env_.isAsmJS()) {
if (moduleEnv_.isAsmJS()) {
MOZ_ASSERT(access->offset() == 0);
MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
@ -1511,14 +1514,14 @@ class FunctionCompiler {
return true;
}
const FuncTypeWithId& funcType = env_.types[funcTypeIndex].funcType();
const FuncTypeWithId& funcType = moduleEnv_.types[funcTypeIndex].funcType();
CalleeDesc callee;
if (env_.isAsmJS()) {
if (moduleEnv_.isAsmJS()) {
MOZ_ASSERT(tableIndex == 0);
MOZ_ASSERT(funcType.id.kind() == FuncTypeIdDescKind::None);
const TableDesc& table =
env_.tables[env_.asmJSSigToTableIndex[funcTypeIndex]];
moduleEnv_.tables[moduleEnv_.asmJSSigToTableIndex[funcTypeIndex]];
MOZ_ASSERT(IsPowerOfTwo(table.initialLength));
MConstant* mask =
@ -1531,7 +1534,7 @@ class FunctionCompiler {
callee = CalleeDesc::asmJSTable(table);
} else {
MOZ_ASSERT(funcType.id.kind() != FuncTypeIdDescKind::None);
const TableDesc& table = env_.tables[tableIndex];
const TableDesc& table = moduleEnv_.tables[tableIndex];
callee = CalleeDesc::wasmTable(table, funcType.id);
}
@ -2507,7 +2510,7 @@ static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
uint32_t funcIndex;
DefVector args;
if (asmJSFuncDef) {
if (!f.iter().readOldCallDirect(f.env().numFuncImports(), &funcIndex,
if (!f.iter().readOldCallDirect(f.moduleEnv().numFuncImports(), &funcIndex,
&args)) {
return false;
}
@ -2521,7 +2524,7 @@ static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
return true;
}
const FuncType& funcType = *f.env().funcTypes[funcIndex];
const FuncType& funcType = *f.moduleEnv().funcTypes[funcIndex];
CallCompileState call;
if (!EmitCallArgs(f, funcType, args, &call)) {
@ -2529,8 +2532,9 @@ static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
}
DefVector results;
if (f.env().funcIsImport(funcIndex)) {
uint32_t globalDataOffset = f.env().funcImportGlobalDataOffsets[funcIndex];
if (f.moduleEnv().funcIsImport(funcIndex)) {
uint32_t globalDataOffset =
f.moduleEnv().funcImportGlobalDataOffsets[funcIndex];
if (!f.callImport(globalDataOffset, lineOrBytecode, call, funcType,
&results)) {
return false;
@ -2568,7 +2572,7 @@ static bool EmitCallIndirect(FunctionCompiler& f, bool oldStyle) {
return true;
}
const FuncType& funcType = f.env().types[funcTypeIndex].funcType();
const FuncType& funcType = f.moduleEnv().types[funcTypeIndex].funcType();
CallCompileState call;
if (!EmitCallArgs(f, funcType, args, &call)) {
@ -2623,7 +2627,7 @@ static bool EmitGetGlobal(FunctionCompiler& f) {
return false;
}
const GlobalDesc& global = f.env().globals[id];
const GlobalDesc& global = f.moduleEnv().globals[id];
if (!global.isConstant()) {
f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
global.isIndirect(),
@ -2683,7 +2687,7 @@ static bool EmitSetGlobal(FunctionCompiler& f) {
return false;
}
const GlobalDesc& global = f.env().globals[id];
const GlobalDesc& global = f.moduleEnv().globals[id];
MOZ_ASSERT(global.isMutable());
MInstruction* barrierAddr =
f.storeGlobalVar(global.offset(), global.isIndirect(), value);
@ -2717,7 +2721,7 @@ static bool EmitTeeGlobal(FunctionCompiler& f) {
return false;
}
const GlobalDesc& global = f.env().globals[id];
const GlobalDesc& global = f.moduleEnv().globals[id];
MOZ_ASSERT(global.isMutable());
f.storeGlobalVar(global.offset(), global.isIndirect(), value);
@ -2787,7 +2791,7 @@ static bool EmitTruncate(FunctionCompiler& f, ValType operandType,
flags |= TRUNC_SATURATING;
}
if (resultType == ValType::I32) {
if (f.env().isAsmJS()) {
if (f.moduleEnv().isAsmJS()) {
if (input && (input->type() == MIRType::Double ||
input->type() == MIRType::Float32)) {
f.iter().setResult(f.unary<MWasmBuiltinTruncateToInt32>(input));
@ -2799,7 +2803,7 @@ static bool EmitTruncate(FunctionCompiler& f, ValType operandType,
}
} else {
MOZ_ASSERT(resultType == ValType::I64);
MOZ_ASSERT(!f.env().isAsmJS());
MOZ_ASSERT(!f.moduleEnv().isAsmJS());
#if defined(JS_CODEGEN_ARM)
f.iter().setResult(f.truncateWithTls(input, flags));
#else
@ -3427,7 +3431,7 @@ static bool EmitMemCopyCall(FunctionCompiler& f, MDefinition* dst,
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
const SymbolicAddressSignature& callee =
(f.env().usesSharedMemory() ? SASigMemCopyShared : SASigMemCopy);
(f.moduleEnv().usesSharedMemory() ? SASigMemCopyShared : SASigMemCopy);
CallCompileState args;
if (!f.passInstance(callee.argTypes[0], &args)) {
return false;
@ -3670,7 +3674,7 @@ static bool EmitMemFillCall(FunctionCompiler& f, MDefinition* start,
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
const SymbolicAddressSignature& callee =
f.env().usesSharedMemory() ? SASigMemFillShared : SASigMemFill;
f.moduleEnv().usesSharedMemory() ? SASigMemFillShared : SASigMemFill;
CallCompileState args;
if (!f.passInstance(callee.argTypes[0], &args)) {
return false;
@ -4367,7 +4371,7 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
case uint16_t(Op::SelectNumeric):
CHECK(EmitSelect(f, /*typed*/ false));
case uint16_t(Op::SelectTyped):
if (!f.env().refTypesEnabled()) {
if (!f.moduleEnv().refTypesEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitSelect(f, /*typed*/ true));
@ -4741,7 +4745,7 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefEq):
if (!f.env().gcTypesEnabled()) {
if (!f.moduleEnv().gcTypesEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitComparison(f, RefType::extern_(), JSOp::Eq,
@ -4787,7 +4791,7 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
// SIMD operations
#ifdef ENABLE_WASM_SIMD
case uint16_t(Op::SimdPrefix): {
if (!f.env().v128Enabled()) {
if (!f.moduleEnv().v128Enabled()) {
return f.iter().unrecognizedOpcode(&op);
}
switch (op.b1) {
@ -5083,7 +5087,7 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
// Thread operations
case uint16_t(Op::ThreadPrefix): {
if (f.env().sharedMemoryEnabled() == Shareable::False) {
if (f.moduleEnv().sharedMemoryEnabled() == Shareable::False) {
return f.iter().unrecognizedOpcode(&op);
}
switch (op.b1) {
@ -5275,7 +5279,7 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
// asm.js-specific operators
case uint16_t(Op::MozPrefix): {
if (!f.env().isAsmJS()) {
if (!f.moduleEnv().isAsmJS()) {
return f.iter().unrecognizedOpcode(&op);
}
switch (op.b1) {
@ -5358,11 +5362,12 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
#undef CHECK_SIMD_EXPERIMENTAL
}
bool wasm::IonCompileFunctions(const ModuleEnvironment& env, LifoAlloc& lifo,
bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
LifoAlloc& lifo,
const FuncCompileInputVector& inputs,
CompiledCode* code, UniqueChars* error) {
MOZ_ASSERT(env.tier() == Tier::Optimized);
MOZ_ASSERT(env.optimizedBackend() == OptimizedBackend::Ion);
MOZ_ASSERT(moduleEnv.tier() == Tier::Optimized);
MOZ_ASSERT(moduleEnv.optimizedBackend() == OptimizedBackend::Ion);
TempAllocator alloc(&lifo);
JitContext jitContext(&alloc);
@ -5391,12 +5396,12 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& env, LifoAlloc& lifo,
// Build the local types vector.
const FuncTypeWithId& funcType = *env.funcTypes[func.index];
const FuncTypeWithId& funcType = *moduleEnv.funcTypes[func.index];
ValTypeVector locals;
if (!locals.appendAll(funcType.args())) {
return false;
}
if (!DecodeLocalEntries(d, env.types, env.features, &locals)) {
if (!DecodeLocalEntries(d, moduleEnv.types, moduleEnv.features, &locals)) {
return false;
}
@ -5407,11 +5412,11 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& env, LifoAlloc& lifo,
CompileInfo compileInfo(locals.length());
MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
IonOptimizations.get(OptimizationLevel::Wasm));
mir.initMinWasmHeapLength(env.minMemoryLength);
mir.initMinWasmHeapLength(moduleEnv.minMemoryLength);
// Build MIR graph
{
FunctionCompiler f(env, d, func, locals, mir);
FunctionCompiler f(moduleEnv, d, func, locals, mir);
if (!f.init()) {
return false;
}

View file

@ -31,7 +31,7 @@ namespace wasm {
MOZ_MUST_USE bool IonPlatformSupport();
// Generates very fast code at the expense of compilation time.
MOZ_MUST_USE bool IonCompileFunctions(const ModuleEnvironment& env,
MOZ_MUST_USE bool IonCompileFunctions(const ModuleEnvironment& moduleEnv,
LifoAlloc& lifo,
const FuncCompileInputVector& inputs,
CompiledCode* code, UniqueChars* error);