Bug 1727084 - Memory64 - Preliminaries. r=yury

Scaffolding: Add assertions everywhere execution currently depends on
a memory being a memory32.  These assertions will disappear by and by.
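
For example, in the baseline compiler the guard is a single line at the
top of each memory operation (abbreviated excerpt of the diff below;
only the assertion line is new):

    bool BaseCompiler::emitLoad(ValType type, Scalar::Type viewType) {
      // ...operand decoding and dead-code check elided...
      MOZ_ASSERT(isMem32());  // scaffolding; goes away as memory64 lands
      MemoryAccessDesc access(viewType, addr.align, addr.offset,
                              bytecodeOffset());
      loadCommon(&access, AccessCheck(), type);
      return true;
    }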

Add some useful predicates and guard memory64 on !huge-memory.
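
A minimal standalone sketch of the shape of these predicates and the
huge-memory guard (model code, not the actual SpiderMonkey declarations;
the names mirror the diffs below). Huge memory relies on reserving a
large guard region past a 32-bit index space, so it cannot be combined
with 64-bit indices:

    #include <cassert>

    enum class IndexType { I32, I64 };

    struct ModuleEnv {
      IndexType memoryIndexType;
      bool hugeMemory;
    };

    bool isMem32(const ModuleEnv& env) {
      return env.memoryIndexType == IndexType::I32;
    }
    bool isMem64(const ModuleEnv& env) {
      return env.memoryIndexType == IndexType::I64;
    }

    void checkInvariants(const ModuleEnv& env) {
      // Guard memory64 on !huge-memory.
      assert(!(isMem64(env) && env.hugeMemory));
    }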

Make it possible for the initial heap size to be a 64-bit value,
update WasmBCE.cpp to deal with this, and note in a comment that this
does not affect asm.js at all.
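
A standalone sketch of the 64-bit initial-length computation (assumed
shapes that mirror the MemoryDesc and IonCompileFunctions changes below;
wasm pages are 64 KiB):

    #include <cassert>
    #include <cstdint>

    enum class IndexType { I32, I64 };
    constexpr uint64_t PageSize = 65536;

    struct MemoryDesc {
      IndexType index;
      uint64_t initialPages;

      uint64_t initialLength32() const {
        assert(index == IndexType::I32);
        return initialPages * PageSize;
      }
      uint64_t initialLength64() const {
        assert(index == IndexType::I64);
        return initialPages * PageSize;  // may exceed UINT32_MAX
      }
    };

    // Callers select the accessor that matches the index type, as
    // IonCompileFunctions does below; asm.js is unaffected because its
    // memories are always memory32.
    uint64_t minWasmHeapLength(const MemoryDesc& m) {
      return m.index == IndexType::I32 ? m.initialLength32()
                                       : m.initialLength64();
    }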

Exclude Cranelift and MIPS64 from memory64 in moz.configure; this
is extra work that does not pay off at this time.

Enable the inclusion of memory64 code by default in moz.configure.

Flip the about:config switch to false so that we can land in Nightly
without exposing content to m64 until we have completed Milestone 2.

Test cases are in the last patch in the queue.

Differential Revision: https://phabricator.services.mozilla.com/D124887
Lars T Hansen 2021-10-18 07:31:54 +00:00
Parent 0af22ab84d
Commit 96fecc40b8
9 changed files with 101 additions and 9 deletions

@@ -898,17 +898,44 @@ set_define("ENABLE_WASM_MOZ_INTGEMM", wasm_moz_intgemm)
 # Support for WebAssembly Memory64.
 # ===========================
+@depends(milestone.is_nightly, "--enable-cranelift", "--enable-simulator", target)
+def default_wasm_memory64(is_nightly, cranelift, simulator, target):
+    if cranelift:
+        return
+    if target.cpu == "mips32" or target.cpu == "mips64":
+        return
+    if simulator and (simulator[0] == "mips32" or simulator[0] == "mips64"):
+        return
+    if is_nightly:
+        return True
+
 option(
     "--enable-wasm-memory64",
-    default=False,
+    default=default_wasm_memory64,
     help="{Enable|Disable} WebAssembly Memory64",
 )
 
-@depends("--enable-wasm-memory64")
-def wasm_memory64(value):
-    if value:
-        return True
+@depends("--enable-wasm-memory64", "--enable-cranelift", "--enable-simulator", target)
+def wasm_memory64(value, cranelift, simulator, target):
+    if not value:
+        return
+    if cranelift:
+        die("Memory64 is incompatible with Cranelift")
+    if target.cpu == "mips32" or target.cpu == "mips64":
+        die("Memory64 is incompatible with MIPS target")
+    if simulator and (simulator[0] == "mips32" or simulator[0] == "mips64"):
+        die("Memory64 is incompatible with MIPS simulator")
+    return True
 
 set_config("ENABLE_WASM_MEMORY64", wasm_memory64)

@@ -199,8 +199,9 @@ void EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins) {
   MDefinition* base = ins->base();
   if (base->isConstant()) {
-    // If the index is within the minimum heap length, we can optimize
-    // away the bounds check.
+    // If the index is within the minimum heap length, we can optimize away the
+    // bounds check. Asm.js accesses always have an int32 base, and the memory
+    // is always a memory32.
     int32_t imm = base->toConstant()->toInt32();
     if (imm >= 0) {
       int32_t end = (uint32_t)imm + ins->byteSize();

@@ -33,6 +33,8 @@ const TypeIdDesc& BaseCompiler::funcTypeId() const {
   return *moduleEnv_.funcs[func_.index].typeId;
 }
 
+bool BaseCompiler::usesMemory() const { return moduleEnv_.usesMemory(); }
+
 bool BaseCompiler::usesSharedMemory() const {
   return moduleEnv_.usesSharedMemory();
 }
@@ -53,6 +55,14 @@ BytecodeOffset BaseCompiler::bytecodeOffset() const {
   return iter_.bytecodeOffset();
 }
 
+bool BaseCompiler::isMem32() const {
+  return moduleEnv_.memory->indexType() == IndexType::I32;
+}
+
+bool BaseCompiler::isMem64() const {
+  return moduleEnv_.memory->indexType() == IndexType::I64;
+}
+
 }  // namespace wasm
 }  // namespace js

@@ -320,7 +320,10 @@ struct BaseCompiler final {
   inline const FuncType& funcType() const;
   inline const TypeIdDesc& funcTypeId() const;
 
+  inline bool usesMemory() const;
   inline bool usesSharedMemory() const;
+  inline bool isMem32() const;
+  inline bool isMem64() const;
 
   // The casts are used by some of the ScratchRegister implementations.
   operator MacroAssembler&() const { return masm; }

@@ -767,6 +767,7 @@ void BaseCompiler::atomicStore(MemoryAccessDesc* access, ValType type) {
 
 void BaseCompiler::atomicRMW(MemoryAccessDesc* access, ValType type,
                              AtomicOp op) {
+  MOZ_ASSERT(isMem32());
   Scalar::Type viewType = access->type();
   if (Scalar::byteSize(viewType) <= 4) {
     atomicRMW32(access, type, op);
@@ -1100,6 +1101,7 @@ void BaseCompiler::atomicRMW64(MemoryAccessDesc* access, ValType type,
 
 // Atomic exchange (also used for atomic store in some cases).
 void BaseCompiler::atomicXchg(MemoryAccessDesc* access, ValType type) {
+  MOZ_ASSERT(isMem32());
   Scalar::Type viewType = access->type();
   if (Scalar::byteSize(viewType) <= 4) {
     atomicXchg32(access, type);
@@ -1357,6 +1359,7 @@ void BaseCompiler::atomicXchg64(MemoryAccessDesc* access,
 
 // Atomic compare-exchange.
 void BaseCompiler::atomicCmpXchg(MemoryAccessDesc* access, ValType type) {
+  MOZ_ASSERT(isMem32());
   Scalar::Type viewType = access->type();
   if (Scalar::byteSize(viewType) <= 4) {
     atomicCmpXchg32(access, type);
@@ -1660,6 +1663,7 @@ bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access,
       RegI64 timeout = popI64();
       RegI32 val = popI32();
+      MOZ_ASSERT(isMem32());
 
       computeEffectiveAddress(access);
       pushI32(val);
@@ -1674,6 +1678,7 @@ bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access,
       RegI64 timeout = popI64();
      RegI64 val = popI64();
+      MOZ_ASSERT(isMem32());
 
       computeEffectiveAddress(access);
       pushI64(val);
@@ -1695,6 +1700,7 @@ bool BaseCompiler::atomicWake(MemoryAccessDesc* access,
                               uint32_t lineOrBytecode) {
   RegI32 count = popI32();
+  MOZ_ASSERT(isMem32());
 
   computeEffectiveAddress(access);
   pushI32(count);

@@ -4919,6 +4919,7 @@ bool BaseCompiler::emitLoad(ValType type, Scalar::Type viewType) {
   if (deadCode_) {
     return true;
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
   loadCommon(&access, AccessCheck(), type);
   return true;
@@ -4934,6 +4935,7 @@ bool BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType) {
   if (deadCode_) {
     return true;
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
   storeCommon(&access, AccessCheck(), resultType);
   return true;
@@ -5324,6 +5326,7 @@ bool BaseCompiler::emitAtomicLoad(ValType type, Scalar::Type viewType) {
   if (deadCode_) {
     return true;
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
                           Synchronization::Load());
   atomicLoad(&access, type);
@@ -5357,6 +5360,7 @@ bool BaseCompiler::emitAtomicStore(ValType type, Scalar::Type viewType) {
   if (deadCode_) {
     return true;
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
                           Synchronization::Store());
   atomicStore(&access, type);
@@ -5452,6 +5456,7 @@ bool BaseCompiler::emitMemCopy() {
     return true;
   }
+  MOZ_ASSERT(isMem32());
 
   int32_t signedLength;
   if (peekConst(&signedLength) && signedLength != 0 &&
       uint32_t(signedLength) <= MaxInlineMemoryCopyLength) {
@@ -5481,6 +5486,7 @@ bool BaseCompiler::emitMemFill() {
     return true;
   }
+  MOZ_ASSERT(isMem32());
 
   int32_t signedLength;
   int32_t signedValue;
   if (peek2xConst(&signedLength, &signedValue) && signedLength != 0 &&
@@ -5505,6 +5511,7 @@ bool BaseCompiler::emitMemInit() {
   Nothing nothing;
   if (iter_.readMemOrTableInit(/*isMem*/ true, segIndex, &dstTableIndex,
                                &nothing, &nothing, &nothing)) {
+    MOZ_ASSERT(isMem32());
     return true;
   }
   return false;
@@ -7488,6 +7495,7 @@ bool BaseCompiler::emitLoadSplat(Scalar::Type viewType) {
   if (deadCode_) {
     return true;
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
   loadSplat(&access);
   return true;
@@ -7502,6 +7510,7 @@ bool BaseCompiler::emitLoadZero(Scalar::Type viewType) {
   if (deadCode_) {
     return true;
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
   loadZero(&access);
   return true;
@@ -7515,6 +7524,7 @@ bool BaseCompiler::emitLoadExtend(Scalar::Type viewType) {
   if (deadCode_) {
     return true;
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(Scalar::Int64, addr.align, addr.offset,
                           bytecodeOffset());
   loadExtend(&access, viewType);
@@ -7548,6 +7558,7 @@ bool BaseCompiler::emitLoadLane(uint32_t laneSize) {
     default:
       MOZ_CRASH("unsupported laneSize");
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
   loadLane(&access, laneIndex);
   return true;
@@ -7580,6 +7591,7 @@ bool BaseCompiler::emitStoreLane(uint32_t laneSize) {
     default:
       MOZ_CRASH("unsupported laneSize");
   }
+  MOZ_ASSERT(isMem32());
   MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
   storeLane(&access, laneIndex);
   return true;
@@ -9713,6 +9725,9 @@ BaseCompiler::~BaseCompiler() {
 }
 
 bool BaseCompiler::init() {
+  // We may lift this restriction in the future.
+  MOZ_ASSERT_IF(usesMemory() && isMem64(), !moduleEnv_.hugeMemoryEnabled());
+
   ra.init(this);
 
   if (!SigD_.append(ValType::F64)) {

@@ -1042,6 +1042,8 @@ class FunctionCompiler {
   }
 
  public:
+  bool isMem32() { return moduleEnv_.memory->indexType() == IndexType::I32; }
+
   // Add the offset into the pointer to yield the EA; trap on overflow.
   MDefinition* computeEffectiveAddress(MDefinition* base,
                                        MemoryAccessDesc* access) {
@@ -3251,6 +3253,7 @@ static bool EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) {
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset,
                           f.bytecodeIfNotAsmJS());
   auto* ins = f.load(addr.base, &access, type);
@@ -3271,6 +3274,7 @@ static bool EmitStore(FunctionCompiler& f, ValType resultType,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset,
                           f.bytecodeIfNotAsmJS());
@@ -3287,6 +3291,7 @@ static bool EmitTeeStore(FunctionCompiler& f, ValType resultType,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset,
                           f.bytecodeIfNotAsmJS());
@@ -3311,6 +3316,7 @@ static bool EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType,
       MOZ_CRASH("unexpected coerced store");
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset,
                           f.bytecodeIfNotAsmJS());
@@ -3472,6 +3478,7 @@ static bool EmitAtomicCmpXchg(FunctionCompiler& f, ValType type,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
                           Synchronization::Full());
   auto* ins =
@@ -3491,6 +3498,7 @@ static bool EmitAtomicLoad(FunctionCompiler& f, ValType type,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
                           Synchronization::Load());
   auto* ins = f.load(addr.base, &access, type);
@@ -3510,6 +3518,7 @@ static bool EmitAtomicRMW(FunctionCompiler& f, ValType type,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
                           Synchronization::Full());
   auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
@@ -3529,6 +3538,7 @@ static bool EmitAtomicStore(FunctionCompiler& f, ValType type,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
                           Synchronization::Store());
   f.store(addr.base, &access, value);
@@ -3555,6 +3565,7 @@ static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(type == ValType::I32 ? Scalar::Int32 : Scalar::Int64,
                           addr.align, addr.offset, f.bytecodeOffset());
   MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
@@ -3612,6 +3623,7 @@ static bool EmitWake(FunctionCompiler& f) {
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
                           f.bytecodeOffset());
   MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
@@ -3648,6 +3660,7 @@ static bool EmitAtomicXchg(FunctionCompiler& f, ValType type,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
                           Synchronization::Full());
   MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
@@ -3841,6 +3854,7 @@ static bool EmitMemCopy(FunctionCompiler& f) {
     return true;
   }
+  MOZ_ASSERT(f.isMem32());
 
   if (len->isConstant() && len->type() == MIRType::Int32 &&
       len->toConstant()->toInt32() != 0 &&
       uint32_t(len->toConstant()->toInt32()) <= MaxInlineMemoryCopyLength) {
@@ -4069,6 +4083,7 @@ static bool EmitMemFill(FunctionCompiler& f) {
     return true;
   }
+  MOZ_ASSERT(f.isMem32());
 
   if (len->isConstant() && len->type() == MIRType::Int32 &&
       len->toConstant()->toInt32() != 0 &&
       uint32_t(len->toConstant()->toInt32()) <= MaxInlineMemoryFillLength &&
@@ -4090,6 +4105,7 @@ static bool EmitMemOrTableInit(FunctionCompiler& f, bool isMem) {
     return true;
   }
+  MOZ_ASSERT_IF(isMem, f.isMem32());
 
   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
   const SymbolicAddressSignature& callee =
@@ -4580,6 +4596,7 @@ static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
   return true;
 }
@@ -4590,6 +4607,7 @@ static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   f.iter().setResult(f.loadExtendSimd128(addr, op));
   return true;
 }
@@ -4601,6 +4619,7 @@ static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
   return true;
 }
@@ -4613,6 +4632,7 @@ static bool EmitLoadLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   f.iter().setResult(f.loadLaneSimd128(laneSize, addr, laneIndex, src));
   return true;
 }
@@ -4625,6 +4645,7 @@ static bool EmitStoreLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
     return false;
   }
+  MOZ_ASSERT(f.isMem32());
 
   f.storeLaneSimd128(laneSize, addr, laneIndex, src);
   return true;
 }
@@ -5885,7 +5906,11 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
   MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
                    IonOptimizations.get(OptimizationLevel::Wasm));
   if (moduleEnv.usesMemory()) {
-    mir.initMinWasmHeapLength(moduleEnv.memory->initialLength32());
+    if (moduleEnv.memory->indexType() == IndexType::I32) {
+      mir.initMinWasmHeapLength(moduleEnv.memory->initialLength32());
+    } else {
+      mir.initMinWasmHeapLength(moduleEnv.memory->initialLength64());
+    }
   }
 
   // Build MIR graph

@@ -517,6 +517,11 @@ struct MemoryDesc {
     return limits.initial * PageSize;
   }
 
+  uint64_t initialLength64() const {
+    MOZ_ASSERT(indexType() == IndexType::I64);
+    return limits.initial * PageSize;
+  }
+
   MemoryDesc() = default;
   explicit MemoryDesc(Limits limits) : limits(limits) {}
 };

@@ -6208,7 +6208,7 @@
 #if defined(ENABLE_WASM_MEMORY64)
 - name: javascript.options.wasm_memory64
   type: bool
-  value: true
+  value: false
   mirror: always
 #endif // defined(ENABLE_WASM_MEMORY64)