Bug 1277008 - Wasm baseline: x86 support. r=bbouvier

--HG--
extra : rebase_source : 019a8906c06ede3a2a14265e4885fe91ab23b017
extra : histedit_source : 4771423b8391681294e9b8a7aa9c7d5e5d72d830
This commit is contained in:
Lars T Hansen 2016-07-13 12:29:26 +02:00
Родитель cd1f5c0400
Коммит 34f3c37654
1 изменённый файл: 401 добавление и 83 удаления

Просмотреть файл

@ -29,13 +29,12 @@
* replacement (duh)
* - int64 load and store
* - SIMD
* - Atomics
* - Atomics (very simple now, we have range checking)
* - current_memory, grow_memory
* - non-signaling interrupts
* - non-signaling bounds checks
* - profiler support (devtools)
* - Platform support:
* x86
* ARM-32
* ARM-64
*
@ -125,6 +124,7 @@ namespace js {
namespace wasm {
using namespace js::jit;
using JS::GenericNaN;
struct BaseCompilePolicy : ExprIterPolicy
{
@ -304,10 +304,39 @@ class BaseCompiler
explicit AnyReg(RegF32 r) { tag = F32; f32_ = r; }
explicit AnyReg(RegF64 r) { tag = F64; f64_ = r; }
RegI32 i32() { MOZ_ASSERT(tag == I32); return i32_; }
RegI64 i64() { MOZ_ASSERT(tag == I64); return i64_; }
RegF32 f32() { MOZ_ASSERT(tag == F32); return f32_; }
RegF64 f64() { MOZ_ASSERT(tag == F64); return f64_; }
RegI32 i32() {
MOZ_ASSERT(tag == I32);
return i32_;
}
RegI64 i64() {
MOZ_ASSERT(tag == I64);
return i64_;
}
RegF32 f32() {
MOZ_ASSERT(tag == F32);
return f32_;
}
RegF64 f64() {
MOZ_ASSERT(tag == F64);
return f64_;
}
AnyRegister any() {
switch (tag) {
case F32: return AnyRegister(f32_.reg);
case F64: return AnyRegister(f64_.reg);
case I32: return AnyRegister(i32_.reg);
case I64:
#ifdef JS_PUNBOX64
return AnyRegister(i64_.reg.reg);
#else
MOZ_CRASH("WasmBaseline platform hook: AnyReg::any()");
#endif
case NONE:
MOZ_CRASH("AnyReg::any() on NONE");
}
// Work around GCC 5 analysis/warning bug.
MOZ_CRASH("AnyReg::any(): impossible case");
}
union {
RegI32 i32_;
@ -457,6 +486,10 @@ class BaseCompiler
RegI32 specific_edx;
#endif
#if defined(JS_CODEGEN_X86)
AllocatableGeneralRegisterSet singleByteRegs_;
#endif
// The join registers are used to carry values out of blocks.
// JoinRegI32 and joinRegI64 must overlap: emitBrIf and
// emitBrTable assume that.
@ -1683,6 +1716,9 @@ class BaseCompiler
MOZ_MUST_USE
PooledLabel* newLabel() {
// TODO / INVESTIGATE: allocate() is fallible, but we can
// probably rely on an infallible allocator here. That would
// simplify code later.
PooledLabel* candidate = labelPool_.allocate();
if (!candidate)
return nullptr;
@ -1860,7 +1896,7 @@ class BaseCompiler
}
call.frameAlignAdjustment_ = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame),
ABIStackAlignment);
JitStackAlignment);
}
void endCall(FunctionCall& call)
@ -2619,13 +2655,16 @@ class BaseCompiler
//
// Global variable access.
// CodeGeneratorX64::visitAsmJSLoadGlobalVar()
// CodeGenerator{X86,X64}::visitAsmJSLoadGlobalVar()
void loadGlobalVarI32(unsigned globalDataOffset, RegI32 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.loadRipRelativeInt32(r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#elif defined(JS_CODEGEN_X86)
CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarI32");
#endif
@ -2633,7 +2672,7 @@ class BaseCompiler
void loadGlobalVarI64(unsigned globalDataOffset, RegI64 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.loadRipRelativeInt64(r.reg.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
@ -2643,9 +2682,12 @@ class BaseCompiler
void loadGlobalVarF32(unsigned globalDataOffset, RegF32 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.loadRipRelativeFloat32(r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#elif defined(JS_CODEGEN_X86)
CodeOffset label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarF32");
#endif
@ -2653,9 +2695,12 @@ class BaseCompiler
void loadGlobalVarF64(unsigned globalDataOffset, RegF64 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.loadRipRelativeDouble(r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#elif defined(JS_CODEGEN_X86)
CodeOffset label = masm.vmovsdWithPatch(PatchedAbsoluteAddress(), r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarF32");
#endif
@ -2665,9 +2710,12 @@ class BaseCompiler
void storeGlobalVarI32(unsigned globalDataOffset, RegI32 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.storeRipRelativeInt32(r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#elif defined(JS_CODEGEN_X86)
CodeOffset label = masm.movlWithPatch(r.reg, PatchedAbsoluteAddress());
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarI32");
#endif
@ -2675,7 +2723,7 @@ class BaseCompiler
void storeGlobalVarI64(unsigned globalDataOffset, RegI64 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.storeRipRelativeInt64(r.reg.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
@ -2685,9 +2733,12 @@ class BaseCompiler
void storeGlobalVarF32(unsigned globalDataOffset, RegF32 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.storeRipRelativeFloat32(r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#elif defined(JS_CODEGEN_X86)
CodeOffset label = masm.vmovssWithPatch(r.reg, PatchedAbsoluteAddress());
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF32");
#endif
@ -2695,9 +2746,12 @@ class BaseCompiler
void storeGlobalVarF64(unsigned globalDataOffset, RegF64 r)
{
#ifdef JS_CODEGEN_X64
#if defined(JS_CODEGEN_X64)
CodeOffset label = masm.storeRipRelativeDouble(r.reg);
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#elif defined(JS_CODEGEN_X86)
CodeOffset label = masm.vmovsdWithPatch(r.reg, PatchedAbsoluteAddress());
masm.append(AsmJSGlobalAccess(label, globalDataOffset));
#else
MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF64");
#endif
@ -2707,28 +2761,7 @@ class BaseCompiler
//
// Heap access.
#if defined(JS_CODEGEN_X64)
// Copied from CodeGenerator-x64.cpp
// TODO / CLEANUP - share with the code generator.
MemoryAccess
WasmMemoryAccess(uint32_t before)
{
if (isCompilingAsmJS())
return MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset);
return MemoryAccess(before, MemoryAccess::Throw, MemoryAccess::DontWrapOffset);
}
#endif
void memoryBarrier(MemoryBarrierBits barrier) {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
if (barrier & MembarStoreLoad)
masm.storeLoadFence();
#else
MOZ_CRASH("BaseCompiler platform hook: memoryBarrier");
#endif
}
// Cloned from MIRGraph.cpp, merge somehow?
// TODO / CLEANUP - cloned from MIRGraph.cpp, should share.
bool needsBoundsCheckBranch(const MWasmMemoryAccess& access) const {
// A heap access needs a bounds-check branch if we're not relying on signal
@ -2745,31 +2778,210 @@ class BaseCompiler
return access.needsBoundsCheck();
}
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
void verifyHeapAccessDisassembly(uint32_t before, uint32_t after, bool isLoad,
Scalar::Type accessType, int nelems, Operand srcAddr,
AnyReg dest)
{
#ifdef DEBUG
// TODO / MISSING: this needs to be adapted from what's in the
// platform's CodeGenerator; that code takes an LAllocation as
// the last arg now.
#endif
bool throwOnOutOfBounds(const MWasmMemoryAccess& access) {
return access.isAtomicAccess() || !isCompilingAsmJS();
}
#endif
void loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
if (access.offset() > INT32_MAX) {
masm.jump(wasm::JumpTarget::OutOfBounds);
return;
// For asm.js code only: If we have a non-zero offset, it's possible that
// |ptr| itself is out of bounds, while adding the offset computes an
// in-bounds address. To catch this case, we need a second branch, which we
// emit out of line since it's unlikely to be needed in normal programs.
// For this, we'll generate an OffsetBoundsCheck OOL stub.
bool needsOffsetBoundsCheck(const MWasmMemoryAccess& access) const {
return isCompilingAsmJS() && access.offset() != 0;
}
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
# if defined(JS_CODEGEN_X64)
// TODO / CLEANUP - copied from CodeGenerator-x64.cpp, should share.
MemoryAccess
WasmMemoryAccess(uint32_t before)
{
if (isCompilingAsmJS())
return MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset);
return MemoryAccess(before, MemoryAccess::Throw, MemoryAccess::DontWrapOffset);
}
# endif
class OffsetBoundsCheck : public OutOfLineCode
{
Label* maybeOutOfBounds;
Register ptrReg;
int32_t offset;
public:
OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset)
: maybeOutOfBounds(maybeOutOfBounds),
ptrReg(ptrReg),
offset(offset)
{}
void generate(MacroAssembler& masm) {
// asm.js code only:
//
// The access is heap[ptr + offset]. The inline code checks that
// ptr < heap.length - offset. We get here when that fails. We need to check
// for the case where ptr + offset >= 0, in which case the access is still
// in bounds.
MOZ_ASSERT(offset != 0,
"An access without a constant offset doesn't need a separate "
"OffsetBoundsCheck");
masm.cmp32(ptrReg, Imm32(-uint32_t(offset)));
if (maybeOutOfBounds)
masm.j(Assembler::Below, maybeOutOfBounds);
else
masm.j(Assembler::Below, wasm::JumpTarget::OutOfBounds);
# ifdef JS_CODEGEN_X64
// In order to get the offset to wrap properly, we must sign-extend the
// pointer to 32-bits. We'll zero out the sign extension immediately
// after the access to restore asm.js invariants.
masm.movslq(ptrReg, ptrReg);
# endif
masm.jmp(rejoin());
}
};
// CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch()
MOZ_MUST_USE
bool emitBoundsCheckBranch(const MWasmMemoryAccess& access, RegI32 ptr, Label* maybeFail) {
Label* pass = nullptr;
if (needsOffsetBoundsCheck(access)) {
auto* oolCheck = new(alloc_) OffsetBoundsCheck(maybeFail, ptr.reg, access.offset());
maybeFail = oolCheck->entry();
pass = oolCheck->rejoin();
if (!addOutOfLineCode(oolCheck))
return false;
}
#if defined(JS_CODEGEN_X64)
// CodeGeneratorX64::visitAsmJSLoadHeap()/visitWasmLoad()/visitWasmLoadI64()
// The bounds check is a comparison with an immediate value. The asm.js
// module linking process will add the length of the heap to the immediate
// field, so -access->endOffset() will turn into
// (heapLength - access->endOffset()), allowing us to test whether the end
// of the access is beyond the end of the heap.
MOZ_ASSERT(access.endOffset() >= 1,
"need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");
if (needsBoundsCheckBranch(access))
MOZ_CRASH("BaseCompiler platform hook: bounds checking");
uint32_t cmpOffset = masm.cmp32WithPatch(ptr.reg, Imm32(1 - access.endOffset())).offset();
if (maybeFail)
masm.j(Assembler::AboveOrEqual, maybeFail);
else
masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds);
if (pass)
masm.bind(pass);
masm.append(wasm::BoundsCheck(cmpOffset));
return true;
}
class OutOfLineLoadTypedArrayOOB : public OutOfLineCode
{
Scalar::Type viewType;
AnyRegister dest;
public:
OutOfLineLoadTypedArrayOOB(Scalar::Type viewType, AnyRegister dest)
: viewType(viewType),
dest(dest)
{}
void generate(MacroAssembler& masm) {
switch (viewType) {
case Scalar::Float32x4:
case Scalar::Int32x4:
case Scalar::Int8x16:
case Scalar::Int16x8:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
case Scalar::Float32:
masm.loadConstantFloat32(float(GenericNaN()), dest.fpu());
break;
case Scalar::Float64:
masm.loadConstantDouble(GenericNaN(), dest.fpu());
break;
case Scalar::Int8:
case Scalar::Uint8:
case Scalar::Int16:
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
case Scalar::Uint8Clamped:
masm.movePtr(ImmWord(0), dest.gpr());
break;
case Scalar::Int64:
MOZ_CRASH("unexpected array type");
}
masm.jump(rejoin());
}
};
MOZ_MUST_USE
bool maybeEmitLoadBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, AnyRegister dest,
OutOfLineCode** ool)
{
*ool = nullptr;
if (!needsBoundsCheckBranch(access))
return true;
if (throwOnOutOfBounds(access))
return emitBoundsCheckBranch(access, ptr, nullptr);
// TODO / MEMORY: We'll allocate *a lot* of these OOL objects,
// thus risking OOM on a platform that is already
// memory-constrained. We could opt to allocate this path
// in-line instead.
*ool = new (alloc_) OutOfLineLoadTypedArrayOOB(access.accessType(), dest);
if (!addOutOfLineCode(*ool))
return false;
return emitBoundsCheckBranch(access, ptr, (*ool)->entry());
}
MOZ_MUST_USE
bool maybeEmitStoreBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, Label** rejoin) {
*rejoin = nullptr;
if (!needsBoundsCheckBranch(access))
return true;
if (throwOnOutOfBounds(access))
return emitBoundsCheckBranch(access, ptr, nullptr);
*rejoin = newLabel();
if (!*rejoin)
return false;
return emitBoundsCheckBranch(access, ptr, *rejoin);
}
void cleanupAfterBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr) {
# ifdef JS_CODEGEN_X64
if (needsOffsetBoundsCheck(access)) {
// Zero out the high 32 bits, in case the OffsetBoundsCheck code had to
// sign-extend (movslq) the pointer value to get wraparound to work.
masm.movl(ptr.reg, ptr.reg);
}
# endif
}
MOZ_MUST_USE
bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
if (access.offset() > INT32_MAX) {
masm.jump(wasm::JumpTarget::OutOfBounds);
return true;
}
OutOfLineCode* ool = nullptr;
if (!maybeEmitLoadBoundsCheck(access, ptr, dest.any(), &ool))
return false;
# if defined(JS_CODEGEN_X64)
Operand srcAddr(HeapReg, ptr.reg, TimesOne, access.offset());
uint32_t before = masm.size();
@ -2801,22 +3013,64 @@ class BaseCompiler
MOZ_CRASH("Compiler bug: Unexpected array type");
}
}
uint32_t after = masm.size();
masm.append(WasmMemoryAccess(before));
verifyHeapAccessDisassembly(before, after, IsLoad(true), access.accessType(), 0, srcAddr, dest);
#else
MOZ_CRASH("BaseCompiler platform hook: loadHeap");
#endif
// TODO: call verifyHeapAccessDisassembly somehow
# elif defined(JS_CODEGEN_X86)
Operand srcAddr(ptr.reg, access.offset());
if (dest.tag == AnyReg::I64)
MOZ_CRASH("Not implemented: I64 support");
bool mustMove = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
switch (access.accessType()) {
case Scalar::Int8:
case Scalar::Uint8: {
Register rd = mustMove ? ScratchRegX86 : dest.i32().reg;
if (access.accessType() == Scalar::Int8)
masm.movsblWithPatch(srcAddr, rd);
else
masm.movzblWithPatch(srcAddr, rd);
break;
}
case Scalar::Int16: masm.movswlWithPatch(srcAddr, dest.i32().reg); break;
case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, dest.i32().reg); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movlWithPatch(srcAddr, dest.i32().reg); break;
case Scalar::Float32: masm.vmovssWithPatch(srcAddr, dest.f32().reg); break;
case Scalar::Float64: masm.vmovsdWithPatch(srcAddr, dest.f64().reg); break;
default:
MOZ_CRASH("Compiler bug: Unexpected array type");
}
uint32_t after = masm.size();
if (mustMove)
masm.mov(ScratchRegX86, dest.i32().reg);
masm.append(wasm::MemoryAccess(after));
// TODO: call verifyHeapAccessDisassembly somehow
# else
MOZ_CRASH("Compiler bug: Unexpected platform.");
# endif
if (ool) {
cleanupAfterBoundsCheck(access, ptr);
masm.bind(ool->rejoin());
}
return true;
}
void storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
#if defined(JS_CODEGEN_X64)
// CodeGeneratorX64::visitAsmJSStoreHeap()
MOZ_MUST_USE
bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
if (access.offset() > INT32_MAX) {
masm.jump(wasm::JumpTarget::OutOfBounds);
return true;
}
if (needsBoundsCheckBranch(access))
MOZ_CRASH("BaseCompiler platform hook: bounds checking");
Label* rejoin = nullptr;
if (!maybeEmitStoreBoundsCheck(access, ptr, &rejoin))
return false;
# if defined(JS_CODEGEN_X64)
Operand dstAddr(HeapReg, ptr.reg, TimesOne, access.offset());
Register intReg;
@ -2839,15 +3093,66 @@ class BaseCompiler
default:
MOZ_CRASH("Compiler bug: Unexpected array type");
}
uint32_t after = masm.size();
masm.append(WasmMemoryAccess(before));
verifyHeapAccessDisassembly(before, after, IsLoad(false), access.accessType(), 0, dstAddr, src);
#else
MOZ_CRASH("BaseCompiler platform hook: storeHeap");
#endif
// TODO: call verifyHeapAccessDisassembly somehow
# elif defined(JS_CODEGEN_X86)
Operand dstAddr(ptr.reg, access.offset());
if (src.tag == AnyReg::I64)
MOZ_CRASH("Not implemented: I64 support");
bool didMove = false;
if (access.byteSize() == 1 && !singleByteRegs_.has(src.i32().reg)) {
didMove = true;
masm.mov(src.i32().reg, ScratchRegX86);
}
switch (access.accessType()) {
case Scalar::Int8:
case Scalar::Uint8: {
Register rs = src.i32().reg;
Register rt = didMove ? ScratchRegX86 : rs;
masm.movbWithPatch(rt, dstAddr);
break;
}
case Scalar::Int16:
case Scalar::Uint16: masm.movwWithPatch(src.i32().reg, dstAddr); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movlWithPatch(src.i32().reg, dstAddr); break;
case Scalar::Float32: masm.vmovssWithPatch(src.f32().reg, dstAddr); break;
case Scalar::Float64: masm.vmovsdWithPatch(src.f64().reg, dstAddr); break;
default:
MOZ_CRASH("Compiler bug: Unexpected array type");
}
uint32_t after = masm.size();
masm.append(wasm::MemoryAccess(after));
// TODO: call verifyHeapAccessDisassembly somehow
# else
MOZ_CRASH("Compiler bug: unexpected platform");
# endif
if (rejoin) {
cleanupAfterBoundsCheck(access, ptr);
masm.bind(rejoin);
}
return true;
}
#else
MOZ_MUST_USE
bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
MOZ_CRASH("BaseCompiler platform hook: loadHeap");
}
MOZ_MUST_USE
bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
MOZ_CRASH("BaseCompiler platform hook: storeHeap");
}
#endif
////////////////////////////////////////////////////////////
// Generally speaking, ABOVE this point there should be no value
@ -5244,14 +5549,16 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
switch (type) {
case ValType::I32: {
RegI32 rp = popI32();
loadHeap(access, rp, AnyReg(rp));
if (!loadHeap(access, rp, AnyReg(rp)))
return false;
pushI32(rp);
break;
}
case ValType::I64: {
RegI32 rp = popI32();
RegI64 rv = needI64();
loadHeap(access, rp, AnyReg(rv));
if (!loadHeap(access, rp, AnyReg(rv)))
return false;
pushI64(rv);
freeI32(rp);
break;
@ -5259,7 +5566,8 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
case ValType::F32: {
RegI32 rp = popI32();
RegF32 rv = needF32();
loadHeap(access, rp, AnyReg(rv));
if (!loadHeap(access, rp, AnyReg(rv)))
return false;
pushF32(rv);
freeI32(rp);
break;
@ -5267,7 +5575,8 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
case ValType::F64: {
RegI32 rp = popI32();
RegF64 rv = needF64();
loadHeap(access, rp, AnyReg(rv));
if (!loadHeap(access, rp, AnyReg(rv)))
return false;
pushF64(rv);
freeI32(rp);
break;
@ -5299,7 +5608,8 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
case ValType::I32: {
RegI32 rp, rv;
pop2xI32(&rp, &rv);
storeHeap(access, rp, AnyReg(rv));
if (!storeHeap(access, rp, AnyReg(rv)))
return false;
freeI32(rp);
pushI32(rv);
break;
@ -5307,7 +5617,8 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
case ValType::I64: {
RegI64 rv = popI64();
RegI32 rp = popI32();
storeHeap(access, rp, AnyReg(rv));
if (!storeHeap(access, rp, AnyReg(rv)))
return false;
freeI32(rp);
pushI64(rv);
break;
@ -5315,7 +5626,8 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
case ValType::F32: {
RegF32 rv = popF32();
RegI32 rp = popI32();
storeHeap(access, rp, AnyReg(rv));
if (!storeHeap(access, rp, AnyReg(rv)))
return false;
freeI32(rp);
pushF32(rv);
break;
@ -5323,7 +5635,8 @@ BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
case ValType::F64: {
RegF64 rv = popF64();
RegI32 rp = popI32();
storeHeap(access, rp, AnyReg(rv));
if (!storeHeap(access, rp, AnyReg(rv)))
return false;
freeI32(rp);
pushF64(rv);
break;
@ -5584,7 +5897,8 @@ BaseCompiler::emitStoreWithCoercion(ValType resultType, Scalar::Type viewType)
RegF64 rw = needF64();
masm.convertFloat32ToDouble(rv.reg, rw.reg);
RegI32 rp = popI32();
storeHeap(access, rp, AnyReg(rw));
if (!storeHeap(access, rp, AnyReg(rw)))
return false;
pushF32(rv);
freeI32(rp);
freeF64(rw);
@ -5594,7 +5908,8 @@ BaseCompiler::emitStoreWithCoercion(ValType resultType, Scalar::Type viewType)
RegF32 rw = needF32();
masm.convertDoubleToFloat32(rv.reg, rw.reg);
RegI32 rp = popI32();
storeHeap(access, rp, AnyReg(rw));
if (!storeHeap(access, rp, AnyReg(rw)))
return false;
pushF64(rv);
freeI32(rp);
freeF32(rw);
@ -6257,6 +6572,9 @@ BaseCompiler::BaseCompiler(const ModuleGeneratorData& mg,
specific_eax(RegI32(eax)),
specific_ecx(RegI32(ecx)),
specific_edx(RegI32(edx)),
#endif
#ifdef JS_CODEGEN_X86
singleByteRegs_(GeneralRegisterSet(Registers::SingleByteRegs)),
#endif
joinRegI32(RegI32(ReturnReg)),
joinRegI64(RegI64(Register64(ReturnReg))),
@ -6399,7 +6717,7 @@ LiveRegisterSet BaseCompiler::VolatileReturnGPR = volatileReturnGPR();
bool
js::wasm::BaselineCanCompile(const FunctionGenerator* fg)
{
#if defined(JS_CODEGEN_X64)
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
if (!fg->usesSignalsForInterrupts())
return false;