Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1284414 : Wasm baseline MIPS32/64 r=lth
--HG-- extra : rebase_source : 57cd4813b2f8d15446902ade2f4fb0786841650b
This commit is contained in:
Parent: e3e552ebad
Commit: f1ef29a421
@@ -1490,83 +1490,85 @@ class MacroAssembler : public MacroAssemblerSpecific
     // `ptr` will be updated if access.offset() != 0 or access.type() == Scalar::Int64.
     void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
                   Register ptrScratch, AnyRegister output)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips_shared);
     void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
                      Register ptrScratch, Register64 output)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips32, mips64);
     void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register memoryBase,
                    Register ptr, Register ptrScratch)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips_shared);
     void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
                       Register ptr, Register ptrScratch)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips32, mips64);

     // `ptr` will always be updated.
     void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
                            Register ptrScratch, Register output, Register tmp)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips32, mips64);

-    // `ptr` will always be updated and `tmp1` is always needed. `tmp2` is
+    // ARM: `ptr` will always be updated and `tmp1` is always needed. `tmp2` is
     // needed for Float32; `tmp2` and `tmp3` are needed for Float64. Temps must
     // be Invalid when they are not needed.
+    // MIPS: `ptr` will always be updated.
     void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
                              Register ptrScratch, FloatRegister output, Register tmp1, Register tmp2,
                              Register tmp3)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips32, mips64);

     // `ptr` will always be updated.
     void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
                               Register ptrScratch, Register64 output, Register tmp)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips32, mips64);

-    // `ptr` and `value` will always be updated.
+    // ARM: `ptr` and `value` will always be updated. 'tmp' must be Invalid.
+    // MIPS: `ptr` will always be updated.
     void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value, Register memoryBase,
-                            Register ptr, Register ptrScratch)
-        DEFINED_ON(arm);
+                            Register ptr, Register ptrScratch, Register tmp)
+        DEFINED_ON(arm, mips32, mips64);

     // `ptr` will always be updated.
     void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, FloatRegister floatValue,
                               Register memoryBase, Register ptr, Register ptrScratch, Register tmp)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips32, mips64);

     // `ptr` will always be updated.
     void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
                                Register memoryBase, Register ptr, Register ptrScratch,
                                Register tmp)
-        DEFINED_ON(arm);
+        DEFINED_ON(arm, mips32, mips64);

     // wasm specific methods, used in both the wasm baseline compiler and ion.
     void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) PER_ARCH;
     void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) PER_SHARED_ARCH;
     void oolWasmTruncateCheckF64ToI32(FloatRegister input, bool isUnsigned,
                                       wasm::BytecodeOffset off, Label* rejoin)
-        DEFINED_ON(arm, arm64, x86_shared);
+        DEFINED_ON(arm, arm64, x86_shared, mips_shared);

     void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry) PER_ARCH;
     void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry) PER_SHARED_ARCH;
     void oolWasmTruncateCheckF32ToI32(FloatRegister input, bool isUnsigned,
                                       wasm::BytecodeOffset off, Label* rejoin)
-        DEFINED_ON(arm, arm64, x86_shared);
+        DEFINED_ON(arm, arm64, x86_shared, mips_shared);

     void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, Label* oolEntry,
                                    Label* oolRejoin, FloatRegister tempDouble)
-        DEFINED_ON(arm64, x86, x64);
+        DEFINED_ON(arm64, x86, x64, mips64);
     void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
                                    Label* oolRejoin, FloatRegister tempDouble)
-        DEFINED_ON(arm64, x86, x64);
+        DEFINED_ON(arm64, x86, x64, mips64);
     void oolWasmTruncateCheckF64ToI64(FloatRegister input, bool isUnsigned,
                                       wasm::BytecodeOffset off, Label* rejoin)
-        DEFINED_ON(arm, arm64, x86_shared);
+        DEFINED_ON(arm, arm64, x86_shared, mips_shared);

     void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, Label* oolEntry,
                                     Label* oolRejoin, FloatRegister tempDouble)
-        DEFINED_ON(arm64, x86, x64);
+        DEFINED_ON(arm64, x86, x64, mips64);
     void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, Label* oolEntry,
                                      Label* oolRejoin, FloatRegister tempDouble)
-        DEFINED_ON(arm64, x86, x64);
+        DEFINED_ON(arm64, x86, x64, mips64);
     void oolWasmTruncateCheckF32ToI64(FloatRegister input, bool isUnsigned,
                                       wasm::BytecodeOffset off, Label* rejoin)
-        DEFINED_ON(arm, arm64, x86_shared);
+        DEFINED_ON(arm, arm64, x86_shared, mips_shared);

     // This function takes care of loading the callee's TLS and pinned regs but
     // it is the caller's responsibility to save/restore TLS or pinned regs.
@@ -2176,7 +2176,7 @@ CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
         FloatRegister value = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
         masm.wasmUnalignedStoreFP(mir->access(), value, HeapReg, ptr, ptr, valOrTmp);
     } else {
-        masm.wasmUnalignedStore(mir->access(), valOrTmp, HeapReg, ptr, ptr);
+        masm.wasmUnalignedStore(mir->access(), valOrTmp, HeapReg, ptr, ptr, Register::Invalid());
     }
 }
@@ -5008,8 +5008,10 @@ MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Regis

 void
 MacroAssembler::wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
-                                   Register memoryBase, Register ptr, Register ptrScratch)
+                                   Register memoryBase, Register ptr, Register ptrScratch,
+                                   Register tmp)
 {
+    MOZ_ASSERT(tmp == Register::Invalid());
     wasmUnalignedStoreImpl(access, FloatRegister(), Register64::Invalid(), memoryBase, ptr,
                            ptrScratch, value);
 }
@@ -1490,79 +1490,10 @@ CodeGeneratorMIPSShared::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
 void
 CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool)
 {
-    FloatRegister input = ool->input();
-    MIRType fromType = ool->fromType();
-    MIRType toType = ool->toType();
-    bool isUnsigned = ool->isUnsigned();
-
-    // Eagerly take care of NaNs.
-    Label inputIsNaN;
-    if (fromType == MIRType::Double)
-        masm.branchDouble(Assembler::DoubleUnordered, input, input, &inputIsNaN);
-    else if (fromType == MIRType::Float32)
-        masm.branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
-    else
-        MOZ_CRASH("unexpected type in visitOutOfLineWasmTruncateCheck");
-
-    // By default test for the following inputs and bail:
-    // signed:   ] -Inf, INTXX_MIN - 1.0 ] and [ INTXX_MAX + 1.0 : +Inf [
-    // unsigned: ] -Inf, -1.0 ] and [ UINTXX_MAX + 1.0 : +Inf [
-    // Note: we cannot always represent those exact values. As a result
-    // this changes the actual comparison a bit.
-    double minValue, maxValue;
-    Assembler::DoubleCondition minCond = Assembler::DoubleLessThanOrEqual;
-    Assembler::DoubleCondition maxCond = Assembler::DoubleGreaterThanOrEqual;
-    if (toType == MIRType::Int64) {
-        if (isUnsigned) {
-            minValue = -1;
-            maxValue = double(UINT64_MAX) + 1.0;
-        } else {
-            // In the float32/double range there exists no value between
-            // INT64_MIN and INT64_MIN - 1.0. Making INT64_MIN the lower-bound.
-            minValue = double(INT64_MIN);
-            minCond = Assembler::DoubleLessThan;
-            maxValue = double(INT64_MAX) + 1.0;
-        }
-    } else {
-        if (isUnsigned) {
-            minValue = -1;
-            maxValue = double(UINT32_MAX) + 1.0;
-        } else {
-            if (fromType == MIRType::Float32) {
-                // In the float32 range there exists no value between
-                // INT32_MIN and INT32_MIN - 1.0. Making INT32_MIN the lower-bound.
-                minValue = double(INT32_MIN);
-                minCond = Assembler::DoubleLessThan;
-            } else {
-                minValue = double(INT32_MIN) - 1.0;
-            }
-            maxValue = double(INT32_MAX) + 1.0;
-        }
-    }
-
-    Label fail;
-
-    if (fromType == MIRType::Double) {
-        masm.loadConstantDouble(minValue, ScratchDoubleReg);
-        masm.branchDouble(minCond, input, ScratchDoubleReg, &fail);
-
-        masm.loadConstantDouble(maxValue, ScratchDoubleReg);
-        masm.branchDouble(maxCond, input, ScratchDoubleReg, &fail);
-    } else {
-        masm.loadConstantFloat32(float(minValue), ScratchFloat32Reg);
-        masm.branchFloat(minCond, input, ScratchFloat32Reg, &fail);
-
-        masm.loadConstantFloat32(float(maxValue), ScratchFloat32Reg);
-        masm.branchFloat(maxCond, input, ScratchFloat32Reg, &fail);
-    }
-
-    masm.jump(ool->rejoin());
-
-    // Handle errors.
-    masm.bind(&fail);
-    masm.jump(oldTrap(ool, wasm::Trap::IntegerOverflow));
-
-    masm.bind(&inputIsNaN);
-    masm.jump(oldTrap(ool, wasm::Trap::InvalidConversionToInteger));
+    masm.outOfLineWasmTruncateToIntCheck(ool->input(), ool->fromType(), ool->toType(),
+                                         ool->isUnsigned(), ool->rejoin(),
+                                         ool->bytecodeOffset());
 }

 void
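The open-coded range check deleted here (now shared via outOfLineWasmTruncateToIntCheck) encodes which boundary values are exactly representable in floating point. Those representability facts, in a runnable C++ note (an illustration only, not SpiderMonkey code):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // double(INT64_MAX) rounds up to 2^63 == INT64_MAX + 1 exactly, which
        // is why the upper bound is "INT64_MAX + 1.0" with a >= comparison.
        printf("%.1f\n", double(INT64_MAX));        // 9223372036854775808.0
        // INT64_MIN == -2^63 is exact and has no representable neighbor down
        // to INT64_MIN - 1.0, hence the strict DoubleLessThan lower bound.
        printf("%.1f\n", double(INT64_MIN));        // -9223372036854775808.0
        // From a double, INT32_MIN - 1.0 is exactly representable, so the
        // Int32 case can use the non-strict comparison instead.
        printf("%.1f\n", double(INT32_MIN) - 1.0);  // -2147483649.0
    }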
@@ -1869,77 +1800,24 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset <= INT32_MAX);
-
-    Register ptr = ToRegister(lir->ptr());
-
-    // Maybe add the offset.
-    if (offset) {
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.addPtr(Imm32(offset), ptrPlusOffset);
-        ptr = ptrPlusOffset;
-    } else {
-        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+    Register ptrScratch = InvalidReg;
+    if (!lir->ptrCopy()->isBogusTemp()) {
+        ptrScratch = ToRegister(lir->ptrCopy());
     }

-    unsigned byteSize = mir->access().byteSize();
-    bool isSigned;
-    bool isFloat = false;
-
-    switch (mir->access().type()) {
-      case Scalar::Int8:    isSigned = true;  break;
-      case Scalar::Uint8:   isSigned = false; break;
-      case Scalar::Int16:   isSigned = true;  break;
-      case Scalar::Uint16:  isSigned = false; break;
-      case Scalar::Int32:   isSigned = true;  break;
-      case Scalar::Uint32:  isSigned = false; break;
-      case Scalar::Float64: isFloat  = true;  break;
-      case Scalar::Float32: isFloat  = true;  break;
-      default: MOZ_CRASH("unexpected array type");
-    }
-
-    masm.memoryBarrierBefore(mir->access().sync());
-
-    BaseIndex address(HeapReg, ptr, TimesOne);
-
     if (IsUnaligned(mir->access())) {
-        Register temp = ToRegister(lir->getTemp(1));
-
-        if (isFloat) {
-            FloatRegister output = ToFloatRegister(lir->output());
-
-            if (byteSize == 4)
-                masm.loadUnalignedFloat32(mir->access(), address, temp, output);
-            else
-                masm.loadUnalignedDouble(mir->access(), address, temp, output);
+        if (IsFloatingPointType(mir->type())) {
+            masm.wasmUnalignedLoadFP(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+                                     ToFloatRegister(lir->output()), ToRegister(lir->getTemp(1)),
+                                     InvalidReg, InvalidReg);
         } else {
-            masm.ma_load_unaligned(mir->access(), ToRegister(lir->output()), address, temp,
-                                   static_cast<LoadStoreSize>(8 * byteSize),
-                                   isSigned ? SignExtend : ZeroExtend);
-        }
-
-        masm.memoryBarrierAfter(mir->access().sync());
-        return;
-    }
-
-    if (isFloat) {
-        FloatRegister output = ToFloatRegister(lir->output());
-
-        if (byteSize == 4) {
-            masm.loadFloat32(address, output);
-        } else {
-            masm.computeScaledAddress(address, SecondScratchReg);
-            masm.as_ld(output, SecondScratchReg, 0);
+            masm.wasmUnalignedLoad(mir->access(), HeapReg, ToRegister(lir->ptr()),
+                                   ptrScratch, ToRegister(lir->output()), ToRegister(lir->getTemp(1)));
         }
     } else {
-        masm.ma_load(ToRegister(lir->output()), address,
-                     static_cast<LoadStoreSize>(8 * byteSize),
-                     isSigned ? SignExtend : ZeroExtend);
+        masm.wasmLoad(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+                      ToAnyRegister(lir->output()));
     }
-    masm.append(mir->access(), masm.size() - 4, masm.framePushed());
-
-    masm.memoryBarrierAfter(mir->access().sync());
 }

 void
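Note the bookkeeping that disappears from this function: masm.append(mir->access(), masm.size() - 4, masm.framePushed()) records the offset of the just-emitted 4-byte instruction so the fault handler can map a crashing PC back to a wasm heap access (that duty moves into the wasmLoad/wasmStore implementations). A toy model of that side table, with hypothetical names, for illustration only:

    #include <cstdint>
    #include <vector>

    struct MemoryAccessSite {
        uint32_t insnOffset;   // code offset of the possibly-faulting access
        uint32_t framePushed;  // frame depth for unwinding to the trap stub
    };

    static std::vector<MemoryAccessSite> sites;

    // codeSize is the assembler size right after emission; the MIPS
    // instruction performing the access starts 4 bytes earlier.
    void appendAccess(uint32_t codeSize, uint32_t framePushed) {
        sites.push_back({codeSize - 4, framePushed});
    }

    int main() {
        appendAccess(128, 16);
        return sites[0].insnOffset == 124 ? 0 : 1;
    }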
@@ -1960,81 +1838,26 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset <= INT32_MAX);
-
-    Register ptr = ToRegister(lir->ptr());
-
-    // Maybe add the offset.
-    if (offset) {
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.addPtr(Imm32(offset), ptrPlusOffset);
-        ptr = ptrPlusOffset;
-    } else {
-        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+    Register ptrScratch = InvalidReg;
+    if (!lir->ptrCopy()->isBogusTemp()) {
+        ptrScratch = ToRegister(lir->ptrCopy());
     }

-    unsigned byteSize = mir->access().byteSize();
-    bool isSigned;
-    bool isFloat = false;
-
-    switch (mir->access().type()) {
-      case Scalar::Int8:    isSigned = true;  break;
-      case Scalar::Uint8:   isSigned = false; break;
-      case Scalar::Int16:   isSigned = true;  break;
-      case Scalar::Uint16:  isSigned = false; break;
-      case Scalar::Int32:   isSigned = true;  break;
-      case Scalar::Uint32:  isSigned = false; break;
-      case Scalar::Int64:   isSigned = true;  break;
-      case Scalar::Float64: isFloat  = true;  break;
-      case Scalar::Float32: isFloat  = true;  break;
-      default: MOZ_CRASH("unexpected array type");
-    }
-
-    masm.memoryBarrierBefore(mir->access().sync());
-
-    BaseIndex address(HeapReg, ptr, TimesOne);
-
     if (IsUnaligned(mir->access())) {
-        Register temp = ToRegister(lir->getTemp(1));
-
-        if (isFloat) {
-            FloatRegister value = ToFloatRegister(lir->value());
-
-            if (byteSize == 4)
-                masm.storeUnalignedFloat32(mir->access(), value, temp, address);
-            else
-                masm.storeUnalignedDouble(mir->access(), value, temp, address);
+        if (mir->access().type() == Scalar::Float32 ||
+            mir->access().type() == Scalar::Float64) {
+            masm.wasmUnalignedStoreFP(mir->access(), ToFloatRegister(lir->value()),
+                                      HeapReg, ToRegister(lir->ptr()), ptrScratch,
+                                      ToRegister(lir->getTemp(1)));
         } else {
-            masm.ma_store_unaligned(mir->access(), ToRegister(lir->value()), address, temp,
-                                    static_cast<LoadStoreSize>(8 * byteSize),
-                                    isSigned ? SignExtend : ZeroExtend);
-        }
-
-        masm.memoryBarrierAfter(mir->access().sync());
-        return;
-    }
-
-    if (isFloat) {
-        FloatRegister value = ToFloatRegister(lir->value());
-
-        if (byteSize == 4) {
-            masm.storeFloat32(value, address);
-        } else {
-            // For time being storeDouble for mips32 uses two store instructions,
-            // so we emit only one to get correct behavior in case of OOB access.
-            masm.computeScaledAddress(address, SecondScratchReg);
-            masm.as_sd(value, SecondScratchReg, 0);
+            masm.wasmUnalignedStore(mir->access(), ToRegister(lir->value()), HeapReg,
+                                    ToRegister(lir->ptr()), ptrScratch,
+                                    ToRegister(lir->getTemp(1)));
         }
     } else {
-        masm.ma_store(ToRegister(lir->value()), address,
-                      static_cast<LoadStoreSize>(8 * byteSize),
-                      isSigned ? SignExtend : ZeroExtend);
+        masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg,
+                       ToRegister(lir->ptr()), ptrScratch);
     }
-    // Only the last emitted instruction is a memory access.
-    masm.append(mir->access(), masm.size() - 4, masm.framePushed());
-
-    masm.memoryBarrierAfter(mir->access().sync());
 }

 void
@@ -328,16 +328,10 @@ template <typename L>
 void
 MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Register rt, L overflow)
 {
-    if (rd != rs) {
-        as_addu(rd, rs, rt);
-        as_sltu(SecondScratchReg, rd, rs);
-        ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
-    } else {
-        ma_move(SecondScratchReg, rs);
-        as_addu(rd, rs, rt);
-        as_sltu(SecondScratchReg, rd, SecondScratchReg);
-        ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
-    }
+    MOZ_ASSERT_IF(rd == rs, rt != rd);
+    as_addu(rd, rs, rt);
+    as_sltu(SecondScratchReg, rd, rd == rs ? rt : rs);
+    ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
 }

 template void
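The rewrite above collapses the two ma_addTestCarry branches into one: after an unsigned 32-bit addition r = a + b (mod 2^32), a carry occurred iff r compares below either original operand, so when rd aliases rs the comparison can simply use rt instead (hence the new assert that rt is not also clobbered). A scalar C++ sketch of that invariant, for illustration only:

    #include <cstdint>
    #include <cassert>

    // Mirrors the addu/sltu pattern: the sum wraps mod 2^32, and a carry
    // happened exactly when the wrapped sum is smaller than an operand.
    bool addCarries(uint32_t a, uint32_t b) {
        uint32_t r = a + b;   // as_addu(rd, rs, rt)
        return r < a;         // as_sltu(scratch, rd, rs); comparing against b
                              // works equally well, which is what the new code
                              // does when rd == rs.
    }

    int main() {
        assert(addCarries(0xFFFFFFFFu, 1));
        assert(!addCarries(1, 2));
    }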
@@ -1543,20 +1537,51 @@ MacroAssembler::call(JitCode* c)
 CodeOffset
 MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc)
 {
-    MOZ_CRASH("NYI");
-    return CodeOffset();
+    CodeOffset offset(currentOffset());
+    //           // MIPS32   // MIPS64
+    as_nop();    // lui      // lui
+    as_nop();    // ori      // ori
+    as_nop();    // jalr     // drotr32
+    as_nop();    //          // ori
+#ifdef JS_CODEGEN_MIPS64
+    as_nop();    //          // jalr
+    as_nop();
+#endif
+    append(desc, CodeOffset(currentOffset()));
+    return offset;
 }

 void
 MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target)
 {
-    MOZ_CRASH("NYI");
+#ifdef JS_CODEGEN_MIPS64
+    Instruction* inst = (Instruction*) call - 6 /* six nops */;
+    Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t) target);
+    inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#else
+    Instruction* inst = (Instruction*) call - 4 /* four nops */;
+    Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t) target);
+    inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#endif
 }

 void
 MacroAssembler::patchCallToNop(uint8_t* call)
 {
-    MOZ_CRASH("NYI");
+#ifdef JS_CODEGEN_MIPS64
+    Instruction* inst = (Instruction*) call - 6 /* six nops */;
+#else
+    Instruction* inst = (Instruction*) call - 4 /* four nops */;
+#endif
+
+    inst[0].makeNop();
+    inst[1].makeNop();
+    inst[2].makeNop();
+    inst[3].makeNop();
+#ifdef JS_CODEGEN_MIPS64
+    inst[4].makeNop();
+    inst[5].makeNop();
+#endif
 }

 void
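nopPatchableToCall and its patch functions implement hot-patchable call sites: a run of four (MIPS32) or six (MIPS64) nops is reserved, and patching overwrites them with an absolute-address load into the scratch register followed by jalr. A self-contained C++ sketch of the MIPS32 encodings involved; instruction-format facts only, not the SpiderMonkey API:

    #include <cstdint>
    #include <cstdio>

    // MIPS32 I-type and R-type encodings for lui/ori/jalr.
    uint32_t lui(unsigned rt, uint16_t imm) { return (0x0Fu << 26) | (rt << 16) | imm; }
    uint32_t ori(unsigned rs, unsigned rt, uint16_t imm) {
        return (0x0Du << 26) | (rs << 21) | (rt << 16) | imm;
    }
    uint32_t jalr(unsigned rs, unsigned rd) { return (rs << 21) | (rd << 11) | 0x09u; }

    int main() {
        const unsigned at = 1, ra = 31;      // scratch and return-address registers
        uint32_t code[4] = {0, 0, 0, 0};     // the four reserved nops
        uint32_t target = 0x12345678;        // hypothetical call target
        code[0] = lui(at, target >> 16);         // lui  at, hi16(target)
        code[1] = ori(at, at, target & 0xFFFF);  // ori  at, at, lo16(target)
        code[2] = jalr(at, ra);                  // jalr at (code[3] stays a
                                                 // nop: the branch delay slot)
        for (uint32_t insn : code)
            printf("%08x\n", insn);
    }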
@@ -216,6 +216,16 @@ class MacroAssemblerMIPSShared : public Assembler
     // Handle NaN specially if handleNaN is true.
     void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
     void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);

+    void outOfLineWasmTruncateToIntCheck(FloatRegister input, MIRType fromType,
+                                         MIRType toType, bool isUnsigned, Label* rejoin,
+                                         wasm::BytecodeOffset trapOffset);
+
+  protected:
+    void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+                      Register ptrScratch, AnyRegister output, Register tmp);
+    void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value, Register memoryBase,
+                       Register ptr, Register ptrScratch, Register tmp);
 };

 } // namespace jit
@@ -392,7 +392,9 @@ Assembler::bind(RepatchLabel* label)
         MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
                    inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
                    inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
-                   inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
+                   inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift) ||
+                   (inst[0].extractOpcode() == (uint32_t(op_regimm) >> OpcodeShift) &&
+                    inst[0].extractRT() == (uint32_t(rt_bltz) >> RTShift)));
         inst[0].setBOffImm16(BOffImm16(offset));
     } else if (inst[0].encode() == inst_beq.encode()) {
         // Handle open long unconditional jumps created by
@@ -82,6 +82,13 @@ static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f18, FloatRe
 static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f16, FloatRegister::Single };
 static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f16, FloatRegister::Double };

+struct ScratchFloat32Scope : public AutoFloatRegisterScope
+{
+    explicit ScratchFloat32Scope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchFloat32Reg)
+    { }
+};
+
 struct ScratchDoubleScope : public AutoFloatRegisterScope
 {
     explicit ScratchDoubleScope(MacroAssembler& masm)
@@ -453,73 +453,19 @@ void
 CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
-    Register64 output = ToOutRegister64(lir);
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-
-    Register ptr = ToRegister(lir->ptr());
-
-    if (offset) {
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.addPtr(Imm32(offset), ptrPlusOffset);
-        ptr = ptrPlusOffset;
-    } else {
-        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+    Register ptrScratch = InvalidReg;
+    if (!lir->ptrCopy()->isBogusTemp()) {
+        ptrScratch = ToRegister(lir->ptrCopy());
     }

-    unsigned byteSize = mir->access().byteSize();
-    bool isSigned;
-    switch (mir->access().type()) {
-      case Scalar::Int8:   isSigned = true;  break;
-      case Scalar::Uint8:  isSigned = false; break;
-      case Scalar::Int16:  isSigned = true;  break;
-      case Scalar::Uint16: isSigned = false; break;
-      case Scalar::Int32:  isSigned = true;  break;
-      case Scalar::Uint32: isSigned = false; break;
-      case Scalar::Int64:  isSigned = true;  break;
-      default: MOZ_CRASH("unexpected array type");
-    }
-
-    masm.memoryBarrierBefore(mir->access().sync());
-
-    MOZ_ASSERT(INT64LOW_OFFSET == 0);
     if (IsUnaligned(mir->access())) {
-        Register temp = ToRegister(lir->getTemp(1));
-
-        if (byteSize <= 4) {
-            masm.ma_load_unaligned(mir->access(), output.low, BaseIndex(HeapReg, ptr, TimesOne),
-                                   temp, static_cast<LoadStoreSize>(8 * byteSize),
-                                   isSigned ? SignExtend : ZeroExtend);
-            if (!isSigned)
-                masm.move32(Imm32(0), output.high);
-            else
-                masm.ma_sra(output.high, output.low, Imm32(31));
-        } else {
-            MOZ_ASSERT(output.low != ptr);
-            masm.ma_load_unaligned(mir->access(), output.low,
-                                   BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord, ZeroExtend);
-            masm.ma_load_unaligned(mir->access(), output.high,
-                                   BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
-                                   SizeWord, SignExtend);
-        }
-    } else if (byteSize <= 4) {
-        masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
-                     static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
-        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
-        if (!isSigned)
-            masm.move32(Imm32(0), output.high);
-        else
-            masm.ma_sra(output.high, output.low, Imm32(31));
+        masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
+                                  ptrScratch, ToOutRegister64(lir), ToRegister(lir->getTemp(1)));
     } else {
-        MOZ_ASSERT(output.low != ptr);
-        masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
-        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
-        masm.ma_load(output.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
-        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
+        masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+                         ToOutRegister64(lir));
     }
-
-    masm.memoryBarrierAfter(mir->access().sync());
 }

 void
@@ -539,63 +485,19 @@ void
 CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
 {
     const MWasmStore* mir = lir->mir();
-    Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-
-    Register ptr = ToRegister(lir->ptr());
-
-    if (offset) {
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.addPtr(Imm32(offset), ptrPlusOffset);
-        ptr = ptrPlusOffset;
-    } else {
-        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+    Register ptrScratch = InvalidReg;
+    if (!lir->ptrCopy()->isBogusTemp()) {
+        ptrScratch = ToRegister(lir->ptrCopy());
     }

-    unsigned byteSize = mir->access().byteSize();
-    bool isSigned;
-    switch (mir->access().type()) {
-      case Scalar::Int8:   isSigned = true;  break;
-      case Scalar::Uint8:  isSigned = false; break;
-      case Scalar::Int16:  isSigned = true;  break;
-      case Scalar::Uint16: isSigned = false; break;
-      case Scalar::Int32:  isSigned = true;  break;
-      case Scalar::Uint32: isSigned = false; break;
-      case Scalar::Int64:  isSigned = true;  break;
-      default: MOZ_CRASH("unexpected array type");
-    }
-
-    masm.memoryBarrierBefore(mir->access().sync());
-
-    MOZ_ASSERT(INT64LOW_OFFSET == 0);
     if (IsUnaligned(mir->access())) {
-        Register temp = ToRegister(lir->getTemp(1));
-
-        if (byteSize <= 4) {
-            masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
-                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
-                                    isSigned ? SignExtend : ZeroExtend);
-        } else {
-            masm.ma_store_unaligned(mir->access(), value.high,
-                                    BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
-                                    SizeWord, SignExtend);
-            masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
-                                    temp, SizeWord, ZeroExtend);
-        }
-    } else if (byteSize <= 4) {
-        masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
-                      static_cast<LoadStoreSize>(8 * byteSize));
-        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
-
+        masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+                                   ToRegister(lir->ptr()), ptrScratch, ToRegister(lir->getTemp(1)));
     } else {
-        masm.ma_store(value.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
-        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
-        masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+        masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+                          ToRegister(lir->ptr()), ptrScratch);
     }
-
-    masm.memoryBarrierAfter(mir->access().sync());
 }

 void
@@ -33,43 +33,52 @@ MacroAssembler::move64(Imm64 imm, Register64 dest)
 void
 MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest)
 {
-    MOZ_CRASH("NYI: moveDoubleToGPR64");
+    moveFromDoubleHi(src, dest.high);
+    moveFromDoubleLo(src, dest.low);
 }

 void
 MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest)
 {
-    MOZ_CRASH("NYI: moveGPR64ToDouble");
+    moveToDoubleHi(src.high, dest);
+    moveToDoubleLo(src.low, dest);
 }

 void
 MacroAssembler::move64To32(Register64 src, Register dest)
 {
-    MOZ_CRASH("NYI: move64To32");
+    if (src.low != dest)
+        move32(src.low, dest);
 }

 void
 MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move32To64ZeroExtend");
+    if (src != dest.low)
+        move32(src, dest.low);
+    move32(Imm32(0), dest.high);
 }

 void
 MacroAssembler::move8To64SignExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move8To64SignExtend");
+    move8SignExtend(src, dest.low);
+    move32To64SignExtend(dest.low, dest);
 }

 void
 MacroAssembler::move16To64SignExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move16To64SignExtend");
+    move16SignExtend(src, dest.low);
+    move32To64SignExtend(dest.low, dest);
 }

 void
 MacroAssembler::move32To64SignExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move32To64SignExtend");
+    if (src != dest.low)
+        move32(src, dest.low);
+    ma_sra(dest.high, dest.low, Imm32(31));
 }

 // ===============================================================
@@ -210,13 +219,20 @@ MacroAssembler::add64(Imm64 imm, Register64 dest)
 CodeOffset
 MacroAssembler::sub32FromStackPtrWithPatch(Register dest)
 {
-    MOZ_CRASH("NYI - sub32FromStackPtrWithPatch");
+    CodeOffset offset = CodeOffset(currentOffset());
+    ma_liPatchable(dest, Imm32(0));
+    as_subu(dest, StackPointer, dest);
+    return offset;
 }

 void
 MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm)
 {
-    MOZ_CRASH("NYI - patchSub32FromStackPtr");
+    Instruction* lui = (Instruction*) m_buffer.getInst(BufferOffset(offset.offset()));
+    MOZ_ASSERT(lui->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+    MOZ_ASSERT(lui->next()->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+    UpdateLuiOriValue(lui, lui->next(), imm.value);
 }

 void
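patchSub32FromStackPtr relies on ma_liPatchable having emitted a lui/ori pair whose two 16-bit immediate fields together hold a 32-bit constant. What updating that pair means can be shown in a few lines of C++ (a sketch with a hypothetical helper name, not the real UpdateLuiOriValue):

    #include <cstdint>
    #include <cstdio>

    // The 32-bit value is split across the low 16 bits of both words.
    void updateLuiOri(uint32_t* luiInsn, uint32_t* oriInsn, uint32_t value) {
        *luiInsn = (*luiInsn & 0xFFFF0000u) | (value >> 16);      // high half
        *oriInsn = (*oriInsn & 0xFFFF0000u) | (value & 0xFFFFu);  // low half
    }

    int main() {
        uint32_t luiInsn = 0x3C010000u;  // lui at, 0x0000
        uint32_t oriInsn = 0x34210000u;  // ori at, at, 0x0000
        updateLuiOri(&luiInsn, &oriInsn, 0xDEADBEEFu);
        printf("%08x %08x\n", luiInsn, oriInsn);  // 3c01dead 3421beef
    }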
@@ -405,7 +421,7 @@ MacroAssembler::lshift64(Imm32 imm, Register64 dest)
         return;
     } else if (imm.value < 32) {
         as_sll(dest.high, dest.high, imm.value);
-        as_srl(scratch, dest.low, 32 - imm.value);
+        as_srl(scratch, dest.low, (32 - imm.value) % 32);
         as_or(dest.high, dest.high, scratch);
         as_sll(dest.low, dest.low, imm.value);
     } else {
@@ -464,7 +480,7 @@ MacroAssembler::rshift64(Imm32 imm, Register64 dest)

     if (imm.value < 32) {
         as_srl(dest.low, dest.low, imm.value);
-        as_sll(scratch, dest.high, 32 - imm.value);
+        as_sll(scratch, dest.high, (32 - imm.value) % 32);
         as_or(dest.low, dest.low, scratch);
         as_srl(dest.high, dest.high, imm.value);
     } else if (imm.value == 32) {
@@ -512,7 +528,7 @@ MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)

     if (imm.value < 32) {
         as_srl(dest.low, dest.low, imm.value);
-        as_sll(scratch, dest.high, 32 - imm.value);
+        as_sll(scratch, dest.high, (32 - imm.value) % 32);
         as_or(dest.low, dest.low, scratch);
         as_sra(dest.high, dest.high, imm.value);
     } else if (imm.value == 32) {
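These three hunks apply the same fix: MIPS shift instructions encode only 5-bit amounts, so the complementary shift 32 - imm.value must be reduced mod 32 to stay encodable when imm.value can be 0. The 64-from-32 composition being guarded looks like this in scalar form (illustration only, assuming 0 < n < 32 as in the surrounding branch):

    #include <cstdint>
    #include <cassert>

    // Logical 64-bit right shift built from 32-bit halves, as on MIPS32.
    uint64_t rshift64(uint32_t lo, uint32_t hi, unsigned n) {
        assert(n > 0 && n < 32);
        uint32_t newLo = (lo >> n) | (hi << ((32 - n) % 32)); // %32 keeps the
        uint32_t newHi = hi >> n;                             // amount in 0..31
        return ((uint64_t)newHi << 32) | newLo;
    }

    int main() {
        assert(rshift64(0x00000000u, 0x00000001u, 4) == 0x10000000u);
    }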
@@ -593,26 +609,26 @@ MacroAssembler::rotateLeft64(Register shift, Register64 src, Register64 dest, Re
     MOZ_ASSERT(temp != src.low && temp != src.high);
     MOZ_ASSERT(shift != src.low && shift != src.high);
     MOZ_ASSERT(temp != InvalidReg);
+    MOZ_ASSERT(src != dest);

     ScratchRegisterScope shift_value(*this);
     Label high, swap, done, zero;
-    ma_and(temp, shift, Imm32(0x3f));
-    ma_b(temp, Imm32(32), &swap, Equal);
-    ma_b(temp, Imm32(32), &high, GreaterThan);
+    ma_and(shift, shift, Imm32(0x3f));
+    ma_b(shift, Imm32(32), &swap, Equal);
+    ma_b(shift, Imm32(32), &high, GreaterThan);

     // high = high << shift | low >> 32 - shift
     // low = low << shift | high >> 32 - shift
-    ma_sll(dest.high, src.high, temp);
-    ma_b(temp, Imm32(0), &zero, Equal);
+    ma_move(temp, src.high);
+    ma_sll(dest.high, src.high, shift);
+    ma_b(shift, Imm32(0), &zero, Equal);
     ma_li(SecondScratchReg, Imm32(32));
-    as_subu(shift_value, SecondScratchReg, temp);
+    as_subu(shift_value, SecondScratchReg, shift);

     ma_srl(SecondScratchReg, src.low, shift_value);
     as_or(dest.high, dest.high, SecondScratchReg);

-    ma_sll(dest.low, src.low, temp);
-    ma_srl(SecondScratchReg, src.high, shift_value);
+    ma_sll(dest.low, src.low, shift);
+    ma_srl(SecondScratchReg, temp, shift_value);
     as_or(dest.low, dest.low, SecondScratchReg);
     ma_b(&done);
@@ -620,6 +636,7 @@ MacroAssembler::rotateLeft64(Register shift, Register64 src, Register64 dest, Re
     ma_move(dest.low, src.low);
     ma_move(dest.high, src.high);
+    ma_b(&done);

     bind(&swap);
     ma_move(SecondScratchReg, src.low);
     ma_move(dest.low, src.high);
@@ -627,19 +644,21 @@ MacroAssembler::rotateLeft64(Register shift, Register64 src, Register64 dest, Re
     ma_b(&done);
     // A 32 - 64 shift is a 0 - 32 shift in the other direction.
     bind(&high);
-    ma_and(shift, shift, Imm32(0x3f));
     ma_li(SecondScratchReg, Imm32(64));
-    as_subu(temp, SecondScratchReg, shift);
+    as_subu(shift_value, SecondScratchReg, shift);

-    ma_srl(dest.high, src.high, temp);
+    ma_move(temp, src.high);
+    ma_srl(dest.high, src.high, shift_value);
     ma_li(SecondScratchReg, Imm32(32));
-    as_subu(shift_value, SecondScratchReg, temp);
+    as_subu(shift_value, SecondScratchReg, shift_value);
     ma_sll(SecondScratchReg, src.low, shift_value);
     as_or(dest.high, dest.high, SecondScratchReg);

-    ma_srl(dest.low, src.low, temp);
-    ma_sll(SecondScratchReg, src.high, shift_value);
-    as_or(dest.low, dest.low, SecondScratchReg);
+    ma_sll(temp, temp, shift_value);
+    ma_li(SecondScratchReg, Imm32(64));
+    as_subu(shift_value, SecondScratchReg, shift);
+    ma_srl(dest.low, src.low, shift_value);
+    as_or(dest.low, dest.low, temp);

     bind(&done);
 }
@@ -681,30 +700,30 @@ MacroAssembler::rotateRight64(Register shift, Register64 src, Register64 dest, R
     MOZ_ASSERT(temp != src.low && temp != src.high);
     MOZ_ASSERT(shift != src.low && shift != src.high);
     MOZ_ASSERT(temp != InvalidReg);
+    MOZ_ASSERT(src != dest);

     ScratchRegisterScope shift_value(*this);
     Label high, swap, done, zero;

-    ma_and(temp, shift, Imm32(0x3f));
-    ma_b(temp, Imm32(32), &swap, Equal);
-    ma_b(temp, Imm32(32), &high, GreaterThan);
+    ma_and(shift, shift, Imm32(0x3f));
+    ma_b(shift, Imm32(32), &swap, Equal);
+    ma_b(shift, Imm32(32), &high, GreaterThan);

     // high = high >> shift | low << 32 - shift
     // low = low >> shift | high << 32 - shift
-    ma_srl(dest.high, src.high, temp);
-    ma_b(temp, Imm32(0), &zero, Equal);
+    ma_move(temp, src.high);
+    ma_srl(dest.high, src.high, shift);
+    ma_b(shift, Imm32(0), &zero, Equal);
     ma_li(SecondScratchReg, Imm32(32));
-    as_subu(shift_value, SecondScratchReg, temp);
+    as_subu(shift_value, SecondScratchReg, shift);

     ma_sll(SecondScratchReg, src.low, shift_value);
     as_or(dest.high, dest.high, SecondScratchReg);

-    ma_srl(dest.low, src.low, temp);
+    ma_srl(dest.low, src.low, shift);

-    //ma_li(SecondScratchReg, Imm32(32));
-    //as_subu(shift_value, SecondScratchReg, shift_value);
-    ma_sll(SecondScratchReg, src.high, shift_value);
+    ma_sll(SecondScratchReg, temp, shift_value);
     as_or(dest.low, dest.low, SecondScratchReg);

     ma_b(&done);
@@ -713,6 +732,7 @@ MacroAssembler::rotateRight64(Register shift, Register64 src, Register64 dest, R
     ma_move(dest.low, src.low);
     ma_move(dest.high, src.high);
+    ma_b(&done);

     bind(&swap);
     ma_move(SecondScratchReg, src.low);
     ma_move(dest.low, src.high);
@@ -720,20 +740,21 @@ MacroAssembler::rotateRight64(Register shift, Register64 src, Register64 dest, R
     ma_b(&done);
     // A 32 - 64 shift is a 0 - 32 shift in the other direction.
     bind(&high);
-    ma_and(shift, shift, Imm32(0x3f));
     ma_li(SecondScratchReg, Imm32(64));
-    as_subu(temp, SecondScratchReg, shift);
+    as_subu(shift_value, SecondScratchReg, shift);

-    ma_sll(dest.high, src.high, temp);
+    ma_move(temp, src.high);
+    ma_sll(dest.high, src.high, shift_value);
     ma_li(SecondScratchReg, Imm32(32));
-    as_subu(shift_value, SecondScratchReg, temp);
-
+    as_subu(shift_value, SecondScratchReg, shift_value);
     ma_srl(SecondScratchReg, src.low, shift_value);
     as_or(dest.high, dest.high, SecondScratchReg);

-    ma_sll(dest.low, src.low, temp);
-    ma_srl(SecondScratchReg, src.high, shift_value);
-    as_or(dest.low, dest.low, SecondScratchReg);
+    ma_srl(temp, temp, shift_value);
+    ma_li(SecondScratchReg, Imm32(64));
+    as_subu(shift_value, SecondScratchReg, shift);
+    ma_sll(dest.low, src.low, shift_value);
+    as_or(dest.low, dest.low, temp);

     bind(&done);
 }
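The rotate changes re-shuffle which register holds the masked shift count, but the underlying decomposition is unchanged: a 64-bit rotate on a 32-bit-register machine special-cases shifts of 0 (plain copy), exactly 32 (swap halves), and more than 32 (handled via the swapped halves). One way to write the same computation in scalar C++ (illustration only, loosely mirroring the label structure):

    #include <cstdint>
    #include <cassert>

    uint64_t rotl64(uint32_t lo, uint32_t hi, unsigned s) {
        s &= 0x3f;                                       // ma_and(shift, 0x3f)
        if (s == 0)  return ((uint64_t)hi << 32) | lo;   // the "zero" label
        if (s == 32) return ((uint64_t)lo << 32) | hi;   // the "swap" label
        if (s > 32)  return rotl64(hi, lo, s - 32);      // the "high" label:
                                                         // swap, then rotate
        uint32_t newHi = (hi << s) | (lo >> (32 - s));
        uint32_t newLo = (lo << s) | (hi >> (32 - s));
        return ((uint64_t)newHi << 32) | newLo;
    }

    int main() {
        assert(rotl64(0x00000001u, 0x00000000u, 33) == 0x0000000200000000ull);
    }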
@@ -2394,6 +2394,164 @@ MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output
     bind(&done);
 }

+void
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+                            Register ptrScratch, Register64 output)
+{
+    wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void
+MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
+                                     Register ptr, Register ptrScratch, Register64 output,
+                                     Register tmp)
+{
+    wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
+}
+
+void
+MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
+                             Register memoryBase, Register ptr, Register ptrScratch)
+{
+    wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void
+MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
+                                      Register memoryBase, Register ptr, Register ptrScratch,
+                                      Register tmp)
+{
+    wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
+}
+
+void
+MacroAssemblerMIPSCompat::wasmLoadI64Impl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+                                          Register ptr, Register ptrScratch, Register64 output,
+                                          Register tmp)
+{
+    uint32_t offset = access.offset();
+    MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+    // Maybe add the offset.
+    if (offset) {
+        asMasm().movePtr(ptr, ptrScratch);
+        asMasm().addPtr(Imm32(offset), ptrScratch);
+        ptr = ptrScratch;
+    }
+
+    unsigned byteSize = access.byteSize();
+    bool isSigned;
+
+    switch (access.type()) {
+      case Scalar::Int8:   isSigned = true;  break;
+      case Scalar::Uint8:  isSigned = false; break;
+      case Scalar::Int16:  isSigned = true;  break;
+      case Scalar::Uint16: isSigned = false; break;
+      case Scalar::Int32:  isSigned = true;  break;
+      case Scalar::Uint32: isSigned = false; break;
+      case Scalar::Int64:  isSigned = true;  break;
+      default: MOZ_CRASH("unexpected array type");
+    }
+
+    BaseIndex address(memoryBase, ptr, TimesOne);
+    MOZ_ASSERT(INT64LOW_OFFSET == 0);
+    if (IsUnaligned(access)) {
+        MOZ_ASSERT(tmp != InvalidReg);
+        if (byteSize <= 4) {
+            asMasm().ma_load_unaligned(access, output.low, address, tmp,
+                                       static_cast<LoadStoreSize>(8 * byteSize),
+                                       isSigned ? SignExtend : ZeroExtend);
+            if (!isSigned)
+                asMasm().move32(Imm32(0), output.high);
+            else
+                asMasm().ma_sra(output.high, output.low, Imm32(31));
+        } else {
+            MOZ_ASSERT(output.low != ptr);
+            asMasm().ma_load_unaligned(access, output.low, address, tmp, SizeWord, ZeroExtend);
+            asMasm().ma_load_unaligned(access, output.high,
+                                       BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
+                                       tmp, SizeWord, SignExtend);
+        }
+        return;
+    }
+
+    asMasm().memoryBarrierBefore(access.sync());
+    if (byteSize <= 4) {
+        asMasm().ma_load(output.low, address, static_cast<LoadStoreSize>(8 * byteSize),
+                         isSigned ? SignExtend : ZeroExtend);
+        asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
+        if (!isSigned)
+            asMasm().move32(Imm32(0), output.high);
+        else
+            asMasm().ma_sra(output.high, output.low, Imm32(31));
+    } else {
+        MOZ_ASSERT(output.low != ptr);
+        asMasm().ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+        asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
+        asMasm().ma_load(output.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
+        asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
+    }
+    asMasm().memoryBarrierAfter(access.sync());
+}
+
+void
+MacroAssemblerMIPSCompat::wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+                                           Register memoryBase, Register ptr, Register ptrScratch,
+                                           Register tmp)
+{
+    uint32_t offset = access.offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+    MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+    // Maybe add the offset.
+    if (offset) {
+        asMasm().addPtr(Imm32(offset), ptrScratch);
+        ptr = ptrScratch;
+    }
+
+    unsigned byteSize = access.byteSize();
+    bool isSigned;
+    switch (access.type()) {
+      case Scalar::Int8:   isSigned = true;  break;
+      case Scalar::Uint8:  isSigned = false; break;
+      case Scalar::Int16:  isSigned = true;  break;
+      case Scalar::Uint16: isSigned = false; break;
+      case Scalar::Int32:  isSigned = true;  break;
+      case Scalar::Uint32: isSigned = false; break;
+      case Scalar::Int64:  isSigned = true;  break;
+      default: MOZ_CRASH("unexpected array type");
+    }
+
+    MOZ_ASSERT(INT64LOW_OFFSET == 0);
+    BaseIndex address(memoryBase, ptr, TimesOne);
+    if (IsUnaligned(access)) {
+        MOZ_ASSERT(tmp != InvalidReg);
+        if (byteSize <= 4) {
+            asMasm().ma_store_unaligned(access, value.low, address, tmp,
+                                        static_cast<LoadStoreSize>(8 * byteSize),
+                                        isSigned ? SignExtend : ZeroExtend);
+        } else {
+            asMasm().ma_store_unaligned(access, value.high,
+                                        BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
+                                        tmp, SizeWord, SignExtend);
+            asMasm().ma_store_unaligned(access, value.low, address, tmp, SizeWord, ZeroExtend);
+        }
+        return;
+    }
+
+    asMasm().memoryBarrierBefore(access.sync());
+    if (byteSize <= 4) {
+        asMasm().ma_store(value.low, address, static_cast<LoadStoreSize>(8 * byteSize));
+        asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
+    } else {
+        asMasm().ma_store(value.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
+                          SizeWord);
+        asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
+        asMasm().ma_store(value.low, address, SizeWord);
+    }
+    asMasm().memoryBarrierAfter(access.sync());
+}
+
 static void
 EnterAtomic64Region(MacroAssembler& masm, Register addr, Register spinlock, Register scratch)
 {
@@ -2602,6 +2760,7 @@ MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, Register64
 {
     AtomicFetchOp64(*this, op, value, mem, temp, output);
 }

 // ========================================================================
 // Convert floating point.
@@ -2625,4 +2784,3 @@ MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest, Regist
 }

 //}}} check_macroassembler_style
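In wasmLoadI64Impl/wasmStoreI64Impl above, a 64-bit access on MIPS32 becomes two 32-bit word accesses, and each emitted word access gets its own append() entry because either word can be the one that faults out of bounds. The memory layout being assumed, in a scalar C++ sketch (illustration only):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Little-endian split of an Int64 heap slot: low word at offset 0
    // (INT64LOW_OFFSET == 0), high word at offset 4 (INT64HIGH_OFFSET).
    uint64_t load64(const uint8_t* heapBase, uint32_t ptr) {
        uint32_t lo, hi;
        std::memcpy(&lo, heapBase + ptr + 0, 4);
        std::memcpy(&hi, heapBase + ptr + 4, 4);
        return ((uint64_t)hi << 32) | lo;
    }

    int main() {
        uint8_t heap[8] = {1, 0, 0, 0, 2, 0, 0, 0};
        assert(load64(heap, 0) == 0x0000000200000001ull);
    }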
@@ -719,6 +719,10 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS

     void enterAtomic64Region(Register addr, Register spinlock, Register tmp);
     void exitAtomic64Region(Register spinlock);
+    void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+                         Register ptrScratch, Register64 output, Register tmp);
+    void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+                          Register ptr, Register ptrScratch, Register tmp);

   public:
     CodeOffset labelForPatch() {
@@ -323,7 +323,9 @@ Assembler::bind(RepatchLabel* label)
         MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
                    inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
                    inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
-                   inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
+                   inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift) ||
+                   (inst[0].extractOpcode() == (uint32_t(op_regimm) >> OpcodeShift) &&
+                    inst[0].extractRT() == (uint32_t(rt_bltz) >> RTShift)));
         inst[0].setBOffImm16(BOffImm16(offset));
     } else if (inst[0].encode() == inst_beq.encode()) {
         // Handle open long unconditional jumps created by
@@ -76,6 +76,20 @@ static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f23, FloatRe
 static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f21, FloatRegisters::Single };
 static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f21, FloatRegisters::Double };

+struct ScratchFloat32Scope : public AutoFloatRegisterScope
+{
+    explicit ScratchFloat32Scope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchFloat32Reg)
+    { }
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope
+{
+    explicit ScratchDoubleScope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchDoubleReg)
+    { }
+};
+
 // Registers used in the GenerateFFIIonExit Disable Activation block.
 // None of these may be the second scratch register (t8).
 static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
@@ -417,51 +417,18 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
 {
     const MWasmLoad* mir = lir->mir();

     MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-
-    Register ptr = ToRegister(lir->ptr());
-
-    // Maybe add the offset.
-    if (offset) {
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.addPtr(Imm32(offset), ptrPlusOffset);
-        ptr = ptrPlusOffset;
-    } else {
-        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+    Register ptrScratch = InvalidReg;
+    if (!lir->ptrCopy()->isBogusTemp()) {
+        ptrScratch = ToRegister(lir->ptrCopy());
     }

-    unsigned byteSize = mir->access().byteSize();
-    bool isSigned;
-
-    switch (mir->access().type()) {
-      case Scalar::Int8:   isSigned = true;  break;
-      case Scalar::Uint8:  isSigned = false; break;
-      case Scalar::Int16:  isSigned = true;  break;
-      case Scalar::Uint16: isSigned = false; break;
-      case Scalar::Int32:  isSigned = true;  break;
-      case Scalar::Uint32: isSigned = false; break;
-      case Scalar::Int64:  isSigned = true;  break;
-      default: MOZ_CRASH("unexpected array type");
-    }
-
-    masm.memoryBarrierBefore(mir->access().sync());
-
     if (IsUnaligned(mir->access())) {
-        Register temp = ToRegister(lir->getTemp(1));
-
-        masm.ma_load_unaligned(mir->access(), ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
-                               temp, static_cast<LoadStoreSize>(8 * byteSize),
-                               isSigned ? SignExtend : ZeroExtend);
+        masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
+                                  ptrScratch, ToOutRegister64(lir), ToRegister(lir->getTemp(1)));
     } else {
-        masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
-                     static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
-        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
+        masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+                         ToOutRegister64(lir));
     }
-
-    masm.memoryBarrierAfter(mir->access().sync());
 }

 void
@@ -482,49 +449,18 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
 {
     const MWasmStore* mir = lir->mir();

-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-
-    Register ptr = ToRegister(lir->ptr());
-
-    // Maybe add the offset.
-    if (offset) {
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.addPtr(Imm32(offset), ptrPlusOffset);
-        ptr = ptrPlusOffset;
-    } else {
-        MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+    Register ptrScratch = InvalidReg;
+    if (!lir->ptrCopy()->isBogusTemp()) {
+        ptrScratch = ToRegister(lir->ptrCopy());
     }

-    unsigned byteSize = mir->access().byteSize();
-    bool isSigned;
-
-    switch (mir->access().type()) {
-      case Scalar::Int8:   isSigned = true;  break;
-      case Scalar::Uint8:  isSigned = false; break;
-      case Scalar::Int16:  isSigned = true;  break;
-      case Scalar::Uint16: isSigned = false; break;
-      case Scalar::Int32:  isSigned = true;  break;
-      case Scalar::Uint32: isSigned = false; break;
-      case Scalar::Int64:  isSigned = true;  break;
-      default: MOZ_CRASH("unexpected array type");
-    }
-
-    masm.memoryBarrierBefore(mir->access().sync());
-
     if (IsUnaligned(mir->access())) {
-        Register temp = ToRegister(lir->getTemp(1));
-
-        masm.ma_store_unaligned(mir->access(), ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
-                                temp, static_cast<LoadStoreSize>(8 * byteSize),
-                                isSigned ? SignExtend : ZeroExtend);
+        masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+                                   ToRegister(lir->ptr()), ptrScratch, ToRegister(lir->getTemp(1)));
     } else {
-        masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
-                      static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
-        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
+        masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+                          ToRegister(lir->ptr()), ptrScratch);
     }
-
-    masm.memoryBarrierAfter(mir->access().sync());
 }

 void
@@ -31,43 +31,43 @@ MacroAssembler::move64(Imm64 imm, Register64 dest)
 void
 MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest)
 {
-    MOZ_CRASH("NYI: moveDoubleToGPR64");
+    moveFromDouble(src, dest.reg);
 }

 void
 MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest)
 {
-    MOZ_CRASH("NYI: moveGPR64ToDouble");
+    moveToDouble(src.reg, dest);
 }

 void
 MacroAssembler::move64To32(Register64 src, Register dest)
 {
-    MOZ_CRASH("NYI: move64To32");
+    ma_sll(dest, src.reg, Imm32(0));
 }

 void
 MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move32To64ZeroExtend");
+    ma_dext(dest.reg, src, Imm32(0), Imm32(32));
 }

 void
 MacroAssembler::move8To64SignExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move8To64SignExtend");
+    move8SignExtend(src, dest.reg);
 }

 void
 MacroAssembler::move16To64SignExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move16To64SignExtend");
+    move16SignExtend(src, dest.reg);
 }

 void
 MacroAssembler::move32To64SignExtend(Register src, Register64 dest)
 {
-    MOZ_CRASH("NYI: move32To64SignExtend");
+    ma_sll(dest.reg, src, Imm32(0));
 }

 // ===============================================================
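The MIPS64 versions lean on an architectural detail: 32-bit ALU results are kept sign-extended in 64-bit registers, so "sll rd, rs, 0" is the canonical 32-to-64 sign extension (used in move32To64SignExtend) and also a valid truncation for move64To32. In scalar C++ terms (illustration only):

    #include <cstdint>
    #include <cassert>

    // What "sll rd, rs, 0" produces on MIPS64: operate on the low 32 bits,
    // then sign-extend the 32-bit result to the full register width.
    int64_t sllZero(int64_t rs) {
        int32_t low = (int32_t)rs;  // take the low 32 bits
        return (int64_t)low;        // sign-extend back to 64 bits
    }

    int main() {
        assert(sllZero(0x00000000FFFFFFFFll) == -1);    // low word is negative
        assert(sllZero(0x123456780000002All) == 0x2A);  // high word discarded
    }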
@@ -245,13 +245,20 @@ MacroAssembler::add64(Imm64 imm, Register64 dest)
 CodeOffset
 MacroAssembler::sub32FromStackPtrWithPatch(Register dest)
 {
-    MOZ_CRASH("NYI - sub32FromStackPtrWithPatch");
+    CodeOffset offset = CodeOffset(currentOffset());
+    MacroAssemblerMIPSShared::ma_liPatchable(dest, Imm32(0));
+    as_dsubu(dest, StackPointer, dest);
+    return offset;
 }

 void
 MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm)
 {
-    MOZ_CRASH("NYI - patchSub32FromStackPtr");
+    Instruction* lui = (Instruction*) m_buffer.getInst(BufferOffset(offset.offset()));
+    MOZ_ASSERT(lui->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+    MOZ_ASSERT(lui->next()->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+    MacroAssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), imm.value);
 }

 void
@ -2445,6 +2445,236 @@ MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output
|
|||
|
||||
}
|
||||
|
||||
void
|
||||
MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
|
||||
Register ptrScratch, Register64 output)
|
||||
{
|
||||
wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
|
||||
}
|
||||
|
||||
void
|
||||
MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
|
||||
Register ptr, Register ptrScratch, Register64 output,
|
||||
Register tmp)
|
||||
{
|
||||
wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
|
||||
}
|
||||
|
||||
void
|
||||
MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
|
||||
Register memoryBase, Register ptr, Register ptrScratch)
|
||||
{
|
||||
wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
|
||||
}
|
||||
|
||||
void
|
||||
MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
|
||||
Register memoryBase, Register ptr, Register ptrScratch,
|
||||
Register tmp)
|
||||
{
|
||||
wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
|
||||
}
|
||||
|
||||
void
MacroAssembler::wasmTruncateDoubleToInt64(FloatRegister input, Register64 output,
                                          Label* oolEntry, Label* oolRejoin,
                                          FloatRegister tempDouble)
{
    MOZ_ASSERT(tempDouble.isInvalid());
    wasmTruncateToI64(input, output.reg, MIRType::Double, false, oolEntry, oolRejoin);
}

void
MacroAssembler::wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output,
                                           Label* oolEntry, Label* oolRejoin,
                                           FloatRegister tempDouble)
{
    MOZ_ASSERT(tempDouble.isInvalid());
    wasmTruncateToI64(input, output.reg, MIRType::Double, true, oolEntry, oolRejoin);
}

void
MacroAssembler::wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output,
                                           Label* oolEntry, Label* oolRejoin,
                                           FloatRegister tempFloat)
{
    MOZ_ASSERT(tempFloat.isInvalid());
    wasmTruncateToI64(input, output.reg, MIRType::Float32, false, oolEntry, oolRejoin);
}

void
MacroAssembler::wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output,
                                            Label* oolEntry, Label* oolRejoin,
                                            FloatRegister tempFloat)
{
    MOZ_ASSERT(tempFloat.isInvalid());
    wasmTruncateToI64(input, output.reg, MIRType::Float32, true, oolEntry, oolRejoin);
}
void
MacroAssemblerMIPS64Compat::wasmTruncateToI64(FloatRegister input, Register output, MIRType fromType,
                                              bool isUnsigned, Label* oolEntry, Label* oolRejoin)
{
    if (isUnsigned) {
        Label isLarge, done;

        if (fromType == MIRType::Double) {
            asMasm().loadConstantDouble(double(INT64_MAX), ScratchDoubleReg);
            asMasm().ma_bc1d(ScratchDoubleReg, input, &isLarge,
                             Assembler::DoubleLessThanOrEqual, ShortJump);

            asMasm().as_truncld(ScratchDoubleReg, input);
        } else {
            asMasm().loadConstantFloat32(float(INT64_MAX), ScratchFloat32Reg);
            asMasm().ma_bc1s(ScratchFloat32Reg, input, &isLarge,
                             Assembler::DoubleLessThanOrEqual, ShortJump);

            asMasm().as_truncls(ScratchDoubleReg, input);
        }

        // Check that the result is in the uint64_t range.
        asMasm().moveFromDouble(ScratchDoubleReg, output);
        asMasm().as_cfc1(ScratchRegister, Assembler::FCSR);
        // Extract the invalid operation cause bit (bit 16) from FCSR.
        asMasm().ma_ext(ScratchRegister, ScratchRegister, 16, 1);
        asMasm().ma_dsrl(SecondScratchReg, output, Imm32(63));
        asMasm().ma_or(SecondScratchReg, ScratchRegister);
        asMasm().ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

        asMasm().ma_b(&done, ShortJump);

        // The input is greater than double(INT64_MAX).
        asMasm().bind(&isLarge);
        if (fromType == MIRType::Double) {
            asMasm().as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
            asMasm().as_truncld(ScratchDoubleReg, ScratchDoubleReg);
        } else {
            asMasm().as_subs(ScratchDoubleReg, input, ScratchDoubleReg);
            asMasm().as_truncls(ScratchDoubleReg, ScratchDoubleReg);
        }

        // Check that the result is in the uint64_t range.
        asMasm().moveFromDouble(ScratchDoubleReg, output);
        asMasm().as_cfc1(ScratchRegister, Assembler::FCSR);
        asMasm().ma_ext(ScratchRegister, ScratchRegister, 16, 1);
        asMasm().ma_dsrl(SecondScratchReg, output, Imm32(63));
        asMasm().ma_or(SecondScratchReg, ScratchRegister);
        asMasm().ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

        asMasm().ma_li(ScratchRegister, Imm32(1));
        asMasm().ma_dins(output, ScratchRegister, Imm32(63), Imm32(1));

        asMasm().bind(&done);
        asMasm().bind(oolRejoin);
        return;
    }

    // When the input value is Infinity, NaN, or rounds to an integer outside the
    // range [INT64_MIN; INT64_MAX + 1[, the Invalid Operation flag is set in the FCSR.
    if (fromType == MIRType::Double)
        asMasm().as_truncld(ScratchDoubleReg, input);
    else
        asMasm().as_truncls(ScratchDoubleReg, input);

    // Check that the result is in the int64_t range.
    asMasm().as_cfc1(output, Assembler::FCSR);
    asMasm().ma_ext(output, output, 16, 1);
    asMasm().ma_b(output, Imm32(0), oolEntry, Assembler::NotEqual);

    asMasm().bind(oolRejoin);
    asMasm().moveFromDouble(ScratchDoubleReg, output);
}
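The unsigned path above is the classic bias trick for converting doubles in [2^63, 2^64) with a signed truncation instruction. A scalar model (assuming, as the code does, that double(INT64_MAX) rounds to 2^63; the FCSR-based range checks are omitted):

#include <cmath>
#include <cstdint>

uint64_t truncDoubleToUint64(double x) {
    const double kBias = 9223372036854775808.0;   // double(INT64_MAX) == 2^63
    if (x < kBias)                                // small inputs: plain truncation
        return uint64_t(int64_t(std::trunc(x)));
    // Large inputs: remove the 2^63 bias, truncate, then set bit 63 again,
    // mirroring the subd/truncld/ma_dins sequence above.
    return uint64_t(int64_t(std::trunc(x - kBias))) | (uint64_t(1) << 63);
}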
void
MacroAssemblerMIPS64Compat::wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
                                            Register memoryBase, Register ptr, Register ptrScratch,
                                            Register64 output, Register tmp)
{
    uint32_t offset = access.offset();
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
    MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

    // Maybe add the offset.
    if (offset) {
        asMasm().addPtr(Imm32(offset), ptrScratch);
        ptr = ptrScratch;
    }

    unsigned byteSize = access.byteSize();
    bool isSigned;

    switch (access.type()) {
      case Scalar::Int8:   isSigned = true;  break;
      case Scalar::Uint8:  isSigned = false; break;
      case Scalar::Int16:  isSigned = true;  break;
      case Scalar::Uint16: isSigned = false; break;
      case Scalar::Int32:  isSigned = true;  break;
      case Scalar::Uint32: isSigned = false; break;
      case Scalar::Int64:  isSigned = true;  break;
      default: MOZ_CRASH("unexpected array type");
    }

    BaseIndex address(memoryBase, ptr, TimesOne);
    if (IsUnaligned(access)) {
        MOZ_ASSERT(tmp != InvalidReg);
        asMasm().ma_load_unaligned(access, output.reg, address, tmp,
                                   static_cast<LoadStoreSize>(8 * byteSize),
                                   isSigned ? SignExtend : ZeroExtend);
        return;
    }

    asMasm().memoryBarrierBefore(access.sync());
    asMasm().ma_load(output.reg, address, static_cast<LoadStoreSize>(8 * byteSize),
                     isSigned ? SignExtend : ZeroExtend);
    asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
    asMasm().memoryBarrierAfter(access.sync());
}
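A minimal model of the address arithmetic performed above (illustrative; the actual out-of-range protection comes from guard pages, which is why the code only asserts offset < wasm::OffsetGuardLimit):

#include <cstdint>

uintptr_t effectiveAddress(uintptr_t memoryBase, uint32_t ptr, uint32_t offset) {
    // The constant wasm offset is folded into the pointer register first
    // (the addPtr above), so the access itself is a plain base + index load.
    return memoryBase + uintptr_t(ptr) + uintptr_t(offset);
}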
void
MacroAssemblerMIPS64Compat::wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
                                             Register memoryBase, Register ptr, Register ptrScratch,
                                             Register tmp)
{
    uint32_t offset = access.offset();
    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
    MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

    // Maybe add the offset.
    if (offset) {
        asMasm().addPtr(Imm32(offset), ptrScratch);
        ptr = ptrScratch;
    }

    unsigned byteSize = access.byteSize();
    bool isSigned;
    switch (access.type()) {
      case Scalar::Int8:   isSigned = true;  break;
      case Scalar::Uint8:  isSigned = false; break;
      case Scalar::Int16:  isSigned = true;  break;
      case Scalar::Uint16: isSigned = false; break;
      case Scalar::Int32:  isSigned = true;  break;
      case Scalar::Uint32: isSigned = false; break;
      case Scalar::Int64:  isSigned = true;  break;
      default: MOZ_CRASH("unexpected array type");
    }

    BaseIndex address(memoryBase, ptr, TimesOne);

    if (IsUnaligned(access)) {
        MOZ_ASSERT(tmp != InvalidReg);
        asMasm().ma_store_unaligned(access, value.reg, address, tmp,
                                    static_cast<LoadStoreSize>(8 * byteSize),
                                    isSigned ? SignExtend : ZeroExtend);
        return;
    }

    asMasm().memoryBarrierBefore(access.sync());
    asMasm().ma_store(value.reg, address, static_cast<LoadStoreSize>(8 * byteSize),
                      isSigned ? SignExtend : ZeroExtend);
    asMasm().append(access, asMasm().size() - 4, asMasm().framePushed());
    asMasm().memoryBarrierAfter(access.sync());
}
template <typename T>
static void
CompareExchange64(MacroAssembler& masm, const Synchronization& sync, const T& mem,
@ -727,6 +727,9 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
    void convertUInt64ToDouble(Register src, FloatRegister dest);

    void wasmTruncateToI64(FloatRegister input, Register output, MIRType fromType,
                           bool isUnsigned, Label* oolEntry, Label* oolRejoin);

    void breakpoint();

    void checkStackAlignment();

@ -750,6 +753,11 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
  protected:
    bool buildOOLFakeExitFrame(void* fakeReturnAddr);

    void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
                         Register ptrScratch, Register64 output, Register tmp);
    void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
                          Register ptr, Register ptrScratch, Register tmp);

  public:
    CodeOffset labelForPatch() {
        return CodeOffset(nextOffset().getOffset());

@ -126,6 +126,14 @@
# include "jit/x86-shared/Architecture-x86-shared.h"
# include "jit/x86-shared/Assembler-x86-shared.h"
#endif
#if defined(JS_CODEGEN_MIPS32)
# include "jit/mips-shared/Assembler-mips-shared.h"
# include "jit/mips32/Assembler-mips32.h"
#endif
#if defined(JS_CODEGEN_MIPS64)
# include "jit/mips-shared/Assembler-mips-shared.h"
# include "jit/mips64/Assembler-mips64.h"
#endif

#include "wasm/WasmBinaryIterator.h"
#include "wasm/WasmGenerator.h"

@ -178,8 +186,7 @@ typedef unsigned BitSize;
enum class UseABI { Wasm, System };
enum class InterModule { False = false, True = true };

#if defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_NONE) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
#if defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_NONE)
# define RABALDR_SCRATCH_I32
# define RABALDR_SCRATCH_F32
# define RABALDR_SCRATCH_F64

@ -220,6 +227,20 @@ static const Register RabaldrScratchI32 = CallTempReg2;
# define RABALDR_FLOAT_TO_I64_CALLOUT
#endif

#ifdef JS_CODEGEN_MIPS32
# define RABALDR_SCRATCH_I32
static const Register RabaldrScratchI32 = CallTempReg2;

# define RABALDR_INT_DIV_I64_CALLOUT
# define RABALDR_I64_TO_FLOAT_CALLOUT
# define RABALDR_FLOAT_TO_I64_CALLOUT
#endif

#ifdef JS_CODEGEN_MIPS64
# define RABALDR_SCRATCH_I32
static const Register RabaldrScratchI32 = CallTempReg2;
#endif

template<MIRType t>
struct RegTypeOf {
    static_assert(t == MIRType::Float32 || t == MIRType::Double, "Float mask type");

@ -374,6 +395,17 @@ struct SpecificRegs
      : abiReturnRegI64(ReturnReg64)
    {}
};
#elif defined(JS_CODEGEN_MIPS32)
struct SpecificRegs
{
    RegI64 abiReturnRegI64;

    SpecificRegs()
      : abiReturnRegI64(ReturnReg64)
    {}
};
#elif defined(JS_CODEGEN_MIPS64)
struct SpecificRegs {};
#else
struct SpecificRegs
{

@ -3094,6 +3126,8 @@ class BaseCompiler final : public BaseCompilerInterface
    call.hardFP = false;
# endif
    call.abi.setUseHardFp(call.hardFP);
#elif defined(JS_CODEGEN_MIPS32)
    call.abi.enforceO32ABI();
#endif
}
@ -3209,11 +3243,18 @@ class BaseCompiler final : public BaseCompilerInterface
}
#if defined(JS_CODEGEN_REGISTER_PAIR)
case ABIArg::GPR_PAIR: {
# ifdef JS_CODEGEN_ARM
# if defined(JS_CODEGEN_ARM)
    ScratchF64 scratch(*this);
    loadF64(arg, scratch);
    masm.ma_vxfer(scratch, argLoc.evenGpr(), argLoc.oddGpr());
    break;
# elif defined(JS_CODEGEN_MIPS32)
    ScratchF64 scratch(*this);
    loadF64(arg, scratch);
    MOZ_ASSERT(MOZ_LITTLE_ENDIAN);
    masm.moveFromDoubleLo(scratch, argLoc.evenGpr());
    masm.moveFromDoubleHi(scratch, argLoc.oddGpr());
    break;
# else
    MOZ_CRASH("BaseCompiler platform hook: passArg F64 pair");
# endif
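The MIPS32 O32 case splits the double across the even/odd GPR pair by word. A sketch of that split for the little-endian layout the MOZ_ASSERT guards (illustrative, not the masm API):

#include <cstdint>
#include <cstring>

void splitF64(double d, uint32_t* evenGpr, uint32_t* oddGpr) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    *evenGpr = uint32_t(bits);         // moveFromDoubleLo: low mantissa word
    *oddGpr  = uint32_t(bits >> 32);   // moveFromDoubleHi: sign/exponent word
}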
@ -3322,10 +3363,11 @@ class BaseCompiler final : public BaseCompilerInterface
// Sundry low-level code generators.

// The compiler depends on moveImm32() clearing the high bits of a 64-bit
// register on 64-bit systems.
// register on 64-bit systems, except on MIPS64, where the high bits are
// sign-extended from the low bits.

void moveImm32(int32_t v, RegI32 dest) {
    masm.mov(ImmWord(uint32_t(v)), dest);
    masm.move32(Imm32(v), dest);
}

void moveImm64(int64_t v, RegI64 dest) {
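The contrast the revised comment draws can be modeled directly (an illustrative sketch; move32 maps to a sign-extending 32-bit move on MIPS64):

#include <cstdint>

uint64_t oldMoveImm32(int32_t v) { return uint64_t(uint32_t(v)); }  // zero-extends
uint64_t mips64Move32(int32_t v) { return uint64_t(int64_t(v)); }   // sign-extends
// oldMoveImm32(-1) == 0x00000000FFFFFFFF, while
// mips64Move32(-1) == 0xFFFFFFFFFFFFFFFF.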
@ -3400,6 +3442,22 @@ class BaseCompiler final : public BaseCompilerInterface
    // Jump indirect via table element.
    masm.ma_ldr(DTRAddr(scratch, DtrRegImmShift(switchValue, LSL, 2)), pc, Offset,
                Assembler::Always);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    ScratchI32 scratch(*this);
    CodeLabel tableCl;

    masm.ma_li(scratch, tableCl.patchAt());
# ifdef JS_CODEGEN_MIPS32
    masm.lshiftPtr(Imm32(4), switchValue);
# else
    masm.ma_mul(switchValue, switchValue, Imm32(6 * 4));
# endif
    masm.addPtr(switchValue, scratch);

    tableCl.target()->bind(theTable->offset());
    masm.addCodeLabel(tableCl);

    masm.branch(scratch);
#else
    MOZ_CRASH("BaseCompiler platform hook: tableSwitch");
#endif
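The two scaling constants above encode the byte size of one inline jump-table entry: four instructions (1 << 4 bytes) per case on MIPS32, six instructions (6 * 4 bytes) on MIPS64. A sketch of the resulting indexing, assuming fixed-size per-case stubs:

#include <cstdint>

uintptr_t tableEntryAddr(uintptr_t tableBase, uint32_t index, bool mips64) {
    uint32_t entrySize = mips64 ? 6 * 4    // ma_mul by Imm32(6 * 4)
                                : 1 << 4;  // lshiftPtr by Imm32(4)
    return tableBase + uintptr_t(index) * entrySize;
}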
@ -3505,6 +3563,12 @@ class BaseCompiler final : public BaseCompilerInterface
        masm.cqo();
        masm.idivq(rhs.reg);
    }
# elif defined(JS_CODEGEN_MIPS64)
    if (isUnsigned)
        masm.as_ddivu(srcDest.reg, rhs.reg);
    else
        masm.as_ddiv(srcDest.reg, rhs.reg);
    masm.as_mflo(srcDest.reg);
# else
    MOZ_CRASH("BaseCompiler platform hook: quotientI64");
# endif

@ -3535,6 +3599,12 @@ class BaseCompiler final : public BaseCompilerInterface
        masm.idivq(rhs.reg);
    }
    masm.movq(rdx, rax);
# elif defined(JS_CODEGEN_MIPS64)
    if (isUnsigned)
        masm.as_ddivu(srcDest.reg, rhs.reg);
    else
        masm.as_ddiv(srcDest.reg, rhs.reg);
    masm.as_mfhi(srcDest.reg);
# else
    MOZ_CRASH("BaseCompiler platform hook: remainderI64");
# endif
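Both hunks rely on the MIPS HI/LO convention: ddiv/ddivu leave the quotient in LO and the remainder in HI, and mflo or mfhi selects the half each operation needs. A scalar model (illustrative; zero-divisor traps are handled elsewhere):

#include <cstdint>

struct HiLo { int64_t hi, lo; };

HiLo emulateDdiv(int64_t a, int64_t b) {
    return { a % b,    // HI: remainder, read by as_mfhi in remainderI64
             a / b };  // LO: quotient,  read by as_mflo in quotientI64
}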
@ -3545,7 +3615,8 @@ class BaseCompiler final : public BaseCompilerInterface
RegI32 needRotate64Temp() {
#if defined(JS_CODEGEN_X86)
    return needI32();
#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    return RegI32::Invalid();
#else
    MOZ_CRASH("BaseCompiler platform hook: needRotate64Temp");

@ -3561,7 +3632,7 @@ class BaseCompiler final : public BaseCompilerInterface
RegI32 needPopcnt32Temp() {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    return AssemblerX86Shared::HasPOPCNT() ? RegI32::Invalid() : needI32();
#elif defined(JS_CODEGEN_ARM)
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    return needI32();
#else
    MOZ_CRASH("BaseCompiler platform hook: needPopcnt32Temp");

@ -3571,7 +3642,7 @@ class BaseCompiler final : public BaseCompilerInterface
RegI32 needPopcnt64Temp() {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    return AssemblerX86Shared::HasPOPCNT() ? RegI32::Invalid() : needI32();
#elif defined(JS_CODEGEN_ARM)
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    return needI32();
#else
    MOZ_CRASH("BaseCompiler platform hook: needPopcnt64Temp");

@ -3593,7 +3664,7 @@ class BaseCompiler final : public BaseCompilerInterface

virtual void generate(MacroAssembler* masm) override {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
    defined(JS_CODEGEN_ARM64)
    defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    if (src.tag == AnyReg::F32)
        masm->oolWasmTruncateCheckF32ToI32(src.f32(), isUnsigned, off, rejoin());
    else if (src.tag == AnyReg::F64)

@ -3656,7 +3727,7 @@ class BaseCompiler final : public BaseCompilerInterface

virtual void generate(MacroAssembler* masm) override {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
    defined(JS_CODEGEN_ARM64)
    defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    if (src.tag == AnyReg::F32)
        masm->oolWasmTruncateCheckF32ToI64(src.f32(), isUnsigned, off, rejoin());
    else if (src.tag == AnyReg::F64)

@ -3682,7 +3753,7 @@ class BaseCompiler final : public BaseCompilerInterface
}

MOZ_MUST_USE bool truncateF32ToI64(RegF32 src, RegI64 dest, bool isUnsigned, RegF64 temp) {
# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_MIPS64)
    OutOfLineCode* ool =
        addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(src),
                                                                          isUnsigned,

@ -3702,7 +3773,7 @@ class BaseCompiler final : public BaseCompilerInterface
}

MOZ_MUST_USE bool truncateF64ToI64(RegF64 src, RegI64 dest, bool isUnsigned, RegF64 temp) {
# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_MIPS64)
    OutOfLineCode* ool =
        addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(src),
                                                                          isUnsigned,
@ -3771,8 +3842,7 @@ class BaseCompiler final : public BaseCompilerInterface
    masm.cmpPtrSet(Assembler::Equal, src.reg, ImmWord(0), dest);
#else
    masm.or32(src.high, src.low);
    masm.cmp32(src.low, Imm32(0));
    masm.emitSet(Assembler::Equal, dest);
    masm.cmp32Set(Assembler::Equal, src.low, Imm32(0), dest);
#endif
}
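The rewritten 32-bit path folds the two halves together before a single set-on-zero, saving the separate cmp32/emitSet pair. A scalar model:

#include <cstdint>

bool eqz64(uint32_t low, uint32_t high) {
    low |= high;       // or32(src.high, src.low)
    return low == 0;   // cmp32Set(Assembler::Equal, src.low, Imm32(0), dest)
}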
@ -3937,7 +4007,8 @@ class BaseCompiler final : public BaseCompilerInterface
#endif
}

#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
BaseIndex prepareAtomicMemoryAccess(MemoryAccessDesc* access, AccessCheck* check, RegI32 tls,
                                    RegI32 ptr)
{

@ -3982,11 +4053,13 @@ class BaseCompiler final : public BaseCompilerInterface
        break;
    }
}
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    *temp1 = needI32();
#endif
}

MOZ_MUST_USE bool needTlsForAccess(const AccessCheck& check) {
#if defined(JS_CODEGEN_ARM)
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    return !check.omitBoundsCheck;
#elif defined(JS_CODEGEN_X86)
    return true;

@ -4026,7 +4099,7 @@ class BaseCompiler final : public BaseCompilerInterface
        if (byteRegConflict)
            masm.mov(scratch, dest.i32());
    }
#elif defined(JS_CODEGEN_ARM)
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    if (IsUnaligned(*access)) {
        switch (dest.tag) {
          case AnyReg::I64:

@ -4061,6 +4134,8 @@ class BaseCompiler final : public BaseCompilerInterface
#if defined(JS_CODEGEN_ARM)
    if (IsUnaligned(access) && srcType != ValType::I32)
        return needI32();
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    return needI32();
#endif
    return RegI32::Invalid();
}

@ -4118,7 +4193,7 @@ class BaseCompiler final : public BaseCompilerInterface
            break;
          default:
            MOZ_ASSERT(temp.isInvalid());
            masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr);
            masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr, temp);
            break;
        }
    } else {

@ -4130,6 +4205,28 @@ class BaseCompiler final : public BaseCompilerInterface
        else
            masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    if (IsUnaligned(*access)) {
        switch (src.tag) {
          case AnyReg::I64:
            masm.wasmUnalignedStoreI64(*access, src.i64(), HeapReg, ptr, ptr, temp);
            break;
          case AnyReg::F32:
            masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr, temp);
            break;
          case AnyReg::F64:
            masm.wasmUnalignedStoreFP(*access, src.f64(), HeapReg, ptr, ptr, temp);
            break;
          default:
            masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr, temp);
            break;
        }
    } else {
        if (src.tag == AnyReg::I64)
            masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
        else
            masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
    }
#else
    MOZ_CRASH("BaseCompiler platform hook: store");
#endif
|
@ -4137,26 +4234,53 @@ class BaseCompiler final : public BaseCompilerInterface
|
|||
return true;
|
||||
}
|
||||
|
||||
template <size_t Count>
|
||||
struct Atomic32Temps : mozilla::Array<RegI32, Count> {
|
||||
|
||||
// Allocate all temp registers if 'allocate' is not specified.
|
||||
void allocate(BaseCompiler* bc, size_t allocate = Count) {
|
||||
MOZ_ASSERT(Count != 0);
|
||||
for (size_t i = 0; i < allocate; ++i)
|
||||
this->operator[](i) = bc->needI32();
|
||||
}
|
||||
void maybeFree(BaseCompiler* bc){
|
||||
for (size_t i = 0; i < Count; ++i)
|
||||
bc->maybeFreeI32(this->operator[](i));
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
|
||||
typedef Atomic32Temps<3> AtomicRMW32Temps;
|
||||
#else
|
||||
typedef Atomic32Temps<1> AtomicRMW32Temps;
|
||||
#endif
|
||||
|
||||
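The three MIPS temps exist because 1- and 2-byte atomics are implemented on the containing aligned 32-bit word with a shifted mask, which needs temporaries for the lane shift, the mask, and a scratch word. A sketch of that technique with C++ atomics standing in for the LL/SC loop (assumes a little-endian byte lane; illustrative only):

#include <atomic>
#include <cstdint>

uint8_t atomicFetchAdd8(std::atomic<uint32_t>* word, unsigned byteIndex, uint8_t v) {
    unsigned shift = byteIndex * 8;               // temp 1: lane shift
    uint32_t mask = uint32_t(0xFF) << shift;      // temp 2: lane mask
    uint32_t old = word->load();
    uint32_t desired;                             // temp 3: scratch word
    do {
        uint32_t byte = ((old & mask) >> shift) + v;
        desired = (old & ~mask) | ((byte & 0xFFu) << shift);
    } while (!word->compare_exchange_weak(old, desired));
    return uint8_t((old & mask) >> shift);        // previous byte value
}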
template<typename T>
void atomicRMW32(T srcAddr, Scalar::Type viewType, AtomicOp op, RegI32 rv, RegI32 rd, RegI32 temp)
void atomicRMW32(T srcAddr, Scalar::Type viewType, AtomicOp op, RegI32 rv, RegI32 rd,
                 const AtomicRMW32Temps& temps)
{
    Synchronization sync = Synchronization::Full();
    switch (viewType) {
      case Scalar::Uint8: {
      case Scalar::Uint8:
#ifdef JS_CODEGEN_X86
      {
        // The temp, if used, must be a byte register.
        MOZ_ASSERT(temp.isInvalid());
        ScratchI8 scratch(*this);
        if (op != AtomicFetchAddOp && op != AtomicFetchSubOp)
            temp = scratch;
#endif
        masm.atomicFetchOp(viewType, sync, op, rv, srcAddr, temp, rd);
        masm.atomicFetchOp(viewType, sync, op, rv, srcAddr, temps[0], rd);
        break;
      }
#endif
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
        masm.atomicFetchOp(viewType, sync, op, rv, srcAddr, temp, rd);
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
        masm.atomicFetchOp(viewType, sync, op, rv, srcAddr, temps[0], temps[1], temps[2], rd);
#else
        masm.atomicFetchOp(viewType, sync, op, rv, srcAddr, temps[0], rd);
#endif
        break;
      default: {
        MOZ_CRASH("Bad type for atomic operation");

@ -4172,13 +4296,21 @@ class BaseCompiler final : public BaseCompilerInterface
    masm.atomicFetchOp64(Synchronization::Full(), op, value, srcAddr, temp, rd);
}

#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
typedef Atomic32Temps<3> AtomicCmpXchg32Temps;
#else
typedef Atomic32Temps<0> AtomicCmpXchg32Temps;
#endif

template<typename T>
void atomicCmpXchg32(T srcAddr, Scalar::Type viewType, RegI32 rexpect, RegI32 rnew, RegI32 rd)
void atomicCmpXchg32(T srcAddr, Scalar::Type viewType, RegI32 rexpect, RegI32 rnew, RegI32 rd,
                     const AtomicCmpXchg32Temps& temps)
{
    Synchronization sync = Synchronization::Full();
    switch (viewType) {
      case Scalar::Uint8: {
      case Scalar::Uint8:
#if defined(JS_CODEGEN_X86)
      {
        ScratchI8 scratch(*this);
        MOZ_ASSERT(rd == specific.eax);
        if (!ra.isSingleByteI32(rnew)) {

@ -4186,22 +4318,34 @@ class BaseCompiler final : public BaseCompilerInterface
            masm.movl(rnew, scratch);
            rnew = scratch;
        }
#endif
        masm.compareExchange(viewType, sync, srcAddr, rexpect, rnew, rd);
        break;
      }
#endif
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
        masm.compareExchange(viewType, sync, srcAddr, rexpect, rnew, temps[0], temps[1],
                             temps[2], rd);
#else
        masm.compareExchange(viewType, sync, srcAddr, rexpect, rnew, rd);
#endif
        break;
      default:
        MOZ_CRASH("Bad type for atomic operation");
    }
}

#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
typedef Atomic32Temps<3> AtomicXchg32Temps;
#else
typedef Atomic32Temps<0> AtomicXchg32Temps;
#endif

template<typename T>
void atomicXchg32(T srcAddr, Scalar::Type viewType, RegI32 rv, RegI32 rd)
void atomicXchg32(T srcAddr, Scalar::Type viewType, RegI32 rv, RegI32 rd,
                  const AtomicXchg32Temps& temps)
{
    Synchronization sync = Synchronization::Full();
    switch (viewType) {

@ -4222,7 +4366,11 @@ class BaseCompiler final : public BaseCompilerInterface
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
        masm.atomicExchange(viewType, sync, srcAddr, rv, temps[0], temps[1], temps[2], rd);
#else
        masm.atomicExchange(viewType, sync, srcAddr, rv, rd);
#endif
        break;
      default:
        MOZ_CRASH("Bad type for atomic operation");

@ -4273,6 +4421,8 @@ class BaseCompiler final : public BaseCompilerInterface
    *r1 = popI64();
    *r0 = popI64ToSpecific(specific.edx_eax);
    *temp = needI32();
#elif defined(JS_CODEGEN_MIPS64)
    pop2xI64(r0, r1);
#else
    pop2xI64(r0, r1);
    *temp = needI32();
@ -4378,10 +4528,11 @@ class BaseCompiler final : public BaseCompilerInterface
{
    using Base = PopBase<RegI32>;
    RegI32 rexpect, rnew;
    AtomicCmpXchg32Temps temps;

  public:
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
    explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type) : Base(bc) {
    explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        // For cmpxchg, the expected value and the result are both in eax.
        bc->needI32(bc->specific.eax);
        if (type == ValType::I64) {

@ -4397,7 +4548,7 @@ class BaseCompiler final : public BaseCompilerInterface
        bc->freeI32(rnew);
    }
#elif defined(JS_CODEGEN_ARM)
    explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type) : Base(bc) {
    explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        if (type == ValType::I64) {
            rnew = bc->popI64ToI32();
            rexpect = bc->popI64ToI32();

@ -4411,15 +4562,33 @@ class BaseCompiler final : public BaseCompilerInterface
        bc->freeI32(rnew);
        bc->freeI32(rexpect);
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        if (type == ValType::I64) {
            rnew = bc->popI64ToI32();
            rexpect = bc->popI64ToI32();
        } else {
            rnew = bc->popI32();
            rexpect = bc->popI32();
        }
        if (Scalar::byteSize(viewType) < 4)
            temps.allocate(bc);
        setRd(bc->needI32());
    }
    ~PopAtomicCmpXchg32Regs() {
        bc->freeI32(rnew);
        bc->freeI32(rexpect);
        temps.maybeFree(bc);
    }
#else
    explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type) : Base(bc) {
    explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        MOZ_CRASH("BaseCompiler porting interface: PopAtomicCmpXchg32Regs");
    }
#endif

    template<typename T>
    void atomicCmpXchg32(T srcAddr, Scalar::Type viewType) {
        bc->atomicCmpXchg32(srcAddr, viewType, rexpect, rnew, getRd());
        bc->atomicCmpXchg32(srcAddr, viewType, rexpect, rnew, getRd(), temps);
    }
};
@ -4468,6 +4637,16 @@ class BaseCompiler final : public BaseCompilerInterface
        bc->freeI64(rexpect);
        bc->freeI64(rnew);
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
        rnew = bc->popI64();
        rexpect = bc->popI64();
        setRd(bc->needI64());
    }
    ~PopAtomicCmpXchg64Regs() {
        bc->freeI64(rexpect);
        bc->freeI64(rnew);
    }
#else
    explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
        MOZ_CRASH("BaseCompiler porting interface: PopAtomicCmpXchg64Regs");

@ -4512,6 +4691,10 @@ class BaseCompiler final : public BaseCompilerInterface
    explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
        setRd(bc->needI64Pair());
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
        setRd(bc->needI64());
    }
# else
    explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
        MOZ_CRASH("BaseCompiler porting interface: PopAtomicLoad64Regs");
@ -4537,7 +4720,8 @@ class BaseCompiler final : public BaseCompilerInterface
class PopAtomicRMW32Regs : public PopBase<RegI32>
{
    using Base = PopBase<RegI32>;
    RegI32 rv, temp;
    RegI32 rv;
    AtomicRMW32Temps temps;

  public:
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)

@ -4568,16 +4752,16 @@ class BaseCompiler final : public BaseCompilerInterface
            // Single-byte is a special case handled very locally with
            // ScratchReg, see atomicRMW32 above.
            if (Scalar::byteSize(viewType) > 1)
                temp = bc->needI32();
                temps.allocate(bc);
# else
            temp = bc->needI32();
            temps.allocate(bc);
# endif
        }
    }
    ~PopAtomicRMW32Regs() {
        if (rv != bc->specific.eax)
            bc->freeI32(rv);
        bc->maybeFreeI32(temp);
        temps.maybeFree(bc);
    }
#elif defined(JS_CODEGEN_ARM)
    explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType,

@ -4585,12 +4769,27 @@ class BaseCompiler final : public BaseCompilerInterface
      : Base(bc)
    {
        rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
        temp = bc->needI32();
        temps.allocate(bc);
        setRd(bc->needI32());
    }
    ~PopAtomicRMW32Regs() {
        bc->freeI32(rv);
        bc->freeI32(temp);
        temps.maybeFree(bc);
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType,
                                AtomicOp op)
      : Base(bc)
    {
        rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
        if (Scalar::byteSize(viewType) < 4)
            temps.allocate(bc);

        setRd(bc->needI32());
    }
    ~PopAtomicRMW32Regs() {
        bc->freeI32(rv);
        temps.maybeFree(bc);
    }
#else
    explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType,

@ -4603,7 +4802,7 @@ class BaseCompiler final : public BaseCompilerInterface

    template<typename T>
    void atomicRMW32(T srcAddr, Scalar::Type viewType, AtomicOp op) {
        bc->atomicRMW32(srcAddr, viewType, op, rv, getRd(), temp);
        bc->atomicRMW32(srcAddr, viewType, op, rv, getRd(), temps);
    }
};
@ -4666,6 +4865,16 @@ class BaseCompiler final : public BaseCompilerInterface
        bc->freeI64(rv);
        bc->freeI64(temp);
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) {
        rv = bc->popI64();
        temp = bc->needI64();
        setRd(bc->needI64());
    }
    ~PopAtomicRMW64Regs() {
        bc->freeI64(rv);
        bc->freeI64(temp);
    }
#else
    explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) {
        MOZ_CRASH("BaseCompiler porting interface: PopAtomicRMW64Regs");
@ -4691,31 +4900,43 @@ class BaseCompiler final : public BaseCompilerInterface
{
    using Base = PopBase<RegI32>;
    RegI32 rv;
    AtomicXchg32Temps temps;

  public:
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
    explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type) : Base(bc) {
    explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        // The xchg instruction reuses rv as rd.
        rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
        setRd(rv);
    }
#elif defined(JS_CODEGEN_ARM)
    explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type) : Base(bc) {
    explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
        setRd(bc->needI32());
    }
    ~PopAtomicXchg32Regs() {
        bc->freeI32(rv);
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
        if (Scalar::byteSize(viewType) < 4)
            temps.allocate(bc);
        setRd(bc->needI32());
    }
    ~PopAtomicXchg32Regs() {
        temps.maybeFree(bc);
        bc->freeI32(rv);
    }
#else
    explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type) : Base(bc) {
    explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type, Scalar::Type viewType) : Base(bc) {
        MOZ_CRASH("BaseCompiler porting interface: PopAtomicXchg32Regs");
    }
#endif

    template<typename T>
    void atomicXchg32(T srcAddr, Scalar::Type viewType) {
        bc->atomicXchg32(srcAddr, viewType, rv, getRd());
        bc->atomicXchg32(srcAddr, viewType, rv, getRd(), temps);
    }
};
@ -4756,6 +4977,14 @@ class BaseCompiler final : public BaseCompilerInterface
    ~PopAtomicXchg64Regs() {
        bc->freeI64(rv);
    }
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
        rv = bc->popI64ToSpecific(bc->needI64());
        setRd(bc->needI64());
    }
    ~PopAtomicXchg64Regs() {
        bc->freeI64(rv);
    }
#else
    explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
        MOZ_CRASH("BaseCompiler porting interface: xchg64");
@ -8099,7 +8328,7 @@ BaseCompiler::emitAtomicCmpXchg(ValType type, Scalar::Type viewType)
                    /*numSimdExprs=*/ 0, Synchronization::Full());

    if (Scalar::byteSize(viewType) <= 4) {
        PopAtomicCmpXchg32Regs regs(this, type);
        PopAtomicCmpXchg32Regs regs(this, type, viewType);

        AccessCheck check;
        RegI32 rp = popMemoryAccess(&access, &check);

@ -8290,7 +8519,7 @@ BaseCompiler::emitAtomicXchg(ValType type, Scalar::Type viewType)
                    /*numSimdElems=*/ 0, Synchronization::Full());

    if (Scalar::byteSize(viewType) <= 4) {
        PopAtomicXchg32Regs regs(this, type);
        PopAtomicXchg32Regs regs(this, type, viewType);
        RegI32 rp = popMemoryAccess(&access, &check);
        RegI32 tls = maybeLoadTlsForAccess(check);
@ -9235,7 +9464,8 @@ js::wasm::BaselineCanCompile()
    return false;
#endif

#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    return true;
#else
    return false;