Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1425149 - Revamp the MASM atomics API. r=nbp

We do two things that are intertwined:

1. Expose primitive atomic operations for JS and Wasm in MacroAssembler.h, and rewrite the back-ends and the code generators to conform to the new APIs.

2. Parameterize the atomic operations with a synchronization specification, which is an abstraction of the barriers that are needed. Previously most implementations assumed seq_cst, but this will change with Wasm.

Note that, by and large, the MIPS back-ends are not updated; they will need to be adapted separately.

--HG--
extra : source : 8dc620adcb86dea1c902069e15665891672e32db
extra : histedit_source : 315fef2adaadd183a681977a01008a419db1822b,e51e63492279231517b0590c2ffd8add55fa6136
This commit is contained in:
Parent
61cf15cc85
Commit
61dcca4d6b
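The shape of the change at a typical call site (both lines taken from the ARM code-generator diff below): the old type-dispatching helpers give way to MacroAssembler entry points that carry an explicit Synchronization argument.

    // Before: the barrier discipline is implicit (seq_cst assumed by the implementation).
    masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);

    // After: the caller states the synchronization it needs.
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);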
@@ -67,6 +67,37 @@ static constexpr MemoryBarrierBits MembarAfterLoad = MembarLoadLoad|MembarLoadSt
 static constexpr MemoryBarrierBits MembarBeforeStore = MembarStoreStore;
 static constexpr MemoryBarrierBits MembarAfterStore = MembarStoreLoad;
 
+struct Synchronization
+{
+    const MemoryBarrierBits barrierBefore;
+    const MemoryBarrierBits barrierAfter;
+
+    constexpr Synchronization(MemoryBarrierBits before, MemoryBarrierBits after)
+      : barrierBefore(before),
+        barrierAfter(after)
+    {}
+
+    static Synchronization None() {
+        return Synchronization(MemoryBarrierBits(MembarNobits), MemoryBarrierBits(MembarNobits));
+    }
+
+    static Synchronization Full() {
+        return Synchronization(MembarFull, MembarFull);
+    }
+
+    static Synchronization Load() {
+        return Synchronization(MembarBeforeLoad, MembarAfterLoad);
+    }
+
+    static Synchronization Store() {
+        return Synchronization(MembarBeforeStore, MembarAfterStore);
+    }
+
+    bool isNone() const {
+        return (barrierBefore | barrierAfter) == MembarNobits;
+    }
+};
+
 } // namespace jit
 } // namespace js
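To see how the specification decomposes into concrete barriers, here is a minimal standalone sketch of the same idea. The bit values are illustrative assumptions; only the structure mirrors the struct added above:

    // Standalone model of Synchronization (illustrative bit values, not the real ones).
    #include <cstdio>

    enum MemoryBarrierBits {
        MembarNobits    = 0,
        MembarLoadLoad  = 1, MembarLoadStore  = 2,
        MembarStoreLoad = 4, MembarStoreStore = 8,
        MembarFull      = 15
    };

    struct Synchronization {
        const int barrierBefore;
        const int barrierAfter;
        static Synchronization Load() { return { MembarNobits, MembarLoadLoad | MembarLoadStore }; }
        static Synchronization Full() { return { MembarFull, MembarFull }; }
        bool isNone() const { return (barrierBefore | barrierAfter) == MembarNobits; }
    };

    int main() {
        Synchronization s = Synchronization::Load();
        // A back-end emits barrier(s.barrierBefore), then the access, then
        // barrier(s.barrierAfter); isNone() lets it skip barriers entirely.
        printf("before=%d after=%d none=%d\n", s.barrierBefore, s.barrierAfter, (int)s.isNone());
    }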
@@ -3565,8 +3565,9 @@ LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins)
     if (ins->readType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
         tempDef = temp();
 
+    Synchronization sync = Synchronization::Load();
     if (ins->requiresMemoryBarrier()) {
-        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarBeforeLoad);
+        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(sync.barrierBefore);
         add(fence, ins);
     }
     LLoadUnboxedScalar* lir = new(alloc()) LLoadUnboxedScalar(elements, index, tempDef);
@@ -3574,7 +3575,7 @@ LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins)
         assignSnapshot(lir, Bailout_Overflow);
     define(lir, ins);
     if (ins->requiresMemoryBarrier()) {
-        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarAfterLoad);
+        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(sync.barrierAfter);
         add(fence, ins);
     }
 }
@@ -3675,13 +3676,14 @@ LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins)
     // is a store instruction that incorporates the necessary
     // barriers, and we could use that instead of separate barrier and
     // store instructions.  See bug #1077027.
+    Synchronization sync = Synchronization::Store();
     if (ins->requiresMemoryBarrier()) {
-        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarBeforeStore);
+        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(sync.barrierBefore);
         add(fence, ins);
     }
     add(new(alloc()) LStoreUnboxedScalar(elements, index, value), ins);
     if (ins->requiresMemoryBarrier()) {
-        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(MembarAfterStore);
+        LMemoryBarrier* fence = new(alloc()) LMemoryBarrier(sync.barrierAfter);
         add(fence, ins);
     }
 }
@@ -3243,8 +3243,123 @@ MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register tem
     branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
 }
 
+// ========================================================================
+// JS atomic operations.
+
+template<typename T>
+static void
+CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                  const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
+    }
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const Address& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const BaseIndex& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+template<typename T>
+static void
+AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                 const T& mem, Register value, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicExchange(arrayType, sync, mem, value, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const Address& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const BaseIndex& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+template<typename T>
+static void
+AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
+                AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+        masm.convertUInt32ToDouble(temp1, output.fpu());
+    } else {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const Address& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const BaseIndex& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const BaseIndex& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const Address& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
 //}}} check_macroassembler_style
 
+void
+MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
+    memoryBarrier(sync.barrierBefore);
+}
+
+void
+MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
+    memoryBarrier(sync.barrierAfter);
+}
+
 void
 MacroAssembler::loadWasmTlsRegFromFrame(Register dest)
 {
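A note on why the Scalar::Uint32 branches above route the result through a temp and convertUInt32ToDouble (bug 1077305): a uint32 result such as 0x80000000 is not representable as a signed int32 JS value, so it must be delivered as a double, which holds every uint32 exactly. A standalone illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t fetched = 0x80000000u;      // old value returned by the atomic op
        int32_t asInt32 = (int32_t)fetched;  // would appear negative: wrong JS value
        double asDouble = (double)fetched;   // exact: doubles hold all uint32 values
        printf("%d vs %.0f\n", asInt32, asDouble);
    }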
@@ -1520,6 +1520,218 @@ class MacroAssembler : public MacroAssemblerSpecific
     inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;
 
+  public:
+    // ========================================================================
+    // Primitive atomic operations.
+    //
+    // If the access is from JS and the eventual destination of the result is a
+    // js::Value, it's probably best to use the JS-specific versions of these,
+    // see further below.
+    //
+    // Temp registers must be defined unless otherwise noted in the per-function
+    // constraints.
+
+    // 8-bit, 16-bit, and 32-bit wide operations.
+    //
+    // The 8-bit and 16-bit operations zero-extend or sign-extend the result to
+    // 32 bits, according to `type`.  On 64-bit systems, the upper 32 bits of
+    // the result will be zero.
+
+    // CompareExchange with memory.  Return the value that was in memory,
+    // whether we wrote or not.
+    //
+    // x86-shared: `output` must be eax.
+
+    void compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
+                         Register expected, Register replacement, Register output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
+                         Register expected, Register replacement, Register output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    // Exchange with memory.  Return the value initially in memory.
+
+    void atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
+                        Register value, Register output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
+                        Register value, Register output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    // Read-modify-write with memory.  Return the value in memory before the
+    // operation.
+    //
+    // x86-shared:
+    //   For 8-bit operations, `value` and `output` must have a byte subregister.
+    //   For Add and Sub, `temp` must be invalid.
+    //   For And, Or, and Xor, `output` must be eax and `temp` must have a byte subregister.
+    //
+    // ARM: Registers `value` and `output` must differ.
+
+    void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                       Register value, const Address& mem, Register temp, Register output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                       Imm32 value, const Address& mem, Register temp, Register output)
+        DEFINED_ON(x86_shared);
+
+    void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                       Register value, const BaseIndex& mem, Register temp, Register output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                       Imm32 value, const BaseIndex& mem, Register temp, Register output)
+        DEFINED_ON(x86_shared);
+
+    // Read-modify-write with memory.  Return no value.
+
+    void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
+                        const Address& mem, Register temp)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Imm32 value,
+                        const Address& mem, Register temp)
+        DEFINED_ON(x86_shared);
+
+    void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
+                        const BaseIndex& mem, Register temp)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Imm32 value,
+                        const BaseIndex& mem, Register temp)
+        DEFINED_ON(x86_shared);
+
+    // 64-bit wide operations.
+
+    // 64-bit atomic load.  On 64-bit systems, use regular wasm load with
+    // Synchronization::Load, not this method.
+    //
+    // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
+    // ARM: `temp` should be invalid; `output` must be (even,odd) pair.
+
+    void atomicLoad64(const Synchronization& sync, const Address& mem, Register64 temp,
+                      Register64 output)
+        DEFINED_ON(arm, x86);
+
+    void atomicLoad64(const Synchronization& sync, const BaseIndex& mem, Register64 temp,
+                      Register64 output)
+        DEFINED_ON(arm, x86);
+
+    // x86: `expected` must be the same as `output`, and must be edx:eax
+    // x86: `replacement` must be ecx:ebx
+    // x64: `output` must be rax.
+    // ARM: Registers must be distinct; `replacement` and `output` must be (even,odd) pairs.
+
+    void compareExchange64(const Synchronization& sync, const Address& mem, Register64 expected,
+                           Register64 replacement, Register64 output)
+        DEFINED_ON(arm, arm64, x64, x86);
+
+    void compareExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 expected,
+                           Register64 replacement, Register64 output)
+        DEFINED_ON(arm, arm64, x64, x86);
+
+    // x86: `value` must be ecx:ebx; `output` must be edx:eax.
+    // ARM: Registers must be distinct; `value` and `output` must be (even,odd) pairs.
+
+    void atomicExchange64(const Synchronization& sync, const Address& mem, Register64 value,
+                          Register64 output)
+        DEFINED_ON(arm, arm64, x64, x86);
+
+    void atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 value,
+                          Register64 output)
+        DEFINED_ON(arm, arm64, x64, x86);
+
+    // x86: `output` must be edx:eax, `temp` must be ecx:ebx.
+    // x64: For And, Or, and Xor `output` must be rax.
+    // ARM: Registers must be distinct; `temp` and `output` must be (even,odd) pairs.
+
+    void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
+                         const Address& mem, Register64 temp, Register64 output)
+        DEFINED_ON(arm, arm64, x64);
+
+    void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
+                         const BaseIndex& mem, Register64 temp, Register64 output)
+        DEFINED_ON(arm, arm64, x64);
+
+    void atomicFetchOp64(const Synchronization& sync, AtomicOp op, const Address& value,
+                         const Address& mem, Register64 temp, Register64 output)
+        DEFINED_ON(x86);
+
+    void atomicFetchOp64(const Synchronization& sync, AtomicOp op, const Address& value,
+                         const BaseIndex& mem, Register64 temp, Register64 output)
+        DEFINED_ON(x86);
+
+    void atomicEffectOp64(const Synchronization& sync, AtomicOp op, Register64 value,
+                          const BaseIndex& mem)
+        DEFINED_ON(x64);
+
+    // ========================================================================
+    // JS atomic operations.
+    //
+    // Here the arrayType must be a type that is valid for JS.  As of 2017 that
+    // is an 8-bit, 16-bit, or 32-bit integer type.
+    //
+    // If arrayType is Scalar::Uint32 then:
+    //
+    //   - `output` must be a float register (this is bug 1077305)
+    //   - if the operation takes one temp register then `temp` must be defined
+    //   - if the operation takes two temp registers then `temp2` must be defined.
+    //
+    // Otherwise `output` must be a GPR and `temp`/`temp2` should be InvalidReg.
+    // (`temp1` must always be valid.)
+    //
+    // For additional register constraints, see the primitive 32-bit operations
+    // above.
+
+    void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
+                           Register expected, Register replacement, Register temp,
+                           AnyRegister output);
+
+    void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                           const BaseIndex& mem, Register expected, Register replacement,
+                           Register temp, AnyRegister output);
+
+    void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
+                          Register value, Register temp, AnyRegister output);
+
+    void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
+                          Register value, Register temp, AnyRegister output);
+
+    void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                         Register value, const Address& mem, Register temp1, Register temp2,
+                         AnyRegister output);
+
+    void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                         Register value, const BaseIndex& mem, Register temp1, Register temp2,
+                         AnyRegister output);
+
+    void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                         Imm32 value, const Address& mem, Register temp1, Register temp2,
+                         AnyRegister output)
+        DEFINED_ON(x86_shared);
+
+    void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                         Imm32 value, const BaseIndex& mem, Register temp1, Register temp2,
+                         AnyRegister output)
+        DEFINED_ON(x86_shared);
+
+    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                          Register value, const Address& mem, Register temp);
+
+    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                          Register value, const BaseIndex& mem, Register temp);
+
+    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                          Imm32 value, const Address& mem, Register temp)
+        DEFINED_ON(x86_shared);
+
+    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                          Imm32 value, const BaseIndex& mem, Register temp)
+        DEFINED_ON(x86_shared);
+
 //}}} check_macroassembler_style
   public:
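As a sketch of the emission pattern this API implies: each primitive operation brackets the memory access with the barriers named by the sync argument, via the memoryBarrierBefore/memoryBarrierAfter helpers added in this patch. A standalone mock (the real MacroAssembler emits machine code, not text; the printed instruction names are illustrative):

    #include <cstdio>

    struct Synchronization {
        int barrierBefore, barrierAfter;
        static Synchronization Full() { return { 15, 15 }; }
    };

    struct MockMasm {
        void memoryBarrier(int bits) { if (bits) printf("  barrier (bits=%d)\n", bits); }
        void compareExchange32(const Synchronization& sync) {
            memoryBarrier(sync.barrierBefore);   // memoryBarrierBefore(sync)
            printf("  32-bit compare-exchange\n");
            memoryBarrier(sync.barrierAfter);    // memoryBarrierAfter(sync)
        }
    };

    int main() {
        MockMasm masm;
        masm.compareExchange32(Synchronization::Full());
    }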
@@ -1736,6 +1948,9 @@ class MacroAssembler : public MacroAssemblerSpecific
     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
                                 unsigned numElems = 0);
 
+    void memoryBarrierBefore(const Synchronization& sync);
+    void memoryBarrierAfter(const Synchronization& sync);
+
     // Load a property from an UnboxedPlainObject or UnboxedArrayObject.
     template <typename T>
     void loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output);
@@ -1834,10 +1834,10 @@ CodeGeneratorARM::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArr
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
-        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+        masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+        masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     }
 }
@@ -1855,289 +1855,13 @@ CodeGeneratorARM::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArray
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
-        masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+        masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+        masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     }
 }
 
-template<typename S, typename T>
-void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const S& value, const T& mem, Register flagTemp,
-                                             Register outTemp, AnyRegister output)
-{
-    MOZ_ASSERT(flagTemp != InvalidReg);
-    MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
-
-    switch (arrayType) {
-      case Scalar::Int8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd8SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub8SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd8SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr8SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor8SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd8ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub8ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd8ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr8ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor8ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd16SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub16SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd16SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr16SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor16SignExtend(value, mem, flagTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd16ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub16ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd16ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr16ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor16ZeroExtend(value, mem, flagTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int32:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd32(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub32(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd32(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr32(value, mem, flagTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor32(value, mem, flagTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd32(value, mem, flagTemp, outTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub32(value, mem, flagTemp, outTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd32(value, mem, flagTemp, outTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr32(value, mem, flagTemp, outTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor32(value, mem, flagTemp, outTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        masm.convertUInt32ToDouble(outTemp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Imm32& value, const Address& mem,
-                                             Register flagTemp, Register outTemp,
-                                             AnyRegister output);
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Imm32& value, const BaseIndex& mem,
-                                             Register flagTemp, Register outTemp,
-                                             AnyRegister output);
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Register& value, const Address& mem,
-                                             Register flagTemp, Register outTemp,
-                                             AnyRegister output);
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Register& value, const BaseIndex& mem,
-                                             Register flagTemp, Register outTemp,
-                                             AnyRegister output);
-
-// Binary operation for effect, result discarded.
-template<typename S, typename T>
-void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
-                                             const T& mem, Register flagTemp)
-{
-    MOZ_ASSERT(flagTemp != InvalidReg);
-
-    switch (arrayType) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicAdd8(value, mem, flagTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicSub8(value, mem, flagTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicAnd8(value, mem, flagTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicOr8(value, mem, flagTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicXor8(value, mem, flagTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int16:
-      case Scalar::Uint16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicAdd16(value, mem, flagTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicSub16(value, mem, flagTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicAnd16(value, mem, flagTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicOr16(value, mem, flagTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicXor16(value, mem, flagTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicAdd32(value, mem, flagTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicSub32(value, mem, flagTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicAnd32(value, mem, flagTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicOr32(value, mem, flagTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicXor32(value, mem, flagTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Imm32& value, const Address& mem,
-                                             Register flagTemp);
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Imm32& value, const BaseIndex& mem,
-                                             Register flagTemp);
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Register& value, const Address& mem,
-                                             Register flagTemp);
-template void
-CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                             const Register& value, const BaseIndex& mem,
-                                             Register flagTemp);
-
-template <typename T>
-static inline void
-AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op,
-                        Scalar::Type arrayType, const LAllocation* value, const T& mem,
-                        Register flagTemp, Register outTemp, AnyRegister output)
-{
-    if (value->isConstant())
-        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp, outTemp, output);
-    else
-        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp, outTemp, output);
-}
-
 void
 CodeGeneratorARM::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
 {
@@ -2147,31 +1871,22 @@ CodeGeneratorARM::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBino
     Register elements = ToRegister(lir->elements());
     Register flagTemp = ToRegister(lir->temp1());
     Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
-    const LAllocation* value = lir->value();
+    Register value = ToRegister(lir->value());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp, output);
+        masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                             mem, flagTemp, outTemp, output);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp, output);
+        masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                             mem, flagTemp, outTemp, output);
     }
 }
 
-template <typename T>
-static inline void
-AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op, Scalar::Type arrayType,
-                        const LAllocation* value, const T& mem, Register flagTemp)
-{
-    if (value->isConstant())
-        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp);
-    else
-        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp);
-}
-
 void
 CodeGeneratorARM::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
 {
@@ -2179,16 +1894,18 @@ CodeGeneratorARM::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayEl
 
     Register elements = ToRegister(lir->elements());
     Register flagTemp = ToRegister(lir->flagTemp());
-    const LAllocation* value = lir->value();
+    Register value = ToRegister(lir->value());
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp);
+        masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                              mem, flagTemp);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp);
+        masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                              mem, flagTemp);
     }
 }
@@ -2554,10 +2271,9 @@ CodeGeneratorARM::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins)
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
+    Register out = ToRegister(ins->output());
 
-    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                        srcAddr, oldval, newval, InvalidReg,
-                                        ToAnyRegister(ins->output()));
+    masm.compareExchange(vt, Synchronization::Full(), srcAddr, oldval, newval, out);
 }
 
 void
@@ -2568,11 +2284,11 @@ CodeGeneratorARM::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
+    Register output = ToRegister(ins->output());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                       srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
+    masm.atomicExchange(vt, Synchronization::Full(), srcAddr, value, output);
 }
 
 void
@@ -2584,21 +2300,14 @@ CodeGeneratorARM::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
+    Register output = ToRegister(ins->output());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
 
-    if (value->isConstant()) {
-        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                   Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
-                                   ToAnyRegister(ins->output()));
-    } else {
-        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                   ToRegister(value), srcAddr, flagTemp, InvalidReg,
-                                   ToAnyRegister(ins->output()));
-    }
+    masm.atomicFetchOp(vt, Synchronization::Full(), op, ToRegister(value), srcAddr, flagTemp,
+                       output);
 }
 
 void
@@ -2615,11 +2324,7 @@ CodeGeneratorARM::visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffec
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
 
-    if (value->isConstant())
-        atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
-    else
-        atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
+    masm.atomicEffectOp(vt, Synchronization::Full(), op, ToRegister(value), srcAddr, flagTemp);
 }
 
 void
@@ -3396,7 +3101,7 @@ CodeGeneratorARM::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir)
     Register64 tmp(InvalidReg, InvalidReg);
 
     BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
-    masm.atomicLoad64(addr, tmp, output);
+    masm.atomicLoad64(Synchronization::Full(), addr, tmp, output);
 }
 
 void
@@ -3407,7 +3112,7 @@ CodeGeneratorARM::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir)
     Register64 tmp(ToRegister(lir->tmpHigh()), ToRegister(lir->tmpLow()));
 
     BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
-    masm.atomicExchange64(addr, value, tmp);
+    masm.atomicExchange64(Synchronization::Full(), addr, value, tmp);
 }
 
 void
@@ -3419,7 +3124,7 @@ CodeGeneratorARM::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir)
     Register64 out = ToOutRegister64(lir);
 
     BaseIndex addr(HeapReg, ptr, TimesOne, lir->mir()->access().offset());
-    masm.compareExchange64(addr, expected, replacement, out);
+    masm.compareExchange64(Synchronization::Full(), addr, expected, replacement, out);
 }
 
 void
@@ -3431,14 +3136,7 @@ CodeGeneratorARM::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir)
 
     BaseIndex addr(HeapReg, ptr, TimesOne, lir->access().offset());
     Register64 tmp(ToRegister(lir->tmpHigh()), ToRegister(lir->tmpLow()));
-    switch (lir->operation()) {
-      case AtomicFetchAddOp: masm.atomicFetchAdd64(value, addr, tmp, out); break;
-      case AtomicFetchSubOp: masm.atomicFetchSub64(value, addr, tmp, out); break;
-      case AtomicFetchAndOp: masm.atomicFetchAnd64(value, addr, tmp, out); break;
-      case AtomicFetchOrOp:  masm.atomicFetchOr64(value, addr, tmp, out); break;
-      case AtomicFetchXorOp: masm.atomicFetchXor64(value, addr, tmp, out); break;
-      default: MOZ_CRASH();
-    }
+    masm.atomicFetchOp64(Synchronization::Full(), lir->operation(), value, addr, tmp, out);
 }
 
 void
@@ -3449,5 +3147,5 @@ CodeGeneratorARM::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir)
     Register64 out = ToOutRegister64(lir);
 
     BaseIndex addr(HeapReg, ptr, TimesOne, lir->access().offset());
-    masm.atomicExchange64(addr, value, out);
+    masm.atomicExchange64(Synchronization::Full(), addr, value, out);
}
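The 64-bit hunks above show the same refactoring in miniature: every caller used to repeat a switch over the AtomicOp to pick a per-op method; now the op is an ordinary parameter and the dispatch lives once, inside the MacroAssembler. A standalone sketch of that before/after (the emit strings are placeholders, not real instruction sequences):

    #include <cstdio>

    enum AtomicOp { AtomicFetchAddOp, AtomicFetchSubOp, AtomicFetchAndOp,
                    AtomicFetchOrOp, AtomicFetchXorOp };

    // The single dispatch point that replaces per-caller switches.
    void atomicFetchOp64(AtomicOp op) {
        switch (op) {
          case AtomicFetchAddOp: printf("emit 64-bit fetch-add loop\n"); break;
          case AtomicFetchSubOp: printf("emit 64-bit fetch-sub loop\n"); break;
          case AtomicFetchAndOp: printf("emit 64-bit fetch-and loop\n"); break;
          case AtomicFetchOrOp:  printf("emit 64-bit fetch-or loop\n");  break;
          case AtomicFetchXorOp: printf("emit 64-bit fetch-xor loop\n"); break;
        }
    }

    int main() { atomicFetchOp64(AtomicFetchAddOp); }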
File diff suppressed because it is too large.
@@ -12,7 +12,6 @@
 #include "jsopcode.h"
 
 #include "jit/arm/Assembler-arm.h"
-#include "jit/AtomicOp.h"
 #include "jit/JitFrames.h"
 #include "jit/MoveResolver.h"
 
@@ -1107,307 +1106,6 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
         ma_vmov(src, dest, cc);
     }
 
-  private:
-    template<typename T>
-    Register computePointer(const T& src, Register r);
-
-    template<typename T>
-    void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval,
-                         Register newval, Register output);
-
-    template<typename T>
-    void atomicExchange(int nbytes, bool signExtend, const T& address, Register value,
-                        Register output);
-
-    template<typename T>
-    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
-                       const T& address, Register flagTemp, Register output);
-
-    template<typename T>
-    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                       const T& address, Register flagTemp, Register output);
-
-    template<typename T>
-    void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& address,
-                        Register flagTemp);
-
-    template<typename T>
-    void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& address,
-                        Register flagTemp);
-
-    template<typename T>
-    void atomicFetchOp64(AtomicOp op, Register64 value, const T& mem, Register64 temp,
-                         Register64 output);
-
-  public:
-    // T in {Address,BaseIndex}
-    // S in {Imm32,Register}
-
-    template<typename T>
-    void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output)
-    {
-        compareExchange(1, true, mem, oldval, newval, output);
-    }
-    template<typename T>
-    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
-    {
-        compareExchange(1, false, mem, oldval, newval, output);
-    }
-    template<typename T>
-    void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output)
-    {
-        compareExchange(2, true, mem, oldval, newval, output);
-    }
-    template<typename T>
-    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
-    {
-        compareExchange(2, false, mem, oldval, newval, output);
-    }
-    template<typename T>
-    void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
-        compareExchange(4, false, mem, oldval, newval, output);
-    }
-
-    template<typename T>
-    void atomicExchange8SignExtend(const T& mem, Register value, Register output)
-    {
-        atomicExchange(1, true, mem, value, output);
-    }
-    template<typename T>
-    void atomicExchange8ZeroExtend(const T& mem, Register value, Register output)
-    {
-        atomicExchange(1, false, mem, value, output);
-    }
-    template<typename T>
-    void atomicExchange16SignExtend(const T& mem, Register value, Register output)
-    {
-        atomicExchange(2, true, mem, value, output);
-    }
-    template<typename T>
-    void atomicExchange16ZeroExtend(const T& mem, Register value, Register output)
-    {
-        atomicExchange(2, false, mem, value, output);
-    }
-    template<typename T>
-    void atomicExchange32(const T& mem, Register value, Register output) {
-        atomicExchange(4, false, mem, value, output);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
-    }
-    template <typename T, typename S>
-    void atomicAdd8(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicAdd16(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicAdd32(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
-    }
-    template <typename T, typename S>
-    void atomicSub8(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicSub16(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicSub32(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
-    }
-    template <typename T, typename S>
-    void atomicAnd8(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicAnd16(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicAnd32(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
-    }
-    template <typename T, typename S>
-    void atomicOr8(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicOr16(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicOr32(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) {
-        atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
-    }
-    template <typename T, typename S>
-    void atomicXor8(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicXor16(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp);
-    }
-    template <typename T, typename S>
-    void atomicXor32(const S& value, const T& mem, Register flagTemp) {
-        atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp);
-    }
-
-    // Temp should be invalid; output must be (even,odd) pair.
-    template<typename T>
-    void atomicLoad64(const T& mem, Register64 temp, Register64 output);
-
-    // Registers must be distinct; temp and output must be (even,odd) pairs.
-    template <typename T>
-    void atomicFetchAdd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        atomicFetchOp64(AtomicFetchAddOp, value, mem, temp, output);
-    }
-
-    // Registers must be distinct; temp and output must be (even,odd) pairs.
-    template <typename T>
-    void atomicFetchSub64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        atomicFetchOp64(AtomicFetchSubOp, value, mem, temp, output);
-    }
-
-    // Registers must be distinct; temp and output must be (even,odd) pairs.
-    template <typename T>
-    void atomicFetchAnd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        atomicFetchOp64(AtomicFetchAndOp, value, mem, temp, output);
-    }
-
-    // Registers must be distinct; temp and output must be (even,odd) pairs.
-    template <typename T>
-    void atomicFetchOr64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        atomicFetchOp64(AtomicFetchOrOp, value, mem, temp, output);
-    }
-
-    // Registers must be distinct; temp and output must be (even,odd) pairs.
-    template <typename T>
-    void atomicFetchXor64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        atomicFetchOp64(AtomicFetchXorOp, value, mem, temp, output);
-    }
-
-    // Registers must be distinct; value and output must be (even,odd) pairs.
-    template <typename T>
-    void atomicExchange64(const T& mem, Register64 value, Register64 output);
-
-    // Registers must be distinct; replace and output must be (even,odd) pairs.
-    template <typename T>
-    void compareExchange64(const T& mem, Register64 expect, Register64 replace, Register64 output);
-
-    template<typename T>
-    void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
-                                        Register temp, AnyRegister output);
-
-    template<typename T>
-    void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
-                                       Register temp, AnyRegister output);
-
     inline void incrementInt32Value(const Address& addr);
 
     void cmp32(Register lhs, Imm32 rhs);
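The deleted per-width entry points above encoded the element type by hand: compareExchange16SignExtend was exactly compareExchange(2, true, ...), and so on. In the new API that mapping happens once behind the Scalar::Type parameter. A standalone sketch of the mapping (the helper name is hypothetical, not from the patch):

    #include <cassert>

    enum class ScalarType { Int8, Uint8, Int16, Uint16, Int32, Uint32 };

    // Hypothetical helper: the (nbytes, signExtend) pair the removed methods
    // hard-coded per entry point.
    static void ScalarTypeToWidth(ScalarType t, int* nbytes, bool* signExtend) {
        switch (t) {
          case ScalarType::Int8:   *nbytes = 1; *signExtend = true;  break;
          case ScalarType::Uint8:  *nbytes = 1; *signExtend = false; break;
          case ScalarType::Int16:  *nbytes = 2; *signExtend = true;  break;
          case ScalarType::Uint16: *nbytes = 2; *signExtend = false; break;
          case ScalarType::Int32:
          case ScalarType::Uint32: *nbytes = 4; *signExtend = false; break;
        }
    }

    int main() {
        int n; bool s;
        ScalarTypeToWidth(ScalarType::Int16, &n, &s);
        assert(n == 2 && s);   // matches the old compareExchange16SignExtend
    }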
@@ -728,10 +728,10 @@ CodeGeneratorARM64::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedA
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
-        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+        masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+        masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     }
 }
@@ -749,10 +749,10 @@ CodeGeneratorARM64::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArr
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
-        masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+        masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
+        masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     }
 }
@@ -233,89 +233,6 @@ MacroAssemblerCompat::breakpoint()
     Brk((code++) & 0xffff);
 }
 
-template<typename T>
-void
-MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
-                                                     Register oldval, Register newval,
-                                                     Register temp, AnyRegister output)
-{
-    switch (arrayType) {
-      case Scalar::Int8:
-        compareExchange8SignExtend(mem, oldval, newval, output.gpr());
-        break;
-      case Scalar::Uint8:
-        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
-        break;
-      case Scalar::Int16:
-        compareExchange16SignExtend(mem, oldval, newval, output.gpr());
-        break;
-      case Scalar::Uint16:
-        compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
-        break;
-      case Scalar::Int32:
-        compareExchange32(mem, oldval, newval, output.gpr());
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        compareExchange32(mem, oldval, newval, temp);
-        convertUInt32ToDouble(temp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
-                                                     Register oldval, Register newval, Register temp,
-                                                     AnyRegister output);
-template void
-MacroAssemblerCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
-                                                     Register oldval, Register newval, Register temp,
-                                                     AnyRegister output);
-
-template<typename T>
-void
-MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
-                                                    Register value, Register temp, AnyRegister output)
-{
-    switch (arrayType) {
-      case Scalar::Int8:
-        atomicExchange8SignExtend(mem, value, output.gpr());
-        break;
-      case Scalar::Uint8:
-        atomicExchange8ZeroExtend(mem, value, output.gpr());
-        break;
-      case Scalar::Int16:
-        atomicExchange16SignExtend(mem, value, output.gpr());
-        break;
-      case Scalar::Uint16:
-        atomicExchange16ZeroExtend(mem, value, output.gpr());
-        break;
-      case Scalar::Int32:
-        atomicExchange32(mem, value, output.gpr());
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        atomicExchange32(mem, value, temp);
-        convertUInt32ToDouble(temp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
-                                                    Register value, Register temp, AnyRegister output);
-template void
-MacroAssemblerCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
-                                                    Register value, Register temp, AnyRegister output);
-
 void
 MacroAssembler::reserveStack(uint32_t amount)
 {
@@ -959,6 +876,105 @@ MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
MOZ_CRASH("NYI");
}

// ========================================================================
// Primitive atomic operations.

void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register oldval, Register newval, Register output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
Register oldval, Register newval, Register output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register value, Register output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
Register value, Register output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp, Register output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp, Register output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::compareExchange64(const Synchronization& sync, const Address& mem, Register64 expect,
Register64 replace, Register64 output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::compareExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicExchange64(const Synchronization& sync, const Address& mem, Register64 value, Register64 output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 value, Register64 output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value, const Address& mem,
Register64 temp, Register64 output)
{
MOZ_CRASH("NYI");
}

void
MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value, const BaseIndex& mem,
Register64 temp, Register64 output)
{
MOZ_CRASH("NYI");
}

//}}} check_macroassembler_style

} // namespace jit
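// Illustrative caller of the new primitive API (the wrapper itself is not
// part of this patch): platform-independent code chooses the synchronization
// and each back-end emits the matching barriers or exclusive-access forms.
static void
EmitSeqCstCompareExchange(MacroAssembler& masm, Scalar::Type type, const Address& mem,
                          Register oldval, Register newval, Register output)
{
    masm.compareExchange(type, Synchronization::Full(), mem, oldval, newval, output);
}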
@@ -1874,311 +1874,6 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
return value;
}

private:
template <typename T>
void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval,
Register newval, Register output)
{
MOZ_CRASH("compareExchange");
}

template <typename T>
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
const T& address, Register temp, Register output)
{
MOZ_CRASH("atomicFetchOp");
}

template <typename T>
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
const T& address, Register temp, Register output)
{
MOZ_CRASH("atomicFetchOp");
}

template <typename T>
void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& mem) {
MOZ_CRASH("atomicEffectOp");
}

template <typename T>
void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& mem) {
MOZ_CRASH("atomicEffectOp");
}

template <typename T>
void atomicFetchOp64(AtomicOp op, Register64 value, const T& mem, Register64 temp,
Register64 output)
{
MOZ_CRASH("AtomicFetchOp64");
}

public:
// T in {Address,BaseIndex}
// S in {Imm32,Register}

template <typename T>
void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output)
{
compareExchange(1, true, mem, oldval, newval, output);
}
template <typename T>
void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
{
compareExchange(1, false, mem, oldval, newval, output);
}
template <typename T>
void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output)
{
compareExchange(2, true, mem, oldval, newval, output);
}
template <typename T>
void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
{
compareExchange(2, false, mem, oldval, newval, output);
}
template <typename T>
void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
compareExchange(4, false, mem, oldval, newval, output);
}
template <typename T>
void atomicExchange32(const T& mem, Register value, Register output) {
MOZ_CRASH("atomicExchange32");
}

template <typename T>
void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
MOZ_CRASH("atomicExchange8ZeroExtend");
}
template <typename T>
void atomicExchange8SignExtend(const T& mem, Register value, Register output) {
MOZ_CRASH("atomicExchange8SignExtend");
}

template <typename T, typename S>
void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
}

template <typename T, typename S>
void atomicAdd8(const S& value, const T& mem) {
atomicEffectOp(1, AtomicFetchAddOp, value, mem);
}
template <typename T, typename S>
void atomicAdd16(const S& value, const T& mem) {
atomicEffectOp(2, AtomicFetchAddOp, value, mem);
}
template <typename T, typename S>
void atomicAdd32(const S& value, const T& mem) {
atomicEffectOp(4, AtomicFetchAddOp, value, mem);
}

template <typename T>
void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
MOZ_CRASH("atomicExchange16ZeroExtend");
}
template <typename T>
void atomicExchange16SignExtend(const T& mem, Register value, Register output) {
MOZ_CRASH("atomicExchange16SignExtend");
}

template <typename T, typename S>
void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
}

template <typename T, typename S>
void atomicSub8(const S& value, const T& mem) {
atomicEffectOp(1, AtomicFetchSubOp, value, mem);
}
template <typename T, typename S>
void atomicSub16(const S& value, const T& mem) {
atomicEffectOp(2, AtomicFetchSubOp, value, mem);
}
template <typename T, typename S>
void atomicSub32(const S& value, const T& mem) {
atomicEffectOp(4, AtomicFetchSubOp, value, mem);
}

template <typename T, typename S>
void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
}

template <typename T, typename S>
void atomicAnd8(const S& value, const T& mem) {
atomicEffectOp(1, AtomicFetchAndOp, value, mem);
}
template <typename T, typename S>
void atomicAnd16(const S& value, const T& mem) {
atomicEffectOp(2, AtomicFetchAndOp, value, mem);
}
template <typename T, typename S>
void atomicAnd32(const S& value, const T& mem) {
atomicEffectOp(4, AtomicFetchAndOp, value, mem);
}

template <typename T, typename S>
void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
}

template <typename T, typename S>
void atomicOr8(const S& value, const T& mem) {
atomicEffectOp(1, AtomicFetchOrOp, value, mem);
}
template <typename T, typename S>
void atomicOr16(const S& value, const T& mem) {
atomicEffectOp(2, AtomicFetchOrOp, value, mem);
}
template <typename T, typename S>
void atomicOr32(const S& value, const T& mem) {
atomicEffectOp(4, AtomicFetchOrOp, value, mem);
}

template <typename T, typename S>
void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output);
}
template <typename T, typename S>
void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) {
atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
}

template <typename T, typename S>
void atomicXor8(const S& value, const T& mem) {
atomicEffectOp(1, AtomicFetchXorOp, value, mem);
}
template <typename T, typename S>
void atomicXor16(const S& value, const T& mem) {
atomicEffectOp(2, AtomicFetchXorOp, value, mem);
}
template <typename T, typename S>
void atomicXor32(const S& value, const T& mem) {
atomicEffectOp(4, AtomicFetchXorOp, value, mem);
}

template <typename T>
void atomicFetchAdd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
atomicFetchOp64(AtomicFetchAddOp, value, mem, temp, output);
}

template <typename T>
void atomicFetchSub64(Register64 value, const T& mem, Register64 temp, Register64 output) {
atomicFetchOp64(AtomicFetchSubOp, value, mem, temp, output);
}

template <typename T>
void atomicFetchAnd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
atomicFetchOp64(AtomicFetchAndOp, value, mem, temp, output);
}

template <typename T>
void atomicFetchOr64(Register64 value, const T& mem, Register64 temp, Register64 output) {
atomicFetchOp64(AtomicFetchOrOp, value, mem, temp, output);
}

template <typename T>
void atomicFetchXor64(Register64 value, const T& mem, Register64 temp, Register64 output) {
atomicFetchOp64(AtomicFetchXorOp, value, mem, temp, output);
}

template <typename T>
void atomicExchange64(const T& mem, Register64 src, Register64 output) {
MOZ_CRASH("atomicExchange64");
}

template <typename T>
void compareExchange64(const T& mem, Register64 expected, Register64 replacement,
Register64 output)
{
MOZ_CRASH("compareExchange64");
}

template<typename T>
void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
Register temp, AnyRegister output);

template<typename T>
void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
Register temp, AnyRegister output);

// Emit a BLR or NOP instruction. ToggleCall can be used to patch
// this instruction.
CodeOffset toggledCall(JitCode* target, bool enabled) {
@@ -1899,7 +1899,7 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
default: MOZ_CRASH("unexpected array type");
}

masm.memoryBarrier(mir->access().barrierBefore());
masm.memoryBarrierBefore(mir->access().sync());

BaseIndex address(HeapReg, ptr, TimesOne);

@@ -1919,7 +1919,7 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
isSigned ? SignExtend : ZeroExtend);
}

masm.memoryBarrier(mir->access().barrierAfter());
masm.memoryBarrierAfter(mir->access().sync());
return;
}

@@ -1938,7 +1938,8 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
isSigned ? SignExtend : ZeroExtend);
}
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());

masm.memoryBarrierAfter(mir->access().sync());
}

void

@@ -1990,7 +1991,7 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
default: MOZ_CRASH("unexpected array type");
}

masm.memoryBarrier(mir->access().barrierBefore());
masm.memoryBarrierBefore(mir->access().sync());

BaseIndex address(HeapReg, ptr, TimesOne);

@@ -2010,7 +2011,7 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
isSigned ? SignExtend : ZeroExtend);
}

masm.memoryBarrier(mir->access().barrierAfter());
masm.memoryBarrierAfter(mir->access().sync());
return;
}

@@ -2032,7 +2033,8 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
}
// Only the last emitted instruction is a memory access.
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());

masm.memoryBarrierAfter(mir->access().sync());
}

void
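// The pattern shared by all the wasm load/store hunks above, abstracted as an
// illustrative helper (not part of this patch): a single Synchronization value
// drives both fences around the access.
template <typename EmitAccess>
static void
EmitGuardedAccess(MacroAssembler& masm, const wasm::MemoryAccessDesc& access,
                  EmitAccess emitAccess)
{
    masm.memoryBarrierBefore(access.sync());  // barriers required before the access
    emitAccess();                             // the platform load or store proper
    masm.memoryBarrierAfter(access.sync());   // barriers required after the access
}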
@@ -481,7 +481,7 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
default: MOZ_CRASH("unexpected array type");
}

masm.memoryBarrier(mir->access().barrierBefore());
masm.memoryBarrierBefore(mir->access().sync());

MOZ_ASSERT(INT64LOW_OFFSET == 0);
if (IsUnaligned(mir->access())) {

@@ -503,10 +503,7 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
SizeWord, SignExtend);
}
return;
}

if (byteSize <= 4) {
} else if (byteSize <= 4) {
masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());

@@ -522,7 +519,7 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
}

masm.memoryBarrier(mir->access().barrierAfter());
masm.memoryBarrierAfter(mir->access().sync());
}

void

@@ -570,7 +567,7 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
default: MOZ_CRASH("unexpected array type");
}

masm.memoryBarrier(mir->access().barrierBefore());
masm.memoryBarrierBefore(mir->access().sync());

MOZ_ASSERT(INT64LOW_OFFSET == 0);
if (IsUnaligned(mir->access())) {

@@ -587,10 +584,7 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, SizeWord, ZeroExtend);
}
return;
}

if (byteSize <= 4) {
} else if (byteSize <= 4) {
masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize));
masm.append(mir->access(), masm.size() - 4, masm.framePushed());

@@ -601,7 +595,7 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
}

masm.memoryBarrier(mir->access().barrierAfter());
masm.memoryBarrierAfter(mir->access().sync());
}

void

@@ -827,4 +821,4 @@ CodeGeneratorMIPS::setReturnDoubleRegs(LiveRegisterSet* regs)
MOZ_ASSERT(ReturnFloat32Reg.code_ == ReturnDoubleReg.code_);
regs->add(ReturnFloat32Reg);
regs->add(ReturnDoubleReg);
}
}
@@ -447,7 +447,7 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
default: MOZ_CRASH("unexpected array type");
}

masm.memoryBarrier(mir->access().barrierBefore());
masm.memoryBarrierBefore(mir->access().sync());

if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));

@@ -455,14 +455,13 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
masm.ma_load_unaligned(mir->access(), ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
} else {
masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
}

masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());

masm.memoryBarrier(mir->access().barrierAfter());
masm.memoryBarrierAfter(mir->access().sync());
}

void

@@ -513,7 +512,7 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
default: MOZ_CRASH("unexpected array type");
}

masm.memoryBarrier(mir->access().barrierBefore());
masm.memoryBarrierBefore(mir->access().sync());

if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));

@@ -521,13 +520,13 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
masm.ma_store_unaligned(mir->access(), ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
} else {
masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());
}
masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
masm.append(mir->access(), masm.size() - 4, masm.framePushed());

masm.memoryBarrier(mir->access().barrierAfter());
masm.memoryBarrierAfter(mir->access().sync());
}

void
@@ -527,6 +527,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
public:
// The following functions are exposed for use in platform-shared code.

// TODO: These are no longer used in platform code.
private:
template<typename T>
void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)

@@ -834,6 +836,7 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}

public:
template<typename T>
void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
@@ -310,65 +310,6 @@ class MacroAssemblerNone : public Assembler

template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }

template <typename T> void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template <typename T> void compareExchange32(const T& mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange8SignExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange16SignExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template<typename T> void atomicExchange32(const T& mem, Register value, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAdd32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicAdd8(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicAdd16(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicAdd32(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchSub32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicSub8(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicSub16(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicSub32(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchAnd32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicAnd8(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicAnd16(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicAnd32(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchOr32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicOr8(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicOr16(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicOr32(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor8SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor8ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor16SignExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor16ZeroExtend(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicFetchXor32(const T& value, const S& mem, Register temp, Register output) { MOZ_CRASH(); }
template <typename T, typename S> void atomicXor8(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicXor16(const T& value, const S& mem) { MOZ_CRASH(); }
template <typename T, typename S> void atomicXor32(const T& value, const S& mem) { MOZ_CRASH(); }

template <typename T> void atomicFetchAdd64(Register64 value, const T& mem, Register64 temp, Register64 output) { MOZ_CRASH(); }
template <typename T> void atomicFetchSub64(Register64 value, const T& mem, Register64 temp, Register64 output) { MOZ_CRASH(); }
template <typename T> void atomicFetchAnd64(Register64 value, const T& mem, Register64 temp, Register64 output) { MOZ_CRASH(); }
template <typename T> void atomicFetchOr64(Register64 value, const T& mem, Register64 temp, Register64 output) { MOZ_CRASH(); }
template <typename T> void atomicFetchXor64(Register64 value, const T& mem, Register64 temp, Register64 output) { MOZ_CRASH(); }
template <typename T> void atomicExchange64(const T& mem, Register64 src, Register64 output) { MOZ_CRASH(); }
template <typename T> void compareExchange64(const T& mem, Register64 expect, Register64 replace, Register64 output) { MOZ_CRASH(); }

Register splitTagForTest(ValueOperand) { MOZ_CRASH(); }

void boxDouble(FloatRegister, ValueOperand, FloatRegister) { MOZ_CRASH(); }
@@ -735,22 +735,19 @@ class MemoryAccessDesc
uint32_t align_;
Scalar::Type type_;
unsigned numSimdElems_;
jit::MemoryBarrierBits barrierBefore_;
jit::MemoryBarrierBits barrierAfter_;
jit::Synchronization sync_;
mozilla::Maybe<wasm::BytecodeOffset> trapOffset_;

public:
explicit MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
const mozilla::Maybe<BytecodeOffset>& trapOffset,
unsigned numSimdElems = 0,
jit::MemoryBarrierBits barrierBefore = jit::MembarNobits,
jit::MemoryBarrierBits barrierAfter = jit::MembarNobits)
const jit::Synchronization& sync = jit::Synchronization::None())
: offset_(offset),
align_(align),
type_(type),
numSimdElems_(numSimdElems),
barrierBefore_(barrierBefore),
barrierAfter_(barrierAfter),
sync_(sync),
trapOffset_(trapOffset)
{
MOZ_ASSERT(Scalar::isSimdType(type) == (numSimdElems > 0));

@@ -769,11 +766,10 @@ class MemoryAccessDesc
: Scalar::byteSize(type());
}
unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
const jit::Synchronization& sync() const { return sync_; }
bool hasTrap() const { return !!trapOffset_; }
BytecodeOffset trapOffset() const { return *trapOffset_; }
bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
bool isAtomic() const { return !sync_.isNone(); }
bool isSimd() const { return Scalar::isSimdType(type_); }
bool isPlainAsmJS() const { return !hasTrap(); }
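// Illustrative construction under the new signature (the bytecodeOffset name
// is hypothetical, and the scalar type and alignment are arbitrary): an
// access is atomic exactly when its Synchronization is not None().
wasm::MemoryAccessDesc access(Scalar::Int32, /* align = */ 4, /* offset = */ 0,
                              mozilla::Some(bytecodeOffset),
                              /* numSimdElems = */ 0,
                              jit::Synchronization::Full());
MOZ_ASSERT(access.isAtomic());  // equivalent to !access.sync().isNone()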
@@ -400,7 +400,7 @@ CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocat
if (value->isConstant()) {
MOZ_ASSERT(!access.isSimd());

masm.memoryBarrier(access.barrierBefore());
masm.memoryBarrierBefore(access.sync());

const MConstant* mir = value->toConstant();
Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());

@@ -432,7 +432,7 @@ CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocat
}
masm.append(access, storeOffset, masm.framePushed());

masm.memoryBarrier(access.barrierAfter());
masm.memoryBarrierAfter(access.sync());
} else {
masm.wasmStore(access, ToAnyRegister(value), dstAddr);
}

@@ -562,15 +562,11 @@ CodeGeneratorX64::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins)

if (accessType == Scalar::Int64) {
MOZ_ASSERT(!mir->access().isPlainAsmJS());
masm.compareExchange64(srcAddr, Register64(oldval), Register64(newval),
ToOutRegister64(ins));
masm.compareExchange64(Synchronization::Full(), srcAddr, Register64(oldval),
Register64(newval), ToOutRegister64(ins));
} else {
masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
srcAddr,
oldval,
newval,
InvalidReg,
ToAnyRegister(ins->output()));
masm.compareExchange(accessType, Synchronization::Full(), srcAddr, oldval, newval,
ToRegister(ins->output()));
}
}

@@ -589,13 +585,11 @@ CodeGeneratorX64::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)

if (accessType == Scalar::Int64) {
MOZ_ASSERT(!mir->access().isPlainAsmJS());
masm.atomicExchange64(srcAddr, Register64(value), ToOutRegister64(ins));
masm.atomicExchange64(Synchronization::Full(), srcAddr, Register64(value),
ToOutRegister64(ins));
} else {
masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
srcAddr,
value,
InvalidReg,
ToAnyRegister(ins->output()));
masm.atomicExchange(accessType, Synchronization::Full(), srcAddr, value,
ToRegister(ins->output()));
}
}

@@ -608,7 +602,7 @@ CodeGeneratorX64::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
Register ptr = ToRegister(ins->ptr());
const LAllocation* value = ins->value();
Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
AnyRegister output = ToAnyRegister(ins->output());
Register output = ToRegister(ins->output());
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

Scalar::Type accessType = mir->access().type();

@@ -620,22 +614,15 @@ CodeGeneratorX64::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)

if (accessType == Scalar::Int64) {
Register64 val = Register64(ToRegister(value));
Register64 out = Register64(output.gpr());
Register64 out = Register64(output);
Register64 tmp = Register64(temp);
switch (op) {
case AtomicFetchAddOp: masm.atomicFetchAdd64(val, srcAddr, tmp, out); break;
case AtomicFetchSubOp: masm.atomicFetchSub64(val, srcAddr, tmp, out); break;
case AtomicFetchAndOp: masm.atomicFetchAnd64(val, srcAddr, tmp, out); break;
case AtomicFetchOrOp: masm.atomicFetchOr64(val, srcAddr, tmp, out); break;
case AtomicFetchXorOp: masm.atomicFetchXor64(val, srcAddr, tmp, out); break;
default: MOZ_CRASH("Invalid typed array atomic operation");
}
masm.atomicFetchOp64(Synchronization::Full(), op, val, srcAddr, tmp, out);
} else if (value->isConstant()) {
atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
output);
masm.atomicFetchOp(accessType, Synchronization::Full(), op, Imm32(ToInt32(value)),
srcAddr, temp, output);
} else {
atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg,
output);
masm.atomicFetchOp(accessType, Synchronization::Full(), op, ToRegister(value),
srcAddr, temp, output);
}
}

@@ -656,23 +643,17 @@ CodeGeneratorX64::visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffec

if (accessType == Scalar::Int64) {
Register64 val = Register64(ToRegister(value));
switch (op) {
case AtomicFetchAddOp: masm.atomicAdd64(srcAddr, val); break;
case AtomicFetchSubOp: masm.atomicSub64(srcAddr, val); break;
case AtomicFetchAndOp: masm.atomicAnd64(srcAddr, val); break;
case AtomicFetchOrOp: masm.atomicOr64(srcAddr, val); break;
case AtomicFetchXorOp: masm.atomicXor64(srcAddr, val); break;
default: MOZ_CRASH("Invalid typed array atomic operation");
}
masm.atomicEffectOp64(Synchronization::Full(), op, val, srcAddr);
} else if (value->isConstant()) {
Imm32 c(0);
if (value->toConstant()->type() == MIRType::Int64)
c = Imm32(ToInt64(value));
else
c = Imm32(ToInt32(value));
atomicBinopToTypedIntArray(op, accessType, c, srcAddr);
masm.atomicEffectOp(accessType, Synchronization::Full(), op, c, srcAddr, InvalidReg);
} else {
atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
masm.atomicEffectOp(accessType, Synchronization::Full(), op, ToRegister(value), srcAddr,
InvalidReg);
}
}
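// Why a Synchronization parameter rather than baked-in seq_cst: wasm atomics
// are currently all Synchronization::Full(), but weaker orderings can later be
// passed through the same entry points. Illustrative helper (not part of this
// patch):
static void
EmitWasmFetchAdd(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync,
                 Register value, const Address& mem, Register temp, Register output)
{
    masm.atomicFetchOp(type, sync, AtomicFetchAddOp, value, mem, temp, output);
}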
@@ -722,7 +722,7 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
void
MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
{
memoryBarrier(access.barrierBefore());
memoryBarrierBefore(access.sync());

size_t loadOffset = size();
switch (access.type()) {

@@ -784,13 +784,13 @@ MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
}
append(access, loadOffset, framePushed());

memoryBarrier(access.barrierAfter());
memoryBarrierAfter(access.sync());
}

void
MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
{
memoryBarrier(access.barrierBefore());
memoryBarrierBefore(access.sync());

MOZ_ASSERT(!access.isSimd());

@@ -831,13 +831,13 @@ MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAdd
}
append(access, loadOffset, framePushed());

memoryBarrier(access.barrierAfter());
memoryBarrierAfter(access.sync());
}

void
MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
{
memoryBarrier(access.barrierBefore());
memoryBarrierBefore(access.sync());

size_t storeOffset = size();
switch (access.type()) {

@@ -896,7 +896,7 @@ MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister valu
}
append(access, storeOffset, framePushed());

memoryBarrier(access.barrierAfter());
memoryBarrierAfter(access.sync());
}

void

@@ -923,4 +923,102 @@ MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output
j(Assembler::Above, oolEntry);
}

// ========================================================================
// Primitive atomic operations.

void
MacroAssembler::compareExchange64(const Synchronization&, const Address& mem, Register64 expected,
Register64 replacement, Register64 output)
{
MOZ_ASSERT(output.reg == rax);
if (expected != output)
movq(expected.reg, output.reg);
lock_cmpxchgq(replacement.reg, Operand(mem));
}

void
MacroAssembler::compareExchange64(const Synchronization&, const BaseIndex& mem, Register64 expected,
Register64 replacement, Register64 output)
{
MOZ_ASSERT(output.reg == rax);
if (expected != output)
movq(expected.reg, output.reg);
lock_cmpxchgq(replacement.reg, Operand(mem));
}

void
MacroAssembler::atomicExchange64(const Synchronization& sync, const Address& mem, Register64 value, Register64 output)
{
if (value != output)
movq(value.reg, output.reg);
xchgq(output.reg, Operand(mem));
}

void
MacroAssembler::atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 value, Register64 output)
{
if (value != output)
movq(value.reg, output.reg);
xchgq(output.reg, Operand(mem));
}

template<typename T>
static void
AtomicFetchOp64(MacroAssembler& masm, AtomicOp op, Register value, const T& mem, Register temp,
Register output)
{
if (op == AtomicFetchAddOp) {
if (value != output)
masm.movq(value, output);
masm.lock_xaddq(output, Operand(mem));
} else if (op == AtomicFetchSubOp) {
if (value != output)
masm.movq(value, output);
masm.negq(output);
masm.lock_xaddq(output, Operand(mem));
} else {
Label again;
MOZ_ASSERT(output == rax);
masm.movq(Operand(mem), rax);
masm.bind(&again);
masm.movq(rax, temp);
switch (op) {
case AtomicFetchAndOp: masm.andq(value, temp); break;
case AtomicFetchOrOp: masm.orq(value, temp); break;
case AtomicFetchXorOp: masm.xorq(value, temp); break;
default: MOZ_CRASH();
}
masm.lock_cmpxchgq(temp, Operand(mem));
masm.j(MacroAssembler::NonZero, &again);
}
}

void
MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, Register64 value,
const Address& mem, Register64 temp, Register64 output)
{
AtomicFetchOp64(*this, op, value.reg, mem, temp.reg, output.reg);
}

void
MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, Register64 value,
const BaseIndex& mem, Register64 temp, Register64 output)
{
AtomicFetchOp64(*this, op, value.reg, mem, temp.reg, output.reg);
}

void
MacroAssembler::atomicEffectOp64(const Synchronization&, AtomicOp op, Register64 value,
const BaseIndex& mem)
{
switch (op) {
case AtomicFetchAddOp: lock_addq(value.reg, Operand(mem)); break;
case AtomicFetchSubOp: lock_subq(value.reg, Operand(mem)); break;
case AtomicFetchAndOp: lock_andq(value.reg, Operand(mem)); break;
case AtomicFetchOrOp: lock_orq(value.reg, Operand(mem)); break;
case AtomicFetchXorOp: lock_xorq(value.reg, Operand(mem)); break;
default: MOZ_CRASH();
}
}

//}}} check_macroassembler_style
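// What the LOCK CMPXCHG retry loop in AtomicFetchOp64 computes, restated in
// portable C++ for AND (illustrative, not SpiderMonkey code): read, combine,
// try to publish; on failure compare_exchange_weak refreshes 'old' and the
// combine is redone, matching how lock_cmpxchgq reloads rax on a miss.
#include <atomic>
#include <cstdint>

static uint64_t
FetchAnd64(std::atomic<uint64_t>& mem, uint64_t value)
{
    uint64_t old = mem.load();
    while (!mem.compare_exchange_weak(old, old & value)) {
        // 'old' now holds the freshly observed memory value; retry the AND.
    }
    return old;  // the value before the AND, as the assembly leaves in rax
}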
@@ -643,88 +643,6 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
storePtr(ImmWord(imm.value), address);
}

template <typename T>
void atomicFetchAdd64(Register64 src, const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(temp.reg == InvalidReg);
if (src != output)
movq(src.reg, output.reg);
lock_xaddq(output.reg, Operand(mem));
}
template <typename T>
void atomicAdd64(const T& mem, Register64 value) {
lock_addq(value.reg, Operand(mem));
}

template <typename T>
void atomicFetchSub64(Register64 src, const T& mem, Register64 temp, Register64 output) {
MOZ_ASSERT(temp.reg == InvalidReg);
if (src != output)
movq(src.reg, output.reg);
negq(output.reg);
lock_xaddq(output.reg, Operand(mem));
}
template <typename T>
void atomicSub64(const T& mem, Register64 value) {
lock_subq(value.reg, Operand(mem));
}

// requires output == rax
#define ATOMIC_BITOP_BODY(OP) \
MOZ_ASSERT(output.reg == rax); \
movq(Operand(mem), rax); \
Label again; \
bind(&again); \
movq(rax, temp.reg); \
OP(src.reg, temp.reg); \
lock_cmpxchgq(temp.reg, Operand(mem)); \
j(NonZero, &again);

template <typename S, typename T>
void atomicFetchAnd64(const S& src, const T& mem, Register64 temp, Register64 output) {
ATOMIC_BITOP_BODY(andq)
}
template <typename T>
void atomicAnd64(const T& mem, Register64 value) {
lock_andq(value.reg, Operand(mem));
}

template <typename S, typename T>
void atomicFetchOr64(const S& src, const T& mem, Register64 temp, Register64 output) {
ATOMIC_BITOP_BODY(orq)
}
template <typename T>
void atomicOr64(const T& mem, Register64 value) {
lock_orq(value.reg, Operand(mem));
}

template <typename S, typename T>
void atomicFetchXor64(const S& src, const T& mem, Register64 temp, Register64 output) {
ATOMIC_BITOP_BODY(xorq)
}
template <typename T>
void atomicXor64(const T& mem, Register64 value) {
lock_xorq(value.reg, Operand(mem));
}

#undef ATOMIC_BITOP_BODY

template <typename T>
void atomicExchange64(const T& mem, Register64 src, Register64 output) {
if (src != output)
movq(src.reg, output.reg);
xchgq(output.reg, Operand(mem));
}

template <typename T>
void compareExchange64(const T& mem, Register64 expected, Register64 replacement,
Register64 output)
{
MOZ_ASSERT(output.reg == rax);
if (expected != output)
movq(expected.reg, output.reg);
lock_cmpxchgq(replacement.reg, Operand(mem));
}

void splitTag(Register src, Register dest) {
if (src != dest)
movq(src, dest);
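// Equivalence behind atomicFetchSub64 above: x86 has LOCK XADD but no
// fetch-sub, so the operand is negated before the xadd. Portable C++
// statement of the same identity (illustrative, not SpiderMonkey code):
#include <atomic>
#include <cstdint>

static uint64_t
FetchSub64(std::atomic<uint64_t>& mem, uint64_t v)
{
    return mem.fetch_add(uint64_t(0) - v);  // two's complement: equals fetch_sub(v)
}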
@ -4229,10 +4229,10 @@ CodeGeneratorX86Shared::visitCompareExchangeTypedArrayElement(LCompareExchangeTy
|
|||
|
||||
if (lir->index()->isConstant()) {
|
||||
Address dest(elements, ToInt32(lir->index()) * width);
|
||||
masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
|
||||
masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
|
||||
} else {
|
||||
BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
|
||||
masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
|
||||
masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4250,273 +4250,26 @@ CodeGeneratorX86Shared::visitAtomicExchangeTypedArrayElement(LAtomicExchangeType
|
|||
|
||||
if (lir->index()->isConstant()) {
|
||||
Address dest(elements, ToInt32(lir->index()) * width);
|
||||
masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
|
||||
masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
|
||||
} else {
|
||||
BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
|
||||
masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp, output);
|
||||
masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename S, typename T>
|
||||
void
|
||||
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
|
||||
const T& mem, Register temp1, Register temp2, AnyRegister output)
|
||||
{
|
||||
switch (arrayType) {
|
||||
case Scalar::Int8:
|
||||
switch (op) {
|
||||
case AtomicFetchAddOp:
|
||||
masm.atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchSubOp:
|
||||
masm.atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchAndOp:
|
||||
masm.atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchOrOp:
|
||||
masm.atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchXorOp:
|
||||
masm.atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("Invalid typed array atomic operation");
|
||||
}
|
||||
break;
|
||||
case Scalar::Uint8:
|
||||
switch (op) {
|
||||
case AtomicFetchAddOp:
|
||||
masm.atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchSubOp:
|
||||
masm.atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchAndOp:
|
||||
masm.atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchOrOp:
|
||||
masm.atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchXorOp:
|
||||
masm.atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("Invalid typed array atomic operation");
|
||||
}
|
||||
break;
|
||||
case Scalar::Int16:
|
||||
switch (op) {
|
||||
case AtomicFetchAddOp:
|
||||
masm.atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchSubOp:
|
||||
masm.atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchAndOp:
|
||||
masm.atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchOrOp:
|
||||
masm.atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchXorOp:
|
||||
masm.atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("Invalid typed array atomic operation");
|
||||
}
|
||||
break;
|
||||
case Scalar::Uint16:
|
||||
switch (op) {
|
||||
case AtomicFetchAddOp:
|
||||
masm.atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchSubOp:
|
||||
masm.atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchAndOp:
|
||||
masm.atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchOrOp:
|
||||
masm.atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchXorOp:
|
||||
masm.atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("Invalid typed array atomic operation");
|
||||
}
|
||||
break;
|
||||
case Scalar::Int32:
|
||||
switch (op) {
|
||||
case AtomicFetchAddOp:
|
||||
masm.atomicFetchAdd32(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchSubOp:
|
||||
masm.atomicFetchSub32(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchAndOp:
|
||||
masm.atomicFetchAnd32(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchOrOp:
|
||||
masm.atomicFetchOr32(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
case AtomicFetchXorOp:
|
||||
masm.atomicFetchXor32(value, mem, temp1, output.gpr());
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("Invalid typed array atomic operation");
|
||||
}
|
||||
break;
|
||||
case Scalar::Uint32:
|
||||
// At the moment, the code in MCallOptimize.cpp requires the output
|
||||
// type to be double for uint32 arrays. See bug 1077305.
|
||||
MOZ_ASSERT(output.isFloat());
|
||||
switch (op) {
|
||||
case AtomicFetchAddOp:
|
||||
masm.atomicFetchAdd32(value, mem, InvalidReg, temp1);
|
||||
break;
|
||||
case AtomicFetchSubOp:
|
||||
masm.atomicFetchSub32(value, mem, InvalidReg, temp1);
|
||||
break;
|
||||
case AtomicFetchAndOp:
|
||||
masm.atomicFetchAnd32(value, mem, temp2, temp1);
|
||||
break;
|
||||
case AtomicFetchOrOp:
|
||||
masm.atomicFetchOr32(value, mem, temp2, temp1);
|
||||
break;
|
||||
case AtomicFetchXorOp:
|
||||
masm.atomicFetchXor32(value, mem, temp2, temp1);
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("Invalid typed array atomic operation");
|
||||
}
|
||||
masm.convertUInt32ToDouble(temp1, output.fpu());
|
||||
break;
|
||||
default:
|
||||
MOZ_CRASH("Invalid typed array type");
|
||||
}
|
||||
}

template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Imm32& value, const Address& mem,
                                                   Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Imm32& value, const BaseIndex& mem,
                                                   Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Register& value, const Address& mem,
                                                   Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Register& value, const BaseIndex& mem,
                                                   Register temp1, Register temp2, AnyRegister output);

// Binary operation for effect, result discarded.
template<typename S, typename T>
void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
                                                   const T& mem)
{
    switch (arrayType) {
      case Scalar::Int8:
      case Scalar::Uint8:
        switch (op) {
          case AtomicFetchAddOp:
            masm.atomicAdd8(value, mem);
            break;
          case AtomicFetchSubOp:
            masm.atomicSub8(value, mem);
            break;
          case AtomicFetchAndOp:
            masm.atomicAnd8(value, mem);
            break;
          case AtomicFetchOrOp:
            masm.atomicOr8(value, mem);
            break;
          case AtomicFetchXorOp:
            masm.atomicXor8(value, mem);
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
        switch (op) {
          case AtomicFetchAddOp:
            masm.atomicAdd16(value, mem);
            break;
          case AtomicFetchSubOp:
            masm.atomicSub16(value, mem);
            break;
          case AtomicFetchAndOp:
            masm.atomicAnd16(value, mem);
            break;
          case AtomicFetchOrOp:
            masm.atomicOr16(value, mem);
            break;
          case AtomicFetchXorOp:
            masm.atomicXor16(value, mem);
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
        switch (op) {
          case AtomicFetchAddOp:
            masm.atomicAdd32(value, mem);
            break;
          case AtomicFetchSubOp:
            masm.atomicSub32(value, mem);
            break;
          case AtomicFetchAndOp:
            masm.atomicAnd32(value, mem);
            break;
          case AtomicFetchOrOp:
            masm.atomicOr32(value, mem);
            break;
          case AtomicFetchXorOp:
            masm.atomicXor32(value, mem);
            break;
          default:
            MOZ_CRASH("Invalid typed array atomic operation");
        }
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
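
When the result is discarded, each arm above is a single lock-prefixed read-modify-write with no cmpxchg retry loop. In C++11 terms the emitted code behaves like a fetch-op whose return value is ignored (a sketch, not the engine's API):

    #include <atomic>
    #include <cstdint>

    // Effect-only atomic add: the old value is not needed, so on x86 this
    // can compile to a single `lock addl` instead of a compare-exchange loop.
    static void AtomicAddForEffect(std::atomic<uint32_t>* cell, uint32_t v)
    {
        cell->fetch_add(v, std::memory_order_seq_cst);  // result discarded
    }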

template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Imm32& value, const Address& mem);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Imm32& value, const BaseIndex& mem);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Register& value, const Address& mem);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                   const Register& value, const BaseIndex& mem);


template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
                        Scalar::Type arrayType, const LAllocation* value, const T& mem,
                        Register temp1, Register temp2, AnyRegister output)
AtomicBinopToTypedArray(MacroAssembler& masm, AtomicOp op, Scalar::Type arrayType,
                        const LAllocation* value, const T& mem, Register temp1,
                        Register temp2, AnyRegister output)
{
    if (value->isConstant())
        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
    else
        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
    if (value->isConstant()) {
        masm.atomicFetchOpJS(arrayType, Synchronization::Full(), op, Imm32(ToInt32(value)), mem,
                             temp1, temp2, output);
    } else {
        masm.atomicFetchOpJS(arrayType, Synchronization::Full(), op, ToRegister(value), mem, temp1,
                             temp2, output);
    }
}
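
JS shared-memory atomics are sequentially consistent, so the code generator always passes Synchronization::Full() here; the sync argument exists so that Wasm can later supply weaker specifications. A hypothetical call site using the signature above (EmitFetchAdd is not a real engine function):

    #include "jit/MacroAssembler.h"

    // Sketch: emitting a seq_cst fetch-add of 1 on an Int32 array element.
    static void EmitFetchAdd(js::jit::MacroAssembler& masm, const js::jit::Address& mem,
                             js::jit::Register temp1, js::jit::Register temp2,
                             js::jit::AnyRegister output)
    {
        masm.atomicFetchOpJS(js::Scalar::Int32, js::jit::Synchronization::Full(),
                             js::jit::AtomicFetchAddOp, js::jit::Imm32(1), mem,
                             temp1, temp2, output);
    }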

void

@@ -4535,22 +4288,25 @@ CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayEleme

    if (lir->index()->isConstant()) {
        Address mem(elements, ToInt32(lir->index()) * width);
        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
    } else {
        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
    }
}

template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
                        Scalar::Type arrayType, const LAllocation* value, const T& mem)
AtomicBinopToTypedArray(MacroAssembler& masm, Scalar::Type arrayType, AtomicOp op,
                        const LAllocation* value, const T& mem)
{
    if (value->isConstant())
        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
    else
        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
    if (value->isConstant()) {
        masm.atomicEffectOpJS(arrayType, Synchronization::Full(), op, Imm32(ToInt32(value)), mem,
                              InvalidReg);
    } else {
        masm.atomicEffectOpJS(arrayType, Synchronization::Full(), op, ToRegister(value), mem,
                              InvalidReg);
    }
}

void

@@ -4565,10 +4321,10 @@ CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedA

    if (lir->index()->isConstant()) {
        Address mem(elements, ToInt32(lir->index()) * width);
        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
        AtomicBinopToTypedArray(masm, arrayType, lir->mir()->operation(), value, mem);
    } else {
        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
        AtomicBinopToTypedArray(masm, arrayType, lir->mir()->operation(), value, mem);
    }
}


@@ -322,15 +322,6 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
    void visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool);
    void generateInvalidateEpilogue();

    // Generating a result.
    template<typename S, typename T>
    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
                                    const T& mem, Register temp1, Register temp2, AnyRegister output);

    // Generating no result.
    template<typename S, typename T>
    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);

    void setReturnDoubleRegs(LiveRegisterSet* regs);

    void canonicalizeIfDeterministic(Scalar::Type type, const LAllocation* value);


@@ -144,89 +144,6 @@ MacroAssemblerX86Shared::asMasm() const
    return *static_cast<const MacroAssembler*>(this);
}

template<typename T>
void
MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                        Register oldval, Register newval,
                                                        Register temp, AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        compareExchange8SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint8:
        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int16:
        compareExchange16SignExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint16:
        compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Int32:
        compareExchange32(mem, oldval, newval, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        compareExchange32(mem, oldval, newval, temp);
        asMasm().convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

template void
MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                        Register oldval, Register newval, Register temp,
                                                        AnyRegister output);
template void
MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                        Register oldval, Register newval, Register temp,
                                                        AnyRegister output);

template<typename T>
void
MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                       Register value, Register temp, AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        atomicExchange8SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint8:
        atomicExchange8ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int16:
        atomicExchange16SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint16:
        atomicExchange16ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int32:
        atomicExchange32(mem, value, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        atomicExchange32(mem, value, temp);
        asMasm().convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}

template void
MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                       Register value, Register temp, AnyRegister output);
template void
MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                       Register value, Register temp, AnyRegister output);

template<class T, class Map>
T*
MacroAssemblerX86Shared::getConstant(const typename T::Pod& value, Map& map,

@@ -888,4 +805,381 @@ MacroAssembler::outOfLineWasmTruncateFloat32ToInt64(FloatRegister input, bool is
    jump(rejoin);
}

// ========================================================================
// Primitive atomic operations.

static void
ExtendTo32(MacroAssembler& masm, Scalar::Type type, Register r)
{
    switch (Scalar::byteSize(type)) {
      case 1:
        if (Scalar::isSignedIntType(type))
            masm.movsbl(r, r);
        else
            masm.movzbl(r, r);
        break;
      case 2:
        if (Scalar::isSignedIntType(type))
            masm.movswl(r, r);
        else
            masm.movzwl(r, r);
        break;
      default:
        break;
    }
}
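
ExtendTo32 widens a narrow fetched value so the rest of the pipeline sees a full int32, choosing sign or zero extension from the element type. In C terms the effect is plain integral widening (sketch):

    #include <cstdint>

    // What ExtendTo32 computes, expressed as implicit C widening.
    static int32_t Widen(int8_t v)   { return v; }  // movsbl
    static int32_t Widen(uint8_t v)  { return v; }  // movzbl
    static int32_t Widen(int16_t v)  { return v; }  // movswl
    static int32_t Widen(uint16_t v) { return v; }  // movzwl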

static inline void
CheckBytereg(Register r) {
#ifdef DEBUG
    AllocatableGeneralRegisterSet byteRegs(Registers::SingleByteRegs);
    MOZ_ASSERT(byteRegs.has(r));
#endif
}

static inline void
CheckBytereg(Imm32 r) {
    // Nothing
}

template<typename T>
static void
CompareExchange(MacroAssembler& masm, Scalar::Type type, const T& mem, Register oldval,
                Register newval, Register output)
{
    MOZ_ASSERT(output == eax);

    if (oldval != output)
        masm.movl(oldval, output);

    switch (Scalar::byteSize(type)) {
      case 1:
        CheckBytereg(newval);
        masm.lock_cmpxchgb(newval, Operand(mem));
        break;
      case 2:
        masm.lock_cmpxchgw(newval, Operand(mem));
        break;
      case 4:
        masm.lock_cmpxchgl(newval, Operand(mem));
        break;
    }

    ExtendTo32(masm, type, output);
}
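
lock cmpxchg compares eax against the memory operand; on a match it stores newval, and in either case eax ends up holding the value that was in memory, which is exactly the result compareExchange must produce. The C++11 equivalent (sketch):

    #include <atomic>
    #include <cstdint>

    // Strong compare-exchange returning the previous memory contents.
    static uint32_t CompareExchange32(std::atomic<uint32_t>* cell,
                                      uint32_t oldval, uint32_t newval)
    {
        // On failure, compare_exchange_strong writes the witnessed value
        // back into `oldval`, so `oldval` holds the old contents either way.
        cell->compare_exchange_strong(oldval, newval, std::memory_order_seq_cst);
        return oldval;
    }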

void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&, const Address& mem,
                                Register oldval, Register newval, Register output)
{
    CompareExchange(*this, type, mem, oldval, newval, output);
}

void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&, const BaseIndex& mem,
                                Register oldval, Register newval, Register output)
{
    CompareExchange(*this, type, mem, oldval, newval, output);
}

template<typename T>
static void
AtomicExchange(MacroAssembler& masm, Scalar::Type type, const T& mem, Register value,
               Register output)
{
    if (value != output)
        masm.movl(value, output);

    switch (Scalar::byteSize(type)) {
      case 1:
        CheckBytereg(output);
        masm.xchgb(output, Operand(mem));
        break;
      case 2:
        masm.xchgw(output, Operand(mem));
        break;
      case 4:
        masm.xchgl(output, Operand(mem));
        break;
      default:
        MOZ_CRASH("Invalid");
    }
    ExtendTo32(masm, type, output);
}

void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&, const Address& mem,
                               Register value, Register output)
{
    AtomicExchange(*this, type, mem, value, output);
}

void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&, const BaseIndex& mem,
                               Register value, Register output)
{
    AtomicExchange(*this, type, mem, value, output);
}

static void
SetupValue(MacroAssembler& masm, AtomicOp op, Imm32 src, Register output) {
    if (op == AtomicFetchSubOp)
        masm.movl(Imm32(-src.value), output);
    else
        masm.movl(src, output);
}

static void
SetupValue(MacroAssembler& masm, AtomicOp op, Register src, Register output) {
    if (src != output)
        masm.movl(src, output);
    if (op == AtomicFetchSubOp)
        masm.negl(output);
}
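
SetupValue exists because x86 has lock xadd but no fetch-and-subtract: a - b is emitted as a + (-b), with the negation folded into the immediate at assembly time or applied to the register with negl. The identity holds in wraparound arithmetic (sketch):

    #include <atomic>
    #include <cstdint>

    // fetch_sub expressed as fetch_add of the two's-complement negation.
    static uint32_t FetchSub(std::atomic<uint32_t>* cell, uint32_t v)
    {
        return cell->fetch_add(0u - v);  // updates the cell exactly like fetch_sub(v)
    }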

template<typename T, typename V>
static void
AtomicFetchOp(MacroAssembler& masm, Scalar::Type arrayType, AtomicOp op, V value,
              const T& mem, Register temp, Register output)
{
    // Note value can be an Imm or a Register.

#define ATOMIC_BITOP_BODY(LOAD, OP, LOCK_CMPXCHG) \
    do { \
        MOZ_ASSERT(output != temp); \
        MOZ_ASSERT(output == eax); \
        masm.LOAD(Operand(mem), eax); \
        Label again; \
        masm.bind(&again); \
        masm.movl(eax, temp); \
        masm.OP(value, temp); \
        masm.LOCK_CMPXCHG(temp, Operand(mem)); \
        masm.j(MacroAssembler::NonZero, &again); \
    } while (0)

    MOZ_ASSERT_IF(op == AtomicFetchAddOp || op == AtomicFetchSubOp, temp == InvalidReg);

    switch (Scalar::byteSize(arrayType)) {
      case 1:
        CheckBytereg(value);
        CheckBytereg(output);
        switch (op) {
          case AtomicFetchAddOp:
          case AtomicFetchSubOp:
            SetupValue(masm, op, value, output);
            masm.lock_xaddb(output, Operand(mem));
            break;
          case AtomicFetchAndOp:
            CheckBytereg(temp);
            ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchgb);
            break;
          case AtomicFetchOrOp:
            CheckBytereg(temp);
            ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchgb);
            break;
          case AtomicFetchXorOp:
            CheckBytereg(temp);
            ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchgb);
            break;
          default:
            MOZ_CRASH();
        }
        break;
      case 2:
        switch (op) {
          case AtomicFetchAddOp:
          case AtomicFetchSubOp:
            SetupValue(masm, op, value, output);
            masm.lock_xaddw(output, Operand(mem));
            break;
          case AtomicFetchAndOp:
            ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchgw);
            break;
          case AtomicFetchOrOp:
            ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchgw);
            break;
          case AtomicFetchXorOp:
            ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchgw);
            break;
          default:
            MOZ_CRASH();
        }
        break;
      case 4:
        switch (op) {
          case AtomicFetchAddOp:
          case AtomicFetchSubOp:
            SetupValue(masm, op, value, output);
            masm.lock_xaddl(output, Operand(mem));
            break;
          case AtomicFetchAndOp:
            ATOMIC_BITOP_BODY(movl, andl, lock_cmpxchgl);
            break;
          case AtomicFetchOrOp:
            ATOMIC_BITOP_BODY(movl, orl, lock_cmpxchgl);
            break;
          case AtomicFetchXorOp:
            ATOMIC_BITOP_BODY(movl, xorl, lock_cmpxchgl);
            break;
          default:
            MOZ_CRASH();
        }
        break;
    }
    ExtendTo32(masm, arrayType, output);

#undef ATOMIC_BITOP_BODY
}
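
Add and sub go through lock xadd, but x86 has no single fetch-and/or/xor instruction, so ATOMIC_BITOP_BODY spins on lock cmpxchg until the store succeeds. The loop it emits is equivalent to this C++ (sketch):

    #include <atomic>
    #include <cstdint>

    static uint32_t FetchAnd(std::atomic<uint32_t>* cell, uint32_t v)
    {
        uint32_t old = cell->load();
        // On failure `old` is refreshed with the current contents, mirroring
        // how cmpxchg leaves the observed value in eax before the retry jump.
        while (!cell->compare_exchange_weak(old, old & v)) {
        }
        return old;
    }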

void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Register value, const BaseIndex& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, arrayType, op, value, mem, temp, output);
}

void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Register value, const Address& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, arrayType, op, value, mem, temp, output);
}

void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Imm32 value, const BaseIndex& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, arrayType, op, value, mem, temp, output);
}

void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Imm32 value, const Address& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, arrayType, op, value, mem, temp, output);
}

template<typename T, typename V>
static void
AtomicEffectOp(MacroAssembler& masm, Scalar::Type arrayType, AtomicOp op, V value, const T& mem)
{
    switch (Scalar::byteSize(arrayType)) {
      case 1:
        switch (op) {
          case AtomicFetchAddOp: masm.lock_addb(value, Operand(mem)); break;
          case AtomicFetchSubOp: masm.lock_subb(value, Operand(mem)); break;
          case AtomicFetchAndOp: masm.lock_andb(value, Operand(mem)); break;
          case AtomicFetchOrOp: masm.lock_orb(value, Operand(mem)); break;
          case AtomicFetchXorOp: masm.lock_xorb(value, Operand(mem)); break;
          default:
            MOZ_CRASH();
        }
        break;
      case 2:
        switch (op) {
          case AtomicFetchAddOp: masm.lock_addw(value, Operand(mem)); break;
          case AtomicFetchSubOp: masm.lock_subw(value, Operand(mem)); break;
          case AtomicFetchAndOp: masm.lock_andw(value, Operand(mem)); break;
          case AtomicFetchOrOp: masm.lock_orw(value, Operand(mem)); break;
          case AtomicFetchXorOp: masm.lock_xorw(value, Operand(mem)); break;
          default:
            MOZ_CRASH();
        }
        break;
      case 4:
        switch (op) {
          case AtomicFetchAddOp: masm.lock_addl(value, Operand(mem)); break;
          case AtomicFetchSubOp: masm.lock_subl(value, Operand(mem)); break;
          case AtomicFetchAndOp: masm.lock_andl(value, Operand(mem)); break;
          case AtomicFetchOrOp: masm.lock_orl(value, Operand(mem)); break;
          case AtomicFetchXorOp: masm.lock_xorl(value, Operand(mem)); break;
          default:
            MOZ_CRASH();
        }
        break;
      default:
        MOZ_CRASH();
    }
}

void
MacroAssembler::atomicEffectOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                               Register value, const BaseIndex& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, arrayType, op, value, mem);
}

void
MacroAssembler::atomicEffectOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                               Register value, const Address& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, arrayType, op, value, mem);
}

void
MacroAssembler::atomicEffectOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                               Imm32 value, const BaseIndex& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, arrayType, op, value, mem);
}

void
MacroAssembler::atomicEffectOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                               Imm32 value, const Address& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, arrayType, op, value, mem);
}

// ========================================================================
// JS atomic operations.

template<typename T>
static void
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
                AtomicOp op, Imm32 value, const T& mem, Register temp1, Register temp2,
                AnyRegister output)
{
    if (arrayType == Scalar::Uint32) {
        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
        masm.convertUInt32ToDouble(temp1, output.fpu());
    } else {
        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
    }
}

void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                                Imm32 value, const Address& mem, Register temp1, Register temp2,
                                AnyRegister output)
{
    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}

void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                                Imm32 value, const BaseIndex& mem, Register temp1, Register temp2,
                                AnyRegister output)
{
    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}

void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                                 Imm32 value, const Address& mem, Register temp)
{
    atomicEffectOp(arrayType, sync, op, value, mem, temp);
}

void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                                 Imm32 value, const BaseIndex& mem, Register temp)
{
    atomicEffectOp(arrayType, sync, op, value, mem, temp);
}

//}}} check_macroassembler_style

@@ -15,23 +15,6 @@
# include "jit/x64/Assembler-x64.h"
#endif

#ifdef DEBUG
#define CHECK_BYTEREG(reg) \
    JS_BEGIN_MACRO \
        AllocatableGeneralRegisterSet byteRegs(Registers::SingleByteRegs); \
        MOZ_ASSERT(byteRegs.has(reg)); \
    JS_END_MACRO
#define CHECK_BYTEREGS(r1, r2) \
    JS_BEGIN_MACRO \
        AllocatableGeneralRegisterSet byteRegs(Registers::SingleByteRegs); \
        MOZ_ASSERT(byteRegs.has(r1)); \
        MOZ_ASSERT(byteRegs.has(r2)); \
    JS_END_MACRO
#else
#define CHECK_BYTEREG(reg) (void)0
#define CHECK_BYTEREGS(r1, r2) (void)0
#endif

namespace js {
namespace jit {

@@ -184,343 +167,6 @@ class MacroAssemblerX86Shared : public Assembler
        lock_decl(addr);
    }

    template <typename T>
    void atomicFetchAdd8SignExtend(Register src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREGS(src, output);
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        lock_xaddb(output, Operand(mem));
        movsbl(output, output);
    }

    template <typename T>
    void atomicFetchAdd8ZeroExtend(Register src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREGS(src, output);
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        lock_xaddb(output, Operand(mem));
        movzbl(output, output);
    }

    template <typename T>
    void atomicFetchAdd8SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(output);
        MOZ_ASSERT(temp == InvalidReg);
        movb(src, output);
        lock_xaddb(output, Operand(mem));
        movsbl(output, output);
    }

    template <typename T>
    void atomicFetchAdd8ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(output);
        MOZ_ASSERT(temp == InvalidReg);
        movb(src, output);
        lock_xaddb(output, Operand(mem));
        movzbl(output, output);
    }

    template <typename T>
    void atomicFetchAdd16SignExtend(Register src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        lock_xaddw(output, Operand(mem));
        movswl(output, output);
    }

    template <typename T>
    void atomicFetchAdd16ZeroExtend(Register src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        lock_xaddw(output, Operand(mem));
        movzwl(output, output);
    }

    template <typename T>
    void atomicFetchAdd16SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        movl(src, output);
        lock_xaddw(output, Operand(mem));
        movswl(output, output);
    }

    template <typename T>
    void atomicFetchAdd16ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        movl(src, output);
        lock_xaddw(output, Operand(mem));
        movzwl(output, output);
    }

    template <typename T>
    void atomicFetchAdd32(Register src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        lock_xaddl(output, Operand(mem));
    }

    template <typename T>
    void atomicFetchAdd32(Imm32 src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        movl(src, output);
        lock_xaddl(output, Operand(mem));
    }

    template <typename T>
    void atomicFetchSub8SignExtend(Register src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREGS(src, output);
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        negl(output);
        lock_xaddb(output, Operand(mem));
        movsbl(output, output);
    }

    template <typename T>
    void atomicFetchSub8ZeroExtend(Register src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREGS(src, output);
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        negl(output);
        lock_xaddb(output, Operand(mem));
        movzbl(output, output);
    }

    template <typename T>
    void atomicFetchSub8SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(output);
        MOZ_ASSERT(temp == InvalidReg);
        movb(Imm32(-src.value), output);
        lock_xaddb(output, Operand(mem));
        movsbl(output, output);
    }

    template <typename T>
    void atomicFetchSub8ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(output);
        MOZ_ASSERT(temp == InvalidReg);
        movb(Imm32(-src.value), output);
        lock_xaddb(output, Operand(mem));
        movzbl(output, output);
    }

    template <typename T>
    void atomicFetchSub16SignExtend(Register src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        negl(output);
        lock_xaddw(output, Operand(mem));
        movswl(output, output);
    }

    template <typename T>
    void atomicFetchSub16ZeroExtend(Register src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        negl(output);
        lock_xaddw(output, Operand(mem));
        movzwl(output, output);
    }

    template <typename T>
    void atomicFetchSub16SignExtend(Imm32 src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        movl(Imm32(-src.value), output);
        lock_xaddw(output, Operand(mem));
        movswl(output, output);
    }

    template <typename T>
    void atomicFetchSub16ZeroExtend(Imm32 src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        movl(Imm32(-src.value), output);
        lock_xaddw(output, Operand(mem));
        movzwl(output, output);
    }

    template <typename T>
    void atomicFetchSub32(Register src, const T& mem, Register temp, Register output) {
        MOZ_ASSERT(temp == InvalidReg);
        if (src != output)
            movl(src, output);
        negl(output);
        lock_xaddl(output, Operand(mem));
    }

    template <typename T>
    void atomicFetchSub32(Imm32 src, const T& mem, Register temp, Register output) {
        movl(Imm32(-src.value), output);
        lock_xaddl(output, Operand(mem));
    }

    // Requires output == eax. Note src can be an Imm and we can't directly
    // assert that it's different from output or tmp.
#define ATOMIC_BITOP_BODY(LOAD, OP, LOCK_CMPXCHG) \
        MOZ_ASSERT(output != temp); \
        MOZ_ASSERT(output == eax); \
        LOAD(Operand(mem), eax); \
        Label again; \
        bind(&again); \
        movl(eax, temp); \
        OP(src, temp); \
        LOCK_CMPXCHG(temp, Operand(mem)); \
        j(NonZero, &again);

    template <typename S, typename T>
    void atomicFetchAnd8SignExtend(const S& src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(temp);
        ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchgb)
        movsbl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchAnd8ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(temp);
        ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchgb)
        movzbl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchAnd16SignExtend(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchgw)
        movswl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchAnd16ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchgw)
        movzwl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchAnd32(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movl, andl, lock_cmpxchgl)
    }

    template <typename S, typename T>
    void atomicFetchOr8SignExtend(const S& src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(temp);
        ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchgb)
        movsbl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchOr8ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(temp);
        ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchgb)
        movzbl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchOr16SignExtend(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchgw)
        movswl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchOr16ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchgw)
        movzwl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchOr32(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movl, orl, lock_cmpxchgl)
    }

    template <typename S, typename T>
    void atomicFetchXor8SignExtend(const S& src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(temp);
        ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchgb)
        movsbl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchXor8ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
        CHECK_BYTEREG(temp);
        ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchgb)
        movzbl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchXor16SignExtend(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchgw)
        movswl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchXor16ZeroExtend(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchgw)
        movzwl(eax, eax);
    }
    template <typename S, typename T>
    void atomicFetchXor32(const S& src, const T& mem, Register temp, Register output) {
        ATOMIC_BITOP_BODY(movl, xorl, lock_cmpxchgl)
    }

#undef ATOMIC_BITOP_BODY

    // S is Register or Imm32; T is Address or BaseIndex.

    template <typename S, typename T>
    void atomicAdd8(const S& src, const T& mem) {
        lock_addb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAdd16(const S& src, const T& mem) {
        lock_addw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAdd32(const S& src, const T& mem) {
        lock_addl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicSub8(const S& src, const T& mem) {
        lock_subb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicSub16(const S& src, const T& mem) {
        lock_subw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicSub32(const S& src, const T& mem) {
        lock_subl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAnd8(const S& src, const T& mem) {
        lock_andb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAnd16(const S& src, const T& mem) {
        lock_andw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicAnd32(const S& src, const T& mem) {
        lock_andl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicOr8(const S& src, const T& mem) {
        lock_orb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicOr16(const S& src, const T& mem) {
        lock_orw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicOr32(const S& src, const T& mem) {
        lock_orl(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicXor8(const S& src, const T& mem) {
        lock_xorb(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicXor16(const S& src, const T& mem) {
        lock_xorw(src, Operand(mem));
    }
    template <typename S, typename T>
    void atomicXor32(const S& src, const T& mem) {
        lock_xorl(src, Operand(mem));
    }

    void storeLoadFence() {
        // This implementation follows Linux.
        if (HasSSE2())

@@ -669,40 +315,6 @@ class MacroAssemblerX86Shared : public Assembler
        AutoEnsureByteRegister ensure(this, dest, src);
        movb(ensure.reg(), Operand(dest));
    }
    template <typename T>
    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output) {
        MOZ_ASSERT(output == eax);
        CHECK_BYTEREG(newval);
        if (oldval != output)
            movl(oldval, output);
        lock_cmpxchgb(newval, Operand(mem));
        movzbl(output, output);
    }
    template <typename T>
    void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output) {
        MOZ_ASSERT(output == eax);
        CHECK_BYTEREG(newval);
        if (oldval != output)
            movl(oldval, output);
        lock_cmpxchgb(newval, Operand(mem));
        movsbl(output, output);
    }
    template <typename T>
    void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
        CHECK_BYTEREG(output);
        if (value != output)
            movl(value, output);
        xchgb(output, Operand(mem));
        movzbl(output, output);
    }
    template <typename T>
    void atomicExchange8SignExtend(const T& mem, Register value, Register output) {
        CHECK_BYTEREG(output);
        if (value != output)
            movl(value, output);
        xchgb(output, Operand(mem));
        movsbl(output, output);
    }
    void load16ZeroExtend(const Operand& src, Register dest) {
        movzwl(src, dest);
    }

@@ -716,36 +328,6 @@ class MacroAssemblerX86Shared : public Assembler
    void store16(const S& src, const T& dest) {
        movw(src, Operand(dest));
    }
    template <typename T>
    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output) {
        MOZ_ASSERT(output == eax);
        if (oldval != output)
            movl(oldval, output);
        lock_cmpxchgw(newval, Operand(mem));
        movzwl(output, output);
    }
    template <typename T>
    void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output) {
        MOZ_ASSERT(output == eax);
        if (oldval != output)
            movl(oldval, output);
        lock_cmpxchgw(newval, Operand(mem));
        movswl(output, output);
    }
    template <typename T>
    void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
        if (value != output)
            movl(value, output);
        xchgw(output, Operand(mem));
        movzwl(output, output);
    }
    template <typename T>
    void atomicExchange16SignExtend(const T& mem, Register value, Register output) {
        if (value != output)
            movl(value, output);
        xchgw(output, Operand(mem));
        movswl(output, output);
    }
    void load16SignExtend(const Operand& src, Register dest) {
        movswl(src, dest);
    }

@@ -768,19 +350,6 @@ class MacroAssemblerX86Shared : public Assembler
    void store32(const S& src, const T& dest) {
        movl(src, Operand(dest));
    }
    template <typename T>
    void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
        MOZ_ASSERT(output == eax);
        if (oldval != output)
            movl(oldval, output);
        lock_cmpxchgl(newval, Operand(mem));
    }
    template <typename T>
    void atomicExchange32(const T& mem, Register value, Register output) {
        if (value != output)
            movl(value, output);
        xchgl(output, Operand(mem));
    }
    template <typename S, typename T>
    void store32_NoSecondScratch(const S& src, const T& dest) {
        store32(src, dest);

@@ -1349,14 +918,6 @@ class MacroAssemblerX86Shared : public Assembler
        ret();
    }

    template<typename T>
    void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
                                        Register temp, AnyRegister output);

    template<typename T>
    void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
                                       Register temp, AnyRegister output);

  protected:
    bool buildOOLFakeExitFrame(void* fakeReturnAddr);
};

@@ -1428,7 +989,4 @@ MacroAssemblerX86Shared::storeScalar<float>(FloatRegister src, const Address& de
} // namespace jit
} // namespace js

#undef CHECK_BYTEREG
#undef CHECK_BYTEREGS

#endif /* jit_x86_shared_MacroAssembler_x86_shared_h */

@@ -510,16 +510,12 @@ CodeGeneratorX86::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins)
    Register newval = ToRegister(ins->newValue());
    Register addrTemp = ToRegister(ins->addrTemp());
    Register memoryBase = ToRegister(ins->memoryBase());
    Register output = ToRegister(ins->output());

    masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()), addrTemp);

    Address memAddr(addrTemp, 0);
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        memAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    masm.compareExchange(accessType, Synchronization::Full(), memAddr, oldval, newval, output);
}

void

@@ -532,15 +528,12 @@ CodeGeneratorX86::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)
    Register value = ToRegister(ins->value());
    Register addrTemp = ToRegister(ins->addrTemp());
    Register memoryBase = ToRegister(ins->memoryBase());
    Register output = ToRegister(ins->output());

    masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()), addrTemp);

    Address memAddr(addrTemp, 0);
    masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                       memAddr,
                                       value,
                                       InvalidReg,
                                       ToAnyRegister(ins->output()));
    masm.atomicExchange(accessType, Synchronization::Full(), memAddr, value, output);
}

void

@@ -552,6 +545,7 @@ CodeGeneratorX86::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
    Register ptrReg = ToRegister(ins->ptr());
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    Register addrTemp = ToRegister(ins->addrTemp());
    Register out = ToRegister(ins->output());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();
    Register memoryBase = ToRegister(ins->memoryBase());

@@ -560,19 +554,11 @@ CodeGeneratorX86::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)

    Address memAddr(addrTemp, 0);
    if (value->isConstant()) {
        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                   Imm32(ToInt32(value)),
                                   memAddr,
                                   temp,
                                   InvalidReg,
                                   ToAnyRegister(ins->output()));
        masm.atomicFetchOp(accessType, Synchronization::Full(), op, Imm32(ToInt32(value)),
                           memAddr, temp, out);
    } else {
        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                   ToRegister(value),
                                   memAddr,
                                   temp,
                                   InvalidReg,
                                   ToAnyRegister(ins->output()));
        masm.atomicFetchOp(accessType, Synchronization::Full(), op, ToRegister(value),
                           memAddr, temp, out);
    }
}


@@ -592,10 +578,13 @@ CodeGeneratorX86::visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffec
    masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()), addrTemp);

    Address memAddr(addrTemp, 0);
    if (value->isConstant())
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
    else
        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
    if (value->isConstant()) {
        masm.atomicEffectOp(accessType, Synchronization::Full(), op, Imm32(ToInt32(value)), memAddr,
                            InvalidReg);
    } else {
        masm.atomicEffectOp(accessType, Synchronization::Full(), op, ToRegister(value), memAddr,
                            InvalidReg);
    }
}

void

@@ -715,14 +704,7 @@ CodeGeneratorX86::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins)
    Address valueAddr(esp, 0);

    // Here the `value` register acts as a temp, we'll restore it below.
    switch (ins->operation()) {
      case AtomicFetchAddOp: masm.atomicFetchAdd64(valueAddr, srcAddr, value, output); break;
      case AtomicFetchSubOp: masm.atomicFetchSub64(valueAddr, srcAddr, value, output); break;
      case AtomicFetchAndOp: masm.atomicFetchAnd64(valueAddr, srcAddr, value, output); break;
      case AtomicFetchOrOp: masm.atomicFetchOr64(valueAddr, srcAddr, value, output); break;
      case AtomicFetchXorOp: masm.atomicFetchXor64(valueAddr, srcAddr, value, output); break;
      default: MOZ_CRASH();
    }
    masm.atomicFetchOp64(Synchronization::Full(), ins->operation(), valueAddr, srcAddr, value, output);

    masm.Pop(ebx);
    masm.Pop(ecx);

@@ -9,6 +9,7 @@
#include "mozilla/Alignment.h"
#include "mozilla/Casting.h"

#include "jit/AtomicOp.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"

@@ -660,7 +661,7 @@ MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
{
    MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP || srcAddr.kind() == Operand::MEM_SCALE);

    memoryBarrier(access.barrierBefore());
    memoryBarrierBefore(access.sync());

    size_t loadOffset = size();
    switch (access.type()) {

@@ -721,7 +722,7 @@ MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
    }
    append(access, loadOffset, framePushed());

    memoryBarrier(access.barrierAfter());
    memoryBarrierAfter(access.sync());
}

void

@@ -732,7 +733,7 @@ MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAdd
    MOZ_ASSERT(!access.isSimd());
    MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP || srcAddr.kind() == Operand::MEM_SCALE);

    memoryBarrier(access.barrierBefore());
    memoryBarrierBefore(access.sync());

    size_t loadOffset = size();
    switch (access.type()) {

@@ -804,7 +805,7 @@ MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAdd
        MOZ_CRASH("unexpected array type");
    }

    memoryBarrier(access.barrierAfter());
    memoryBarrierAfter(access.sync());
}

void

@@ -812,7 +813,7 @@ MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister valu
{
    MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP || dstAddr.kind() == Operand::MEM_SCALE);

    memoryBarrier(access.barrierBefore());
    memoryBarrierBefore(access.sync());

    size_t storeOffset = size();
    switch (access.type()) {

@@ -870,7 +871,7 @@ MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister valu
    }
    append(access, storeOffset, framePushed());

    memoryBarrier(access.barrierAfter());
    memoryBarrierAfter(access.sync());
}

void

@@ -890,94 +891,158 @@ MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 va
    append(access, storeOffset, framePushed());
}

template <typename T>
static void
AtomicLoad64(MacroAssembler& masm, const T& address, Register64 temp, Register64 output)
{
    MOZ_ASSERT(temp.low == ebx);
    MOZ_ASSERT(temp.high == ecx);
    MOZ_ASSERT(output.high == edx);
    MOZ_ASSERT(output.low == eax);

    // In the event edx:eax matches what's in memory, ecx:ebx will be
    // stored. The two pairs must therefore have the same values.

    masm.movl(edx, ecx);
    masm.movl(eax, ebx);

    masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(address));
}
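
x86-32 has no ordinary 64-bit load, so AtomicLoad64 abuses lock cmpxchg8b: with the replacement pair copied from the comparand pair, the instruction either leaves memory alone or rewrites it with the identical value, and edx:eax receives the contents in both cases. The net effect, in C++11 terms (sketch):

    #include <atomic>
    #include <cstdint>

    // An atomic 64-bit load phrased as a compare-exchange whose
    // replacement equals the comparand, so memory is never changed.
    static uint64_t AtomicLoad64(std::atomic<uint64_t>* cell)
    {
        uint64_t expected = 0;                          // arbitrary guess
        cell->compare_exchange_strong(expected, expected);
        return expected;                                // now the real contents
    }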

void
MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem, Register64 temp,
                             Register64 output)
{
    AtomicLoad64(*this, mem, temp, output);
}

void
MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem, Register64 temp,
                             Register64 output)
{
    AtomicLoad64(*this, mem, temp, output);
}

template <typename T>
static void
CompareExchange64(MacroAssembler& masm, const T& mem, Register64 expected,
                  Register64 replacement, Register64 output)
{
    MOZ_ASSERT(expected == output);
    MOZ_ASSERT(expected.high == edx);
    MOZ_ASSERT(expected.low == eax);
    MOZ_ASSERT(replacement.high == ecx);
    MOZ_ASSERT(replacement.low == ebx);

    masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem));
}

void
MacroAssembler::compareExchange64(const Synchronization&, const Address& mem, Register64 expected,
                                  Register64 replacement, Register64 output)
{
    CompareExchange64(*this, mem, expected, replacement, output);
}

void
MacroAssembler::compareExchange64(const Synchronization&, const BaseIndex& mem, Register64 expected,
                                  Register64 replacement, Register64 output)
{
    CompareExchange64(*this, mem, expected, replacement, output);
}

template <typename T>
static void
AtomicExchange64(MacroAssembler& masm, const T& mem, Register64 value, Register64 output)
{
    MOZ_ASSERT(value.low == ebx);
    MOZ_ASSERT(value.high == ecx);
    MOZ_ASSERT(output.high == edx);
    MOZ_ASSERT(output.low == eax);

    // edx:eax has garbage initially, and that is the best we can do unless
    // we can guess with high probability what's in memory.

    Label again;
    masm.bind(&again);
    masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem));
    masm.j(MacroAssembler::NonZero, &again);
}
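
AtomicExchange64 runs the same cmpxchg8b in a loop: edx:eax starts out as garbage, and once it happens to match memory the new value in ecx:ebx is committed while edx:eax keeps the old contents. The equivalent retry loop (sketch):

    #include <atomic>
    #include <cstdint>

    static uint64_t Exchange64(std::atomic<uint64_t>* cell, uint64_t value)
    {
        uint64_t old = 0;  // any starting guess works; a failed attempt
                           // loads the observed contents back into `old`
        while (!cell->compare_exchange_weak(old, value)) {
        }
        return old;
    }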

void
MacroAssembler::atomicExchange64(const Synchronization&, const Address& mem, Register64 value,
                                 Register64 output)
{
    AtomicExchange64(*this, mem, value, output);
}

void
MacroAssembler::atomicExchange64(const Synchronization&, const BaseIndex& mem, Register64 value,
                                 Register64 output)
{
    AtomicExchange64(*this, mem, value, output);
}

template<typename T>
static void
AtomicFetchOp64(MacroAssembler& masm, AtomicOp op, const Address& value, const T& mem,
                Register64 temp, Register64 output)
{
    // We don't have enough registers for all the operands on x86, so the rhs
    // operand is in memory.

#define ATOMIC_OP_BODY(OPERATE) \
    MOZ_ASSERT(output.low == eax); \
    MOZ_ASSERT(output.high == edx); \
    MOZ_ASSERT(temp.low == ebx); \
    MOZ_ASSERT(temp.high == ecx); \
    load64(address, output); \
    Label again; \
    bind(&again); \
    asMasm().move64(output, temp); \
    OPERATE(value, temp); \
    lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(address)); \
    j(NonZero, &again);
#define ATOMIC_OP_BODY(OPERATE) \
    do { \
        MOZ_ASSERT(output.low == eax); \
        MOZ_ASSERT(output.high == edx); \
        MOZ_ASSERT(temp.low == ebx); \
        MOZ_ASSERT(temp.high == ecx); \
        masm.load64(mem, output); \
        Label again; \
        masm.bind(&again); \
        masm.move64(output, temp); \
        masm.OPERATE(Operand(value), temp); \
        masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem)); \
        masm.j(MacroAssembler::NonZero, &again); \
    } while (0)

template <typename T, typename U>
void
MacroAssemblerX86::atomicFetchAdd64(const T& value, const U& address, Register64 temp,
                                    Register64 output)
{
    ATOMIC_OP_BODY(add64)
}

template <typename T, typename U>
void
MacroAssemblerX86::atomicFetchSub64(const T& value, const U& address, Register64 temp,
                                    Register64 output)
{
    ATOMIC_OP_BODY(sub64)
}

template <typename T, typename U>
void
MacroAssemblerX86::atomicFetchAnd64(const T& value, const U& address, Register64 temp,
                                    Register64 output)
{
    ATOMIC_OP_BODY(and64)
}

template <typename T, typename U>
void
MacroAssemblerX86::atomicFetchOr64(const T& value, const U& address, Register64 temp,
                                   Register64 output)
{
    ATOMIC_OP_BODY(or64)
}

template <typename T, typename U>
void
MacroAssemblerX86::atomicFetchXor64(const T& value, const U& address, Register64 temp,
                                    Register64 output)
{
    ATOMIC_OP_BODY(xor64)
}
    switch (op) {
      case AtomicFetchAddOp:
        ATOMIC_OP_BODY(add64FromMemory);
        break;
      case AtomicFetchSubOp:
        ATOMIC_OP_BODY(sub64FromMemory);
        break;
      case AtomicFetchAndOp:
        ATOMIC_OP_BODY(and64FromMemory);
        break;
      case AtomicFetchOrOp:
        ATOMIC_OP_BODY(or64FromMemory);
        break;
      case AtomicFetchXorOp:
        ATOMIC_OP_BODY(xor64FromMemory);
        break;
      default:
        MOZ_CRASH();
    }

#undef ATOMIC_OP_BODY
}
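
The ..FromMemory operations exist because the loop above pins edx:eax, ecx:ebx and the address operand, leaving no registers to hold the 64-bit rhs; it therefore stays in its stack slot and is re-read on every iteration. In C terms (sketch):

    #include <atomic>
    #include <cstdint>

    // Fetch-add where the operand lives in memory rather than a register.
    static uint64_t FetchAdd64(std::atomic<uint64_t>* cell, const uint64_t* rhsSlot)
    {
        uint64_t old = cell->load();
        while (!cell->compare_exchange_weak(old, old + *rhsSlot)) {
            // The rhs slot is re-read here, just as the emitted loop does.
        }
        return old;
    }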

template void
js::jit::MacroAssemblerX86::atomicFetchAdd64(const Address& value, const Address& address,
                                             Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchAdd64(const Address& value, const BaseIndex& address,
                                             Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchSub64(const Address& value, const Address& address,
                                             Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchSub64(const Address& value, const BaseIndex& address,
                                             Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchAnd64(const Address& value, const Address& address,
                                             Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchAnd64(const Address& value, const BaseIndex& address,
                                             Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchOr64(const Address& value, const Address& address,
                                            Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchOr64(const Address& value, const BaseIndex& address,
                                            Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchXor64(const Address& value, const Address& address,
                                             Register64 temp, Register64 output);
template void
js::jit::MacroAssemblerX86::atomicFetchXor64(const Address& value, const BaseIndex& address,
                                             Register64 temp, Register64 output);
void
MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, const Address& value,
                                const Address& mem, Register64 temp, Register64 output)
{
    AtomicFetchOp64(*this, op, value, mem, temp, output);
}

void
MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, const Address& value,
                                const BaseIndex& mem, Register64 temp, Register64 output)
{
    AtomicFetchOp64(*this, op, value, mem, temp, output);
}

void
MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)


@@ -94,48 +94,31 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
    }

    template <typename T>
    void add64(const T& address, Register64 dest) {
    void add64FromMemory(const T& address, Register64 dest) {
        addl(Operand(LowWord(address)), dest.low);
        adcl(Operand(HighWord(address)), dest.high);
    }
    template <typename T>
    void sub64(const T& address, Register64 dest) {
    void sub64FromMemory(const T& address, Register64 dest) {
        subl(Operand(LowWord(address)), dest.low);
        sbbl(Operand(HighWord(address)), dest.high);
    }
    template <typename T>
    void and64(const T& address, Register64 dest) {
    void and64FromMemory(const T& address, Register64 dest) {
        andl(Operand(LowWord(address)), dest.low);
        andl(Operand(HighWord(address)), dest.high);
    }
    template <typename T>
    void or64(const T& address, Register64 dest) {
    void or64FromMemory(const T& address, Register64 dest) {
        orl(Operand(LowWord(address)), dest.low);
        orl(Operand(HighWord(address)), dest.high);
    }
    template <typename T>
    void xor64(const T& address, Register64 dest) {
    void xor64FromMemory(const T& address, Register64 dest) {
        xorl(Operand(LowWord(address)), dest.low);
        xorl(Operand(HighWord(address)), dest.high);
    }
|
||||
|
||||
// Here, `value` is an address to an Int64 because we don't have enough
|
||||
// registers for all the operands. It is allowed to be SP-relative.
|
||||
template <typename T, typename U>
|
||||
void atomicFetchAdd64(const T& value, const U& address, Register64 temp, Register64 output);
|
||||
|
||||
template <typename T, typename U>
|
||||
void atomicFetchSub64(const T& value, const U& address, Register64 temp, Register64 output);
|
||||
|
||||
template <typename T, typename U>
|
||||
void atomicFetchAnd64(const T& value, const U& address, Register64 temp, Register64 output);
|
||||
|
||||
template <typename T, typename U>
|
||||
void atomicFetchOr64(const T& value, const U& address, Register64 temp, Register64 output);
|
||||
|
||||
template <typename T, typename U>
|
||||
void atomicFetchXor64(const T& value, const U& address, Register64 temp, Register64 output);
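    // As the comment above notes, a register-starved caller can spill the
    // 64-bit operand and pass its stack slot. A hypothetical sketch (rv64 and
    // memAddr are invented; reserveStack/store64/freeStack are MacroAssembler
    // helpers):
    //
    //     masm.reserveStack(sizeof(int64_t));
    //     masm.store64(rv64, Address(esp, 0));          // spill the operand
    //     masm.atomicFetchAdd64(Address(esp, 0),        // SP-relative `value`
    //                           memAddr, temp, output);
    //     masm.freeStack(sizeof(int64_t));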

    /////////////////////////////////////////////////////////////////
    // X86/X64-common interface.
    /////////////////////////////////////////////////////////////////

@ -675,50 +658,6 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
        movl(imm.hi(), Operand(HighWord(address)));
    }

    template <typename T>
    void atomicLoad64(const T& address, Register64 temp, Register64 output) {
        MOZ_ASSERT(temp.low == ebx);
        MOZ_ASSERT(temp.high == ecx);
        MOZ_ASSERT(output.high == edx);
        MOZ_ASSERT(output.low == eax);

        // In the event edx:eax matches what's in memory, ecx:ebx will be
        // stored.  The two pairs must therefore have the same values.
        movl(edx, ecx);
        movl(eax, ebx);

        lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(address));
    }
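    // Why the unconditional CMPXCHG8B above acts as a plain 64-bit load (a
    // property of the instruction, not of this patch): on a mismatch it
    // writes the current memory value into edx:eax, which is exactly the
    // loaded result; on a match it stores ecx:ebx, which was just copied
    // from edx:eax, so memory is unchanged. Either way edx:eax ends up
    // holding the value that was in memory, atomically.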

    template <typename T>
    void atomicExchange64(const T& address, Register64 value, Register64 output) {
        MOZ_ASSERT(value.low == ebx);
        MOZ_ASSERT(value.high == ecx);
        MOZ_ASSERT(output.high == edx);
        MOZ_ASSERT(output.low == eax);

        // edx:eax has garbage initially, and that is the best we can do unless
        // we can guess with high probability what's in memory.

        Label again;
        bind(&again);
        lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(address));
        j(Assembler::Condition::NonZero, &again);
    }
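    // A note on the retry loop, again following from CMPXCHG8B semantics:
    // each failed attempt reloads the current memory value into edx:eax, so
    // the next compare runs against fresh data. Starting from garbage, the
    // exchange normally succeeds on the second iteration, and later
    // iterations occur only under contention.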

    template <typename T>
    void compareExchange64(const T& address, Register64 expected, Register64 replacement,
                           Register64 output)
    {
        MOZ_ASSERT(expected == output);
        MOZ_ASSERT(expected.high == edx);
        MOZ_ASSERT(expected.low == eax);
        MOZ_ASSERT(replacement.high == ecx);
        MOZ_ASSERT(replacement.low == ebx);

        lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(address));
    }
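    // A hypothetical call honoring the register contract the asserts encode,
    // assuming the 32-bit Register64(high, low) constructor; `addr` is a
    // placeholder for any T accepted by Operand(address):
    //
    //     Register64 expected(edx, eax);    // doubles as the output pair
    //     Register64 replacement(ecx, ebx);
    //     masm.compareExchange64(addr, expected, replacement, expected);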

    void setStackArg(Register reg, uint32_t arg) {
        movl(reg, Operand(esp, arg * sizeof(intptr_t)));
    }

@ -4181,6 +4181,7 @@ class BaseCompiler final : public BaseCompilerInterface
    void
    atomicRMW32(T srcAddr, Scalar::Type viewType, AtomicOp op, RegI32 rv, RegI32 rd, RegI32 temp)
    {
        Synchronization sync = Synchronization::Full();
        switch (viewType) {
          case Scalar::Uint8: {
#ifdef JS_CODEGEN_X86

@ -4190,39 +4191,14 @@ class BaseCompiler final : public BaseCompilerInterface
            if (op != AtomicFetchAddOp && op != AtomicFetchSubOp)
                temp = scratch;
#endif
            switch (op) {
              case AtomicFetchAddOp: masm.atomicFetchAdd8ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchSubOp: masm.atomicFetchSub8ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchAndOp: masm.atomicFetchAnd8ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchOrOp:  masm.atomicFetchOr8ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchXorOp: masm.atomicFetchXor8ZeroExtend(rv, srcAddr, temp, rd); break;
              default: MOZ_CRASH("No such op");
            }
            break;
          }
          case Scalar::Uint16: {
            switch (op) {
              case AtomicFetchAddOp: masm.atomicFetchAdd16ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchSubOp: masm.atomicFetchSub16ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchAndOp: masm.atomicFetchAnd16ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchOrOp:  masm.atomicFetchOr16ZeroExtend(rv, srcAddr, temp, rd); break;
              case AtomicFetchXorOp: masm.atomicFetchXor16ZeroExtend(rv, srcAddr, temp, rd); break;
              default: MOZ_CRASH("No such op");
            }
            masm.atomicFetchOp(viewType, sync, op, rv, srcAddr, temp, rd);
            break;
          }
          case Scalar::Uint16:
          case Scalar::Int32:
          case Scalar::Uint32: {
            switch (op) {
              case AtomicFetchAddOp: masm.atomicFetchAdd32(rv, srcAddr, temp, rd); break;
              case AtomicFetchSubOp: masm.atomicFetchSub32(rv, srcAddr, temp, rd); break;
              case AtomicFetchAndOp: masm.atomicFetchAnd32(rv, srcAddr, temp, rd); break;
              case AtomicFetchOrOp:  masm.atomicFetchOr32(rv, srcAddr, temp, rd); break;
              case AtomicFetchXorOp: masm.atomicFetchXor32(rv, srcAddr, temp, rd); break;
              default: MOZ_CRASH("No such op");
            }
          case Scalar::Uint32:
            masm.atomicFetchOp(viewType, sync, op, rv, srcAddr, temp, rd);
            break;
          }
          default: {
            MOZ_CRASH("Bad type for atomic operation");
          }
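        // The per-width switches above collapse into one width-dispatching
        // entry point; what now varies between call sites is the sync
        // argument. Hypothetical call sites (the unordered use is assumed,
        // anticipating the Wasm memory model):
        //
        //     masm.atomicFetchOp(viewType, Synchronization::Full(),
        //                        op, rv, srcAddr, temp, rd);  // seq_cst
        //     masm.atomicFetchOp(viewType, Synchronization::None(),
        //                        op, rv, srcAddr, temp, rd);  // no fences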

@ -4234,20 +4210,14 @@ class BaseCompiler final : public BaseCompilerInterface
    template <typename T, typename V>
    void
    atomicRMW64(const T& srcAddr, AtomicOp op, V value, Register64 temp, Register64 rd) {
        switch (op) {
          case AtomicFetchAddOp: masm.atomicFetchAdd64(value, srcAddr, temp, rd); break;
          case AtomicFetchSubOp: masm.atomicFetchSub64(value, srcAddr, temp, rd); break;
          case AtomicFetchAndOp: masm.atomicFetchAnd64(value, srcAddr, temp, rd); break;
          case AtomicFetchOrOp:  masm.atomicFetchOr64(value, srcAddr, temp, rd); break;
          case AtomicFetchXorOp: masm.atomicFetchXor64(value, srcAddr, temp, rd); break;
          default: MOZ_CRASH("No such op");
        }
        masm.atomicFetchOp64(Synchronization::Full(), op, value, srcAddr, temp, rd);
    }

    template<typename T>
    void
    atomicCmpXchg32(T srcAddr, Scalar::Type viewType, RegI32 rexpect, RegI32 rnew, RegI32 rd)
    {
        Synchronization sync = Synchronization::Full();
        switch (viewType) {
          case Scalar::Uint8: {
#if defined(JS_CODEGEN_X86)

@ -4259,15 +4229,13 @@ class BaseCompiler final : public BaseCompilerInterface
                rnew = scratch;
            }
#endif
            masm.compareExchange8ZeroExtend(srcAddr, rexpect, rnew, rd);
            masm.compareExchange(viewType, sync, srcAddr, rexpect, rnew, rd);
            break;
          }
          case Scalar::Uint16:
            masm.compareExchange16ZeroExtend(srcAddr, rexpect, rnew, rd);
            break;
          case Scalar::Int32:
          case Scalar::Uint32:
            masm.compareExchange32(srcAddr, rexpect, rnew, rd);
            masm.compareExchange(viewType, sync, srcAddr, rexpect, rnew, rd);
            break;
          default:
            MOZ_CRASH("Bad type for atomic operation");

@ -4278,28 +4246,26 @@ class BaseCompiler final : public BaseCompilerInterface
    void
    atomicXchg32(T srcAddr, Scalar::Type viewType, RegI32 rv, RegI32 rd)
    {
        Synchronization sync = Synchronization::Full();
        switch (viewType) {
          case Scalar::Uint8: {
          case Scalar::Uint8:
#if defined(JS_CODEGEN_X86)
          {
            if (!ra.isSingleByteI32(rd)) {
                ScratchI8 scratch(*this);
                // The output register must have a byte persona.
                masm.atomicExchange8ZeroExtend(srcAddr, rv, scratch);
                masm.atomicExchange(viewType, sync, srcAddr, rv, scratch);
                masm.movl(scratch, rd);
            } else {
                masm.atomicExchange8ZeroExtend(srcAddr, rv, rd);
                masm.atomicExchange(viewType, sync, srcAddr, rv, rd);
            }
#else
            masm.atomicExchange8ZeroExtend(srcAddr, rv, rd);
#endif
            break;
          }
#endif
          case Scalar::Uint16:
            masm.atomicExchange16ZeroExtend(srcAddr, rv, rd);
            break;
          case Scalar::Int32:
          case Scalar::Uint32:
            masm.atomicExchange32(srcAddr, rv, rd);
            masm.atomicExchange(viewType, sync, srcAddr, rv, rd);
            break;
          default:
            MOZ_CRASH("Bad type for atomic operation");

@ -4557,13 +4523,14 @@ class BaseCompiler final : public BaseCompilerInterface
    atomicCmpXchg64(T srcAddr, RegI32 ebx) {
        MOZ_ASSERT(ebx == js::jit::ebx);
        bc->masm.move32(rnew.low, ebx);
        bc->masm.compareExchange64(srcAddr, rexpect, bc->specific.ecx_ebx, getRd());
        bc->masm.compareExchange64(Synchronization::Full(), srcAddr, rexpect,
                                   bc->specific.ecx_ebx, getRd());
    }
#else
    template<typename T>
    void
    atomicCmpXchg64(T srcAddr) {
        bc->masm.compareExchange64(srcAddr, rexpect, rnew, getRd());
        bc->masm.compareExchange64(Synchronization::Full(), srcAddr, rexpect, rnew, getRd());
    }
#endif
};
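// Why the extra ebx parameter exists only on x86 (rationale inferred from
// the CMPXCHG8B operand constraints shown earlier): the instruction takes
// its replacement value in ecx:ebx, so rnew.low is moved into ebx (its high
// half is assumed to already live in ecx via bc->specific.ecx_ebx), and the
// pre-pinned ecx_ebx pair is passed as the replacement. The non-x86 path
// has no such constraint and passes rnew directly.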

@ -4601,13 +4568,13 @@ class BaseCompiler final : public BaseCompilerInterface
    void
    atomicLoad64(T srcAddr, RegI32 ebx) {
        MOZ_ASSERT(ebx == js::jit::ebx);
        bc->masm.atomicLoad64(srcAddr, bc->specific.ecx_ebx, getRd());
        bc->masm.atomicLoad64(Synchronization::Full(), srcAddr, bc->specific.ecx_ebx, getRd());
    }
# else
    template<typename T>
    void
    atomicLoad64(T srcAddr) {
        bc->masm.atomicLoad64(srcAddr, RegI64::Invalid(), getRd());
        bc->masm.atomicLoad64(Synchronization::Full(), srcAddr, RegI64::Invalid(), getRd());
    }
# endif
};

@ -4850,12 +4817,12 @@ class BaseCompiler final : public BaseCompilerInterface
    void atomicXchg64(T srcAddr, RegI32 ebx) const {
        MOZ_ASSERT(ebx == js::jit::ebx);
        bc->masm.move32(rv.low, ebx);
        bc->masm.atomicExchange64(srcAddr, bc->specific.ecx_ebx, getRd());
        bc->masm.atomicExchange64(Synchronization::Full(), srcAddr, bc->specific.ecx_ebx, getRd());
    }
#else
    template<typename T>
    void atomicXchg64(T srcAddr) const {
        bc->masm.atomicExchange64(srcAddr, rv, getRd());
        bc->masm.atomicExchange64(Synchronization::Full(), srcAddr, rv, getRd());
    }
#endif
};

@ -8175,7 +8142,7 @@ BaseCompiler::emitAtomicCmpXchg(ValType type, Scalar::Type viewType)
        return true;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarFull, MembarFull);
                            /*numSimdExprs=*/ 0, Synchronization::Full());

    if (Scalar::byteSize(viewType) <= 4) {
        PopAtomicCmpXchg32Regs regs(this, type);
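// The remaining hunks repeat one mechanical rewrite of MemoryAccessDesc's
// trailing arguments; the mapping, as visible throughout this patch:
//
//     MembarFull,        MembarFull         ->  Synchronization::Full()
//     MembarBeforeLoad,  MembarAfterLoad    ->  Synchronization::Load()
//     MembarBeforeStore, MembarAfterStore   ->  Synchronization::Store()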

@ -8231,7 +8198,7 @@ BaseCompiler::emitAtomicLoad(ValType type, Scalar::Type viewType)
        return true;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
                            /*numSimdElems=*/ 0, MembarBeforeLoad, MembarAfterLoad);
                            /*numSimdElems=*/ 0, Synchronization::Load());

    if (Scalar::byteSize(viewType) <= sizeof(void*))
        return loadCommon(&access, type);

@ -8275,7 +8242,7 @@ BaseCompiler::emitAtomicRMW(ValType type, Scalar::Type viewType, AtomicOp op)
        return true;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
                            /*numSimdElems=*/ 0, MembarFull, MembarFull);
                            /*numSimdElems=*/ 0, Synchronization::Full());

    if (Scalar::byteSize(viewType) <= 4) {
        PopAtomicRMW32Regs regs(this, type, viewType, op);

@ -8338,7 +8305,7 @@ BaseCompiler::emitAtomicStore(ValType type, Scalar::Type viewType)
        return true;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
                            /*numSimdElems=*/ 0, MembarBeforeStore, MembarAfterStore);
                            /*numSimdElems=*/ 0, Synchronization::Store());

    if (Scalar::byteSize(viewType) <= sizeof(void*))
        return storeCommon(&access, type);

@ -8366,7 +8333,7 @@ BaseCompiler::emitAtomicXchg(ValType type, Scalar::Type viewType)

    AccessCheck check;
    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(bytecodeOffset()),
                            /*numSimdElems=*/ 0, MembarFull, MembarFull);
                            /*numSimdElems=*/ 0, Synchronization::Full());

    if (Scalar::byteSize(viewType) <= 4) {
        PopAtomicXchg32Regs regs(this, type);
@ -2781,7 +2781,7 @@ EmitOldAtomicsLoad(FunctionCompiler& f)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarBeforeLoad, MembarAfterLoad);
                            /*numSimdExprs=*/ 0, Synchronization::Load());

    auto* ins = f.load(addr.base, &access, ValType::I32);
    if (!f.inDeadCode() && !ins)

@ -2801,7 +2801,7 @@ EmitOldAtomicsStore(FunctionCompiler& f)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarBeforeStore, MembarAfterStore);
                            /*numSimdExprs=*/ 0, Synchronization::Store());

    f.store(addr.base, &access, value);
    f.iter().setResult(value);

@ -2819,7 +2819,7 @@ EmitOldAtomicsBinOp(FunctionCompiler& f)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarFull, MembarFull);
                            /*numSimdExprs=*/ 0, Synchronization::Full());

    auto* ins = f.atomicBinopHeap(op, addr.base, &access, ValType::I32, value);
    if (!f.inDeadCode() && !ins)

@ -2840,7 +2840,7 @@ EmitOldAtomicsCompareExchange(FunctionCompiler& f)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarFull, MembarFull);
                            /*numSimdExprs=*/ 0, Synchronization::Full());

    auto* ins = f.atomicCompareExchangeHeap(addr.base, &access, ValType::I32, oldValue, newValue);
    if (!f.inDeadCode() && !ins)

@ -2860,7 +2860,7 @@ EmitOldAtomicsExchange(FunctionCompiler& f)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarFull, MembarFull);
                            /*numSimdExprs=*/ 0, Synchronization::Full());

    auto* ins = f.atomicExchangeHeap(addr.base, &access, ValType::I32, value);
    if (!f.inDeadCode() && !ins)

@ -3416,7 +3416,7 @@ EmitAtomicCmpXchg(FunctionCompiler& f, ValType type, Scalar::Type viewType)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarFull, MembarFull);
                            /*numSimdExprs=*/ 0, Synchronization::Full());
    auto* ins = f.atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
    if (!f.inDeadCode() && !ins)
        return false;

@ -3433,7 +3433,7 @@ EmitAtomicLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarBeforeLoad, MembarAfterLoad);
                            /*numSimdExprs=*/ 0, Synchronization::Load());
    auto* ins = f.load(addr.base, &access, type);
    if (!f.inDeadCode() && !ins)
        return false;

@ -3451,7 +3451,7 @@ EmitAtomicRMW(FunctionCompiler& f, ValType type, Scalar::Type viewType, jit::AtomicOp op)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarFull, MembarFull);
                            /*numSimdExprs=*/ 0, Synchronization::Full());
    auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
    if (!f.inDeadCode() && !ins)
        return false;

@ -3469,7 +3469,7 @@ EmitAtomicStore(FunctionCompiler& f, ValType type, Scalar::Type viewType)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarBeforeStore, MembarAfterStore);
                            /*numSimdExprs=*/ 0, Synchronization::Store());
    f.store(addr.base, &access, value);
    return true;
}

@ -3573,7 +3573,7 @@ EmitAtomicXchg(FunctionCompiler& f, ValType type, Scalar::Type viewType)
        return false;

    MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.bytecodeOffset()),
                            /*numSimdExprs=*/ 0, MembarFull, MembarFull);
                            /*numSimdExprs=*/ 0, Synchronization::Full());
    MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
    if (!f.inDeadCode() && !ins)
        return false;