Backed out 3 changesets (bug 1420838) for bustage: check_macroassembler_style.py. CLOSED TREE

Backed out changeset 490e4771b990 (bug 1420838)
Backed out changeset 8ca4961a705c (bug 1420838)
Backed out changeset 663444bb705a (bug 1420838)
Sebastian Hengst 2018-02-03 00:57:26 +02:00
Parent 197f5e3acb
Commit 48d66b2f69
35 changed files with 2087 additions and 2642 deletions

View File

@@ -486,7 +486,7 @@ function runTests() {
if (is_little)
assertEq(t2[0], 37);
else
assertEq(t2[0], 37 << 8);
assertEq(t2[0], 37 << 16);
t1[0] = 0;
// Test that invoking as Atomics.whatever() works, on correct arguments.
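The assertion change above hinges on byte order: when a one-byte store is viewed through a wider typed array, the value observed depends on whether the machine is little- or big-endian. A minimal standalone C++ sketch of the same dependence (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
    uint32_t word = 0;
    uint8_t byte = 37;
    // Store into the lowest-addressed byte, as a Uint8 view write would.
    std::memcpy(&word, &byte, 1);
    bool is_little = (word == 37);
    // Big-endian: the lowest-addressed byte is the most significant one.
    assert(is_little ? word == 37u : word == (37u << 24));
    return 0;
}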

View File

@@ -341,13 +341,7 @@ AtomicOperations::isLockfreeJS(int32_t size)
// participate in the memory exclusivity monitors implemented by the simulator.
// Such a solution is likely to be difficult.
#if defined(JS_SIMULATOR_MIPS32)
# if defined(__clang__) || defined(__GNUC__)
# include "jit/mips-shared/AtomicOperations-mips-shared.h"
# else
# error "No AtomicOperations support for this platform+compiler combination"
# endif
#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)
#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)
# if defined(__clang__) || defined(__GNUC__)
# include "jit/x86-shared/AtomicOperations-x86-shared-gcc.h"
# elif defined(_MSC_VER)

View File

@@ -3263,6 +3263,113 @@ MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register tem
branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
}
// ========================================================================
// JS atomic operations.
template<typename T>
static void
CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
}
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
template<typename T>
static void
AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register value, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
}
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
template<typename T>
static void
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
} else {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
}
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
//}}} check_macroassembler_style
void
MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
memoryBarrier(sync.barrierBefore);
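The Uint32 special case in the wrappers above exists because a uint32 result may exceed int32 range as a JS number; the raw value is therefore written to a temp and converted to double (see also the bug 1077305 note later in this commit). A standalone sketch of why the conversion is exact (illustrative, not SpiderMonkey code):

#include <cassert>
#include <cstdint>

int main() {
    uint32_t raw = 0x80000000u;   // e.g. the old value returned by a CAS
    // As int32 this would be negative; as a double it is represented exactly.
    double asJSNumber = static_cast<double>(raw);
    assert(asJSNumber == 2147483648.0);
    return 0;
}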

View File

@@ -1594,16 +1594,13 @@ class MacroAssembler : public MacroAssemblerSpecific
// 8-bit, 16-bit, and 32-bit wide operations.
//
// The 8-bit and 16-bit operations zero-extend or sign-extend the result to
// 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of the
// result will be zero on some platforms (eg, on x64) and will be the sign
// extension of the lower bits on other platforms (eg, MIPS).
// 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of
// the result will be zero.
// CompareExchange with memory. Return the value that was in memory,
// whether we wrote or not.
//
// x86-shared: `output` must be eax.
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations.
void compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register expected, Register replacement, Register output)
@@ -1613,20 +1610,7 @@ class MacroAssembler : public MacroAssemblerSpecific
Register expected, Register replacement, Register output)
DEFINED_ON(arm, arm64, x86_shared);
void compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register expected, Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared);
void compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
Register expected, Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared);
// Exchange with memory. Return the value initially in memory.
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations.
void atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register value, Register output)
@@ -1636,16 +1620,6 @@ class MacroAssembler : public MacroAssemblerSpecific
Register value, Register output)
DEFINED_ON(arm, arm64, x86_shared);
void atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
Register output)
DEFINED_ON(mips_shared);
void atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
Register output)
DEFINED_ON(mips_shared);
// Read-modify-write with memory. Return the value in memory before the
// operation.
//
@@ -1655,8 +1629,6 @@ class MacroAssembler : public MacroAssemblerSpecific
// For And, Or, and Xor, `output` must be eax and `temp` must have a byte subregister.
//
// ARM: Registers `value` and `output` must differ.
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations; `value` and `output` must differ.
void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp, Register output)
@@ -1674,19 +1646,7 @@ class MacroAssembler : public MacroAssemblerSpecific
Imm32 value, const BaseIndex& mem, Register temp, Register output)
DEFINED_ON(x86_shared);
void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared);
void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
DEFINED_ON(mips_shared);
// Read-modify-write with memory. Return no value.
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
// and 16-bit wide operations.
void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
const Address& mem, Register temp)
@@ -1704,15 +1664,6 @@ class MacroAssembler : public MacroAssemblerSpecific
const BaseIndex& mem, Register temp)
DEFINED_ON(x86_shared);
void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
const Address& mem, Register valueTemp, Register offsetTemp, Register maskTemp)
DEFINED_ON(mips_shared);
void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
const BaseIndex& mem, Register valueTemp, Register offsetTemp, Register maskTemp)
DEFINED_ON(mips_shared);
// 64-bit wide operations.
// 64-bit atomic load. On 64-bit systems, use regular wasm load with
@@ -1720,51 +1671,50 @@ class MacroAssembler : public MacroAssemblerSpecific
//
// x86: `temp` must be ecx:ebx; `output` must be edx:eax.
// ARM: `temp` should be invalid; `output` must be (even,odd) pair.
// MIPS32: `temp` should be invalid.
void atomicLoad64(const Synchronization& sync, const Address& mem, Register64 temp,
Register64 output)
DEFINED_ON(arm, mips32, x86);
DEFINED_ON(arm, x86);
void atomicLoad64(const Synchronization& sync, const BaseIndex& mem, Register64 temp,
Register64 output)
DEFINED_ON(arm, mips32, x86);
DEFINED_ON(arm, x86);
// x86: `expected` must be the same as `output`, and must be edx:eax
// x86: `replacement` must be ecx:ebx
// x64: `output` must be rax.
// ARM: Registers must be distinct; `replacement` and `output` must be (even,odd) pairs.
// MIPS: Registers must be distinct.
void compareExchange64(const Synchronization& sync, const Address& mem, Register64 expected,
Register64 replacement, Register64 output) PER_ARCH;
Register64 replacement, Register64 output)
DEFINED_ON(arm, arm64, x64, x86);
void compareExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 expected,
Register64 replacement, Register64 output) PER_ARCH;
Register64 replacement, Register64 output)
DEFINED_ON(arm, arm64, x64, x86);
// x86: `value` must be ecx:ebx; `output` must be edx:eax.
// ARM: Registers must be distinct; `value` and `output` must be (even,odd) pairs.
// MIPS: Registers must be distinct.
void atomicExchange64(const Synchronization& sync, const Address& mem, Register64 value,
Register64 output) PER_ARCH;
Register64 output)
DEFINED_ON(arm, arm64, x64, x86);
void atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 value,
Register64 output) PER_ARCH;
Register64 output)
DEFINED_ON(arm, arm64, x64, x86);
// x86: `output` must be edx:eax, `temp` must be ecx:ebx.
// x64: For And, Or, and Xor `output` must be rax.
// ARM: Registers must be distinct; `temp` and `output` must be (even,odd) pairs.
// MIPS: Registers must be distinct.
// MIPS32: `temp` should be invalid.
void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
const Address& mem, Register64 temp, Register64 output)
DEFINED_ON(arm, arm64, mips32, mips64, x64);
DEFINED_ON(arm, arm64, x64);
void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
const BaseIndex& mem, Register64 temp, Register64 output)
DEFINED_ON(arm, arm64, mips32, mips64, x64);
DEFINED_ON(arm, arm64, x64);
void atomicFetchOp64(const Synchronization& sync, AtomicOp op, const Address& value,
const Address& mem, Register64 temp, Register64 output)
@@ -1798,54 +1748,25 @@ class MacroAssembler : public MacroAssemblerSpecific
void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
Register expected, Register replacement, Register temp,
AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
AnyRegister output);
void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register expected, Register replacement,
Register temp, AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
Register expected, Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared);
void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
Register expected, Register replacement, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared);
Register temp, AnyRegister output);
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
Register value, Register temp, AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
Register value, Register temp, AnyRegister output);
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
Register value, Register temp, AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
Register value, Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared);
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
Register value, Register valueTemp, Register offsetTemp,
Register maskTemp, Register temp, AnyRegister output)
DEFINED_ON(mips_shared);
Register value, Register temp, AnyRegister output);
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp1, Register temp2,
AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
AnyRegister output);
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp1, Register temp2,
AnyRegister output)
DEFINED_ON(arm, arm64, x86_shared);
AnyRegister output);
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Imm32 value, const Address& mem, Register temp1, Register temp2,
@@ -1857,25 +1778,11 @@ class MacroAssembler : public MacroAssemblerSpecific
AnyRegister output)
DEFINED_ON(x86_shared);
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared);
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
DEFINED_ON(mips_shared);
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp);
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp)
DEFINED_ON(arm, arm64, x86_shared);
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp)
DEFINED_ON(arm, arm64, x86_shared);
Register value, const BaseIndex& mem, Register temp);
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Imm32 value, const Address& mem, Register temp)
@@ -1885,16 +1792,6 @@ class MacroAssembler : public MacroAssemblerSpecific
Imm32 value, const BaseIndex& mem, Register temp)
DEFINED_ON(x86_shared);
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register valueTemp,
Register offsetTemp, Register maskTemp)
DEFINED_ON(mips_shared);
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register valueTemp,
Register offsetTemp, Register maskTemp)
DEFINED_ON(mips_shared);
//}}} check_macroassembler_decl_style
public:
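The DEFINED_ON and PER_ARCH annotations edited above are what check_macroassembler_style.py validates against the per-architecture definitions; a mismatch between the two is the bustage this backout fixes. A simplified, hypothetical analogue of the annotation mechanism (the real macros live in MacroAssembler.h; nothing below is the actual implementation):

// Hypothetical analogue only: a method is declared normally on listed
// architectures and deleted elsewhere, so accidental uses fail to compile.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
# define DEFINED_ON_X86_SHARED          // available on this architecture
#else
# define DEFINED_ON_X86_SHARED = delete // unavailable: reject at compile time
#endif

struct MacroAssemblerSketch {
    void compareExchange64() DEFINED_ON_X86_SHARED;
};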

View File

@@ -5597,111 +5597,6 @@ MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Regist
AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
}
// ========================================================================
// JS atomic operations.
template<typename T>
static void
CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
}
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
template<typename T>
static void
AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register value, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
}
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
template<typename T>
static void
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
} else {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
}
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
// ========================================================================
// Convert floating point.

View File

@@ -1063,111 +1063,6 @@ MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Regist
MOZ_CRASH("NYI");
}
// ========================================================================
// JS atomic operations.
template<typename T>
static void
CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
}
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
template<typename T>
static void
AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register value, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
}
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
template<typename T>
static void
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
} else {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
}
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
//}}} check_macroassembler_style
} // namespace jit

View File

@@ -823,13 +823,6 @@ AssemblerMIPSShared::as_ll(Register rd, Register rs, int16_t off)
return writeInst(InstImm(op_ll, rs, rd, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_lld(Register rd, Register rs, int16_t off)
{
spew("lld %3s, (0x%x)%2s", rd.name(), off, rs.name());
return writeInst(InstImm(op_lld, rs, rd, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_ld(Register rd, Register rs, int16_t off)
{
@@ -893,14 +886,6 @@ AssemblerMIPSShared::as_sc(Register rd, Register rs, int16_t off)
return writeInst(InstImm(op_sc, rs, rd, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_scd(Register rd, Register rs, int16_t off)
{
spew("scd %3s, (0x%x)%2s", rd.name(), off, rs.name());
return writeInst(InstImm(op_scd, rs, rd, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_sd(Register rd, Register rs, int16_t off)
{
@@ -1941,6 +1926,8 @@ void
AssemblerMIPSShared::as_sync(uint32_t stype)
{
MOZ_ASSERT(stype <= 31);
if (isLoongson())
stype = 0;
spew("sync %d", stype);
writeInst(InstReg(op_special, zero, zero, zero, stype, ff_sync).encode());
}

View File

@@ -291,7 +291,6 @@ enum Opcode {
op_ll = 48 << OpcodeShift,
op_lwc1 = 49 << OpcodeShift,
op_lwc2 = 50 << OpcodeShift,
op_lld = 52 << OpcodeShift,
op_ldc1 = 53 << OpcodeShift,
op_ldc2 = 54 << OpcodeShift,
op_ld = 55 << OpcodeShift,
@@ -299,7 +298,6 @@
op_sc = 56 << OpcodeShift,
op_swc1 = 57 << OpcodeShift,
op_swc2 = 58 << OpcodeShift,
op_scd = 60 << OpcodeShift,
op_sdc1 = 61 << OpcodeShift,
op_sdc2 = 62 << OpcodeShift,
op_sd = 63 << OpcodeShift,
@@ -1066,7 +1064,6 @@ class AssemblerMIPSShared : public AssemblerShared
BufferOffset as_lwl(Register rd, Register rs, int16_t off);
BufferOffset as_lwr(Register rd, Register rs, int16_t off);
BufferOffset as_ll(Register rd, Register rs, int16_t off);
BufferOffset as_lld(Register rd, Register rs, int16_t off);
BufferOffset as_ld(Register rd, Register rs, int16_t off);
BufferOffset as_ldl(Register rd, Register rs, int16_t off);
BufferOffset as_ldr(Register rd, Register rs, int16_t off);
@@ -1076,7 +1073,6 @@ class AssemblerMIPSShared : public AssemblerShared
BufferOffset as_swl(Register rd, Register rs, int16_t off);
BufferOffset as_swr(Register rd, Register rs, int16_t off);
BufferOffset as_sc(Register rd, Register rs, int16_t off);
BufferOffset as_scd(Register rd, Register rs, int16_t off);
BufferOffset as_sd(Register rd, Register rs, int16_t off);
BufferOffset as_sdl(Register rd, Register rs, int16_t off);
BufferOffset as_sdr(Register rd, Register rs, int16_t off);
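The opcode constants deleted above follow the MIPS I-type layout: the 6-bit major opcode occupies bits 31..26, which is why the enum shifts by OpcodeShift. A self-contained sketch of the encoding arithmetic (field layout per the MIPS ISA; encodeLL is an illustrative name):

#include <cstdint>

constexpr uint32_t OpcodeShift = 26;
constexpr uint32_t op_ll = 48u << OpcodeShift;  // LL major opcode 0b110000

// I-type: opcode | rs | rt | 16-bit immediate.
constexpr uint32_t encodeLL(uint32_t rs, uint32_t rt, uint16_t imm) {
    return op_ll | (rs << 21) | (rt << 16) | imm;
}
static_assert(encodeLL(4, 8, 0) == 0xC0880000u, "ll $t0, 0($a0)");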

View File

@@ -6,12 +6,15 @@
/* For documentation, see jit/AtomicOperations.h */
// NOTE, this file is *not* used with the MIPS simulator, only when compiling
// for actual MIPS hardware. The simulators get the files that are appropriate
// for the hardware the simulator is running on. See the comments before the
// #include nest at the bottom of jit/AtomicOperations.h for more information.
// NOTE, MIPS32 unlike MIPS64 doesn't provide hardware support for lock-free
// 64-bit atomics. We lie below about 8-byte atomics being always lock-
// free in order to support wasm jit. The 64-bit atomics for MIPS32 do not use
// __atomic intrinsics and therefore do not rely on -latomic.
// Access to a specific 64-bit variable in memory is protected by an AddressLock
// whose instance is shared between jit and AtomicOperations.
// free in order to support wasm jit. It is necessary to link with -latomic to
// get the 64-bit atomic intrinsics on MIPS32.
#ifndef jit_mips_shared_AtomicOperations_mips_shared_h
#define jit_mips_shared_AtomicOperations_mips_shared_h
@@ -19,52 +22,12 @@
#include "mozilla/Assertions.h"
#include "mozilla/Types.h"
#include "builtin/AtomicsObject.h"
#include "vm/ArrayBufferObject.h"
#if !defined(__clang__) && !defined(__GNUC__)
# error "This file only for gcc-compatible compilers"
#endif
#if defined(JS_SIMULATOR_MIPS32) && !defined(__i386__)
# error "The MIPS32 simulator atomics assume x86"
#endif
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
struct AddressLock
{
public:
void acquire();
void release();
private:
uint32_t spinlock;
};
static_assert(sizeof(AddressLock) == sizeof(uint32_t),
"AddressLock must be 4 bytes for it to be consumed by jit");
// For now use a single global AddressLock.
static AddressLock gAtomic64Lock;
struct MOZ_RAII AddressGuard
{
explicit AddressGuard(void* addr)
{
gAtomic64Lock.acquire();
}
~AddressGuard() {
gAtomic64Lock.release();
}
};
#endif
} }
inline bool
js::jit::AtomicOperations::hasAtomic8()
{
@@ -77,7 +40,7 @@ js::jit::AtomicOperations::isLockfree8()
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
# if defined(JS_64BIT)
# if _MIPS_SIM == _ABI64
MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
# endif
return true;
@@ -93,319 +56,86 @@ template<typename T>
inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
T v;
__atomic_load(addr, &v, __ATOMIC_SEQ_CST);
return v;
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::loadSeqCst(int64_t* addr)
{
AddressGuard guard(addr);
return *addr;
}
template<>
inline uint64_t
js::jit::AtomicOperations::loadSeqCst(uint64_t* addr)
{
AddressGuard guard(addr);
return *addr;
}
#endif
} }
template<typename T>
inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
__atomic_store(addr, &val, __ATOMIC_SEQ_CST);
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline void
js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val)
{
AddressGuard guard(addr);
*addr = val;
}
template<>
inline void
js::jit::AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val)
{
AddressGuard guard(addr);
*addr = val;
}
#endif
} }
template<typename T>
inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
__atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return oldval;
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::compareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval)
{
AddressGuard guard(addr);
int64_t val = *addr;
if (val == oldval)
*addr = newval;
return val;
}
template<>
inline uint64_t
js::jit::AtomicOperations::compareExchangeSeqCst(uint64_t* addr, uint64_t oldval, uint64_t newval)
{
AddressGuard guard(addr);
uint64_t val = *addr;
if (val == oldval)
*addr = newval;
return val;
}
#endif
} }
template<typename T>
inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::fetchAddSeqCst(int64_t* addr, int64_t val)
{
AddressGuard guard(addr);
int64_t old = *addr;
*addr = old + val;
return old;
}
template<>
inline uint64_t
js::jit::AtomicOperations::fetchAddSeqCst(uint64_t* addr, uint64_t val)
{
AddressGuard guard(addr);
uint64_t old = *addr;
*addr = old + val;
return old;
}
#endif
} }
template<typename T>
inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::fetchSubSeqCst(int64_t* addr, int64_t val)
{
AddressGuard guard(addr);
int64_t old = *addr;
*addr = old - val;
return old;
}
template<>
inline uint64_t
js::jit::AtomicOperations::fetchSubSeqCst(uint64_t* addr, uint64_t val)
{
AddressGuard guard(addr);
uint64_t old = *addr;
*addr = old - val;
return old;
}
#endif
} }
template<typename T>
inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::fetchAndSeqCst(int64_t* addr, int64_t val)
{
AddressGuard guard(addr);
int64_t old = *addr;
*addr = old & val;
return old;
}
template<>
inline uint64_t
js::jit::AtomicOperations::fetchAndSeqCst(uint64_t* addr, uint64_t val)
{
AddressGuard guard(addr);
uint64_t old = *addr;
*addr = old & val;
return old;
}
#endif
} }
template<typename T>
inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::fetchOrSeqCst(int64_t* addr, int64_t val)
{
AddressGuard guard(addr);
int64_t old = *addr;
*addr = old | val;
return old;
}
template<>
inline uint64_t
js::jit::AtomicOperations::fetchOrSeqCst(uint64_t* addr, uint64_t val)
{
AddressGuard guard(addr);
uint64_t old = *addr;
*addr = old | val;
return old;
}
#endif
} }
template<typename T>
inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::fetchXorSeqCst(int64_t* addr, int64_t val)
{
AddressGuard guard(addr);
int64_t old = *addr;
*addr = old ^ val;
return old;
}
template<>
inline uint64_t
js::jit::AtomicOperations::fetchXorSeqCst(uint64_t* addr, uint64_t val)
{
AddressGuard guard(addr);
uint64_t old = *addr;
*addr = old ^ val;
return old;
}
#endif
} }
template<typename T>
inline T
js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
T v;
__atomic_load(addr, &v, __ATOMIC_RELAXED);
return v;
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
if (__atomic_always_lock_free(sizeof(T), 0)) {
T v;
__atomic_load(addr, &v, __ATOMIC_RELAXED);
return v;
} else {
return *addr;
}
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::loadSafeWhenRacy(int64_t* addr)
{
return *addr;
}
template<>
inline uint64_t
js::jit::AtomicOperations::loadSafeWhenRacy(uint64_t* addr)
{
return *addr;
}
#endif
template<>
inline uint8_clamped
js::jit::AtomicOperations::loadSafeWhenRacy(uint8_clamped* addr)
@@ -435,30 +165,16 @@ template<typename T>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
__atomic_store(addr, &val, __ATOMIC_RELAXED);
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
if (__atomic_always_lock_free(sizeof(T), 0)) {
__atomic_store(addr, &val, __ATOMIC_RELAXED);
} else {
*addr = val;
}
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(int64_t* addr, int64_t val)
{
*addr = val;
}
template<>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(uint64_t* addr, uint64_t val)
{
*addr = val;
}
#endif
template<>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr, uint8_clamped val)
@@ -500,61 +216,10 @@ template<typename T>
inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
T v;
__atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
return v;
}
namespace js { namespace jit {
#if defined(JS_CODEGEN_MIPS32)
template<>
inline int64_t
js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val)
{
AddressGuard guard(addr);
int64_t old = *addr;
*addr = val;
return old;
}
template<>
inline uint64_t
js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val)
{
AddressGuard guard(addr);
uint64_t old = *addr;
*addr = val;
return old;
}
#endif
} }
#if defined(JS_CODEGEN_MIPS32)
inline void
js::jit::AddressLock::acquire()
{
uint32_t zero = 0;
uint32_t one = 1;
while (!__atomic_compare_exchange(&spinlock, &zero, &one, true, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST))
{
zero = 0;
}
}
inline void
js::jit::AddressLock::release()
{
uint32_t zero = 0;
__atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
}
#endif
#endif // jit_mips_shared_AtomicOperations_mips_shared_h
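The restored header comment says MIPS32 must link with -latomic to get 64-bit atomic intrinsics. That follows from __atomic_always_lock_free: where it reports false for a size, the compiler lowers __atomic_* operations of that size to out-of-line libatomic calls. A quick standalone probe (output is target-dependent):

#include <cstdio>

int main() {
    // Where this prints 0 for size 8 (as on MIPS32), __atomic_* on 64-bit
    // values becomes out-of-line libatomic calls, hence linking with -latomic.
    std::printf("4-byte always lock-free: %d\n", (int)__atomic_always_lock_free(4, 0));
    std::printf("8-byte always lock-free: %d\n", (int)__atomic_always_lock_free(8, 0));
    return 0;
}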

View File

@@ -2215,18 +2215,21 @@ CodeGeneratorMIPSShared::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap*
{
MWasmCompareExchangeHeap* mir = ins->mir();
Scalar::Type vt = mir->access().type();
Register ptrReg = ToRegister(ins->ptr());
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
const LAllocation* ptr = ins->ptr();
Register ptrReg = ToRegister(ptr);
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
Register oldval = ToRegister(ins->oldValue());
Register newval = ToRegister(ins->newValue());
Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
Register valueTemp = ToRegister(ins->valueTemp());
Register offsetTemp = ToRegister(ins->offsetTemp());
Register maskTemp = ToRegister(ins->maskTemp());
masm.compareExchange(vt, Synchronization::Full(), srcAddr, oldval, newval, valueTemp,
offsetTemp, maskTemp, ToRegister(ins->output()));
masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
srcAddr, oldval, newval, InvalidReg,
valueTemp, offsetTemp, maskTemp,
ToAnyRegister(ins->output()));
}
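The two versions of this function differ in how temps are fetched: the backed-out code used ToTempRegisterOrInvalid, which tolerates bogus (unallocated) temps, while the restored code demands real registers via ToRegister. A standalone sketch of the assumed helper semantics, with stub types, mirroring the isBogusTemp pattern that appears verbatim later in this diff:

// Standalone sketch with stub types; in the tree these come from the jit.
struct Register { int code; };
static const Register InvalidReg{-1};
struct LDefinition { bool bogus; bool isBogusTemp() const { return bogus; } };
Register ToRegister(const LDefinition*);  // assumed: allocation -> register

// Assumed semantics: a bogus temp maps to InvalidReg instead of asserting,
// so one code path serves platforms that need no temp registers.
Register ToTempRegisterOrInvalid(const LDefinition* def) {
    return def->isBogusTemp() ? InvalidReg : ToRegister(def);
}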
void
@@ -2236,15 +2239,16 @@ CodeGeneratorMIPSShared::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* in
Scalar::Type vt = mir->access().type();
Register ptrReg = ToRegister(ins->ptr());
Register value = ToRegister(ins->value());
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
Register valueTemp = ToRegister(ins->valueTemp());
Register offsetTemp = ToRegister(ins->offsetTemp());
Register maskTemp = ToRegister(ins->maskTemp());
masm.atomicExchange(vt, Synchronization::Full(), srcAddr, value, valueTemp, offsetTemp,
maskTemp, ToRegister(ins->output()));
masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
srcAddr, value, InvalidReg, valueTemp,
offsetTemp, maskTemp, ToAnyRegister(ins->output()));
}
void
@@ -2256,14 +2260,25 @@ CodeGeneratorMIPSShared::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
MWasmAtomicBinopHeap* mir = ins->mir();
Scalar::Type vt = mir->access().type();
Register ptrReg = ToRegister(ins->ptr());
Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
Register flagTemp = ToRegister(ins->flagTemp());
Register valueTemp = ToRegister(ins->valueTemp());
Register offsetTemp = ToRegister(ins->offsetTemp());
Register maskTemp = ToRegister(ins->maskTemp());
const LAllocation* value = ins->value();
AtomicOp op = mir->operation();
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
masm.atomicFetchOp(vt, Synchronization::Full(), mir->operation(), ToRegister(ins->value()),
srcAddr, valueTemp, offsetTemp, maskTemp, ToRegister(ins->output()));
if (value->isConstant())
atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
valueTemp, offsetTemp, maskTemp,
ToAnyRegister(ins->output()));
else
atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
ToRegister(value), srcAddr, flagTemp, InvalidReg,
valueTemp, offsetTemp, maskTemp,
ToAnyRegister(ins->output()));
}
void
@@ -2275,13 +2290,21 @@ CodeGeneratorMIPSShared::visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapF
MWasmAtomicBinopHeap* mir = ins->mir();
Scalar::Type vt = mir->access().type();
Register ptrReg = ToRegister(ins->ptr());
Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
Register flagTemp = ToRegister(ins->flagTemp());
Register valueTemp = ToRegister(ins->valueTemp());
Register offsetTemp = ToRegister(ins->offsetTemp());
Register maskTemp = ToRegister(ins->maskTemp());
const LAllocation* value = ins->value();
AtomicOp op = mir->operation();
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
masm.atomicEffectOp(vt, Synchronization::Full(), mir->operation(), ToRegister(ins->value()),
srcAddr, valueTemp, offsetTemp, maskTemp);
BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
if (value->isConstant())
atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp,
valueTemp, offsetTemp, maskTemp);
else
atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp,
valueTemp, offsetTemp, maskTemp);
}
void
@@ -2462,6 +2485,281 @@ CodeGeneratorMIPSShared::visitNegF(LNegF* ins)
masm.as_negs(output, input);
}
template<typename S, typename T>
void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const S& value, const T& mem, Register flagTemp,
Register outTemp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output)
{
MOZ_ASSERT(flagTemp != InvalidReg);
MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
switch (arrayType) {
case Scalar::Int8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
break;
case AtomicFetchSubOp:
masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
break;
case AtomicFetchOrOp:
masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
break;
case AtomicFetchXorOp:
masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
masm.convertUInt32ToDouble(outTemp, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem,
Register flagTemp, Register outTemp,
Register valueTemp, Register offsetTemp,
Register maskTemp, AnyRegister output);
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem,
Register flagTemp, Register outTemp,
Register valueTemp, Register offsetTemp,
Register maskTemp, AnyRegister output);
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem,
Register flagTemp, Register outTemp,
Register valueTemp, Register offsetTemp,
Register maskTemp, AnyRegister output);
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem,
Register flagTemp, Register outTemp,
Register valueTemp, Register offsetTemp,
Register maskTemp, AnyRegister output);
// Binary operation for effect, result discarded.
template<typename S, typename T>
void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp)
{
MOZ_ASSERT(flagTemp != InvalidReg);
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchSubOp:
masm.atomicSub8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchAndOp:
masm.atomicAnd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchOrOp:
masm.atomicOr8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchXorOp:
masm.atomicXor8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchSubOp:
masm.atomicSub16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchAndOp:
masm.atomicAnd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchOrOp:
masm.atomicOr16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchXorOp:
masm.atomicXor16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
case Scalar::Uint32:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchSubOp:
masm.atomicSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchAndOp:
masm.atomicAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchOrOp:
masm.atomicOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
case AtomicFetchXorOp:
masm.atomicXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem,
Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp);
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem,
Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp);
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem,
Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp);
template void
CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem,
Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp);
void
CodeGeneratorMIPSShared::visitWasmAddOffset(LWasmAddOffset* lir)
{
@@ -2472,6 +2770,20 @@ CodeGeneratorMIPSShared::visitWasmAddOffset(LWasmAddOffset* lir)
masm.ma_addTestCarry(out, base, Imm32(mir->offset()), oldTrap(mir, wasm::Trap::OutOfBounds));
}
template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op,
Scalar::Type arrayType, const LAllocation* value, const T& mem,
Register flagTemp, Register outTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, AnyRegister output)
{
if (value->isConstant())
cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp, outTemp,
valueTemp, offsetTemp, maskTemp, output);
else
cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp, outTemp,
valueTemp, offsetTemp, maskTemp, output);
}
void
CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
@@ -2480,47 +2792,63 @@ CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElem
AnyRegister output = ToAnyRegister(lir->output());
Register elements = ToRegister(lir->elements());
Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
Register value = ToRegister(lir->value());
Register flagTemp = ToRegister(lir->temp1());
Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
Register valueTemp = ToRegister(lir->valueTemp());
Register offsetTemp = ToRegister(lir->offsetTemp());
Register maskTemp = ToRegister(lir->maskTemp());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
mem, valueTemp, offsetTemp, maskTemp, outTemp, output);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
valueTemp, offsetTemp, maskTemp, output);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
mem, valueTemp, offsetTemp, maskTemp, outTemp, output);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
valueTemp, offsetTemp, maskTemp, output);
}
}
template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op, Scalar::Type arrayType,
const LAllocation* value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
if (value->isConstant())
cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem,
flagTemp, valueTemp, offsetTemp, maskTemp);
else
cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem,
flagTemp, valueTemp, offsetTemp, maskTemp);
}
void
CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
{
MOZ_ASSERT(!lir->mir()->hasUses());
Register elements = ToRegister(lir->elements());
Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
Register value = ToRegister(lir->value());
Register flagTemp = ToRegister(lir->flagTemp());
Register valueTemp = ToRegister(lir->valueTemp());
Register offsetTemp = ToRegister(lir->offsetTemp());
Register maskTemp = ToRegister(lir->maskTemp());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
mem, valueTemp, offsetTemp, maskTemp);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
flagTemp, valueTemp, offsetTemp, maskTemp);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
mem, valueTemp, offsetTemp, maskTemp);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
flagTemp, valueTemp, offsetTemp, maskTemp);
}
}
@@ -2529,25 +2857,25 @@ CodeGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(LCompareExchangeT
{
Register elements = ToRegister(lir->elements());
AnyRegister output = ToAnyRegister(lir->output());
-Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
Register oldval = ToRegister(lir->oldval());
Register newval = ToRegister(lir->newval());
-Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
-Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
-Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+Register valueTemp = ToRegister(lir->valueTemp());
+Register offsetTemp = ToRegister(lir->offsetTemp());
+Register maskTemp = ToRegister(lir->maskTemp());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
-masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval,
-valueTemp, offsetTemp, maskTemp, outTemp, output);
+masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
+valueTemp, offsetTemp, maskTemp, output);
} else {
BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval,
-valueTemp, offsetTemp, maskTemp, outTemp, output);
+masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
+valueTemp, offsetTemp, maskTemp, output);
}
}
}
@@ -2556,68 +2884,23 @@ CodeGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTyp
{
Register elements = ToRegister(lir->elements());
AnyRegister output = ToAnyRegister(lir->output());
-Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
Register value = ToRegister(lir->value());
-Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
-Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
-Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+Register valueTemp = ToRegister(lir->valueTemp());
+Register offsetTemp = ToRegister(lir->offsetTemp());
+Register maskTemp = ToRegister(lir->maskTemp());
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width);
-masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, valueTemp,
-offsetTemp, maskTemp, outTemp, output);
+masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
+valueTemp, offsetTemp, maskTemp, output);
} else {
BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, valueTemp,
-offsetTemp, maskTemp, outTemp, output);
+masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
+valueTemp, offsetTemp, maskTemp, output);
}
}
}
void
CodeGeneratorMIPSShared::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir)
{
Register ptr = ToRegister(lir->ptr());
Register64 oldValue = ToRegister64(lir->oldValue());
Register64 newValue = ToRegister64(lir->newValue());
Register64 output = ToOutRegister64(lir);
uint32_t offset = lir->mir()->access().offset();
BaseIndex addr(HeapReg, ptr, TimesOne, offset);
masm.compareExchange64(Synchronization::Full(), addr, oldValue, newValue, output);
}
void
CodeGeneratorMIPSShared::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir)
{
Register ptr = ToRegister(lir->ptr());
Register64 value = ToRegister64(lir->value());
Register64 output = ToOutRegister64(lir);
uint32_t offset = lir->mir()->access().offset();
BaseIndex addr(HeapReg, ptr, TimesOne, offset);
masm.atomicExchange64(Synchronization::Full(), addr, value, output);
}
void
CodeGeneratorMIPSShared::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir)
{
Register ptr = ToRegister(lir->ptr());
Register64 value = ToRegister64(lir->value());
Register64 output = ToOutRegister64(lir);
#ifdef JS_CODEGEN_MIPS32
Register64 temp(ToRegister(lir->getTemp(0)), ToRegister(lir->getTemp(1)));
#else
Register64 temp(ToRegister(lir->getTemp(0)));
#endif
uint32_t offset = lir->mir()->access().offset();
BaseIndex addr(HeapReg, ptr, TimesOne, offset);
masm.atomicFetchOp64(Synchronization::Full(), lir->mir()->operation(), value, addr, temp,
output);
}

View file

@@ -251,10 +251,6 @@ class CodeGeneratorMIPSShared : public CodeGeneratorShared
const T& mem, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp);
void visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir);
void visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir);
void visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir);
protected:
void visitEffectiveAddress(LEffectiveAddress* ins);
void visitUDivOrMod(LUDivOrMod* ins);

View file

@@ -388,78 +388,6 @@ class LWasmUnalignedStoreI64 : public details::LWasmUnalignedStoreBase<1 + INT64
}
};
class LWasmCompareExchangeI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES + INT64_PIECES, 0>
{
public:
LIR_HEADER(WasmCompareExchangeI64);
LWasmCompareExchangeI64(const LAllocation& ptr, const LInt64Allocation& oldValue, const LInt64Allocation& newValue)
{
setOperand(0, ptr);
setInt64Operand(1, oldValue);
setInt64Operand(1 + INT64_PIECES, newValue);
}
const LAllocation* ptr() {
return getOperand(0);
}
const LInt64Allocation oldValue() {
return getInt64Operand(1);
}
const LInt64Allocation newValue() {
return getInt64Operand(1 + INT64_PIECES);
}
const MWasmCompareExchangeHeap* mir() const {
return mir_->toWasmCompareExchangeHeap();
}
};
class LWasmAtomicExchangeI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0>
{
public:
LIR_HEADER(WasmAtomicExchangeI64);
LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value)
{
setOperand(0, ptr);
setInt64Operand(1, value);
}
const LAllocation* ptr() {
return getOperand(0);
}
const LInt64Allocation value() {
return getInt64Operand(1);
}
const MWasmAtomicExchangeHeap* mir() const {
return mir_->toWasmAtomicExchangeHeap();
}
};
class LWasmAtomicBinopI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 2>
{
public:
LIR_HEADER(WasmAtomicBinopI64);
LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value)
{
setOperand(0, ptr);
setInt64Operand(1, value);
}
const LAllocation* ptr() {
return getOperand(0);
}
const LInt64Allocation value() {
return getInt64Operand(1);
}
const MWasmAtomicBinopHeap* mir() const {
return mir_->toWasmAtomicBinopHeap();
}
};
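For reading these (deleted) LIR classes: the LInstructionHelper template arguments are <defs, operands, temps>, and INT64_PIECES is the number of 32-bit allocations one Int64 value occupies, which is why the second int64 operand starts at index 1 + INT64_PIECES. A sketch of that index arithmetic, under the assumption that pointer width tracks the target word size:

#include <cstddef>

// Assumption: two 32-bit pieces per int64 on 32-bit targets, one otherwise.
constexpr std::size_t INT64_PIECES = sizeof(void*) == 4 ? 2 : 1;

// LWasmCompareExchangeI64 : LInstructionHelper<INT64_PIECES,                    // defs: one int64 result
//                                              1 + INT64_PIECES + INT64_PIECES, // ptr + oldValue + newValue
//                                              0>                               // no temps
constexpr std::size_t PtrIndex      = 0;
constexpr std::size_t OldValueIndex = 1;                 // setInt64Operand(1, oldValue)
constexpr std::size_t NewValueIndex = 1 + INT64_PIECES;  // setInt64Operand(1 + INT64_PIECES, newValue)

static_assert(NewValueIndex >= OldValueIndex + INT64_PIECES, "int64 operands must not overlap");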
} // namespace jit
} // namespace js

View file

@@ -364,14 +364,6 @@ LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
}
if (ins->type() == MIRType::Int64) {
#ifdef JS_CODEGEN_MIPS32
if (ins->access().isAtomic()) {
auto* lir = new(alloc()) LWasmAtomicLoadI64(ptr);
defineInt64(lir, ins);
return;
}
#endif
auto* lir = new(alloc()) LWasmLoadI64(ptr);
if (ins->access().offset())
lir->setTemp(0, tempCopy(base, 0));
@@ -394,9 +386,9 @@ LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
MOZ_ASSERT(base->type() == MIRType::Int32);
MDefinition* value = ins->value();
-LAllocation baseAlloc = useRegisterAtStart(base);
if (IsUnaligned(ins->access())) {
+LAllocation baseAlloc = useRegisterAtStart(base);
if (ins->access().type() == Scalar::Int64) {
LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
@@ -417,16 +409,6 @@ LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
}
if (ins->access().type() == Scalar::Int64) {
#ifdef JS_CODEGEN_MIPS32
if (ins->access().isAtomic()) {
auto* lir = new(alloc()) LWasmAtomicStoreI64(useRegister(base), useInt64Register(value), temp());
add(lir, ins);
return;
}
#endif
LAllocation baseAlloc = useRegisterAtStart(base);
LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
if (ins->access().offset())
@@ -436,7 +418,6 @@ LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
return;
}
LAllocation baseAlloc = useRegisterAtStart(base);
LAllocation valueAlloc = useRegisterAtStart(value);
auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
if (ins->access().offset())
@@ -597,24 +578,14 @@ LIRGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(MCompareExchangeTy
const LAllocation newval = useRegister(ins->newval());
const LAllocation oldval = useRegister(ins->oldval());
-LDefinition outTemp = LDefinition::BogusTemp();
-LDefinition valueTemp = LDefinition::BogusTemp();
-LDefinition offsetTemp = LDefinition::BogusTemp();
-LDefinition maskTemp = LDefinition::BogusTemp();
+LDefinition uint32Temp = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
-outTemp = temp();
-if (Scalar::byteSize(ins->arrayType()) < 4) {
-valueTemp = temp();
-offsetTemp = temp();
-maskTemp = temp();
-}
+uint32Temp = temp();
LCompareExchangeTypedArrayElement* lir =
-new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, outTemp,
-valueTemp, offsetTemp, maskTemp);
+new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, uint32Temp,
+/* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
define(lir, ins);
}
@@ -634,25 +605,16 @@ LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeType
// CodeGenerator level for creating the result.
const LAllocation value = useRegister(ins->value());
-LDefinition outTemp = LDefinition::BogusTemp();
-LDefinition valueTemp = LDefinition::BogusTemp();
-LDefinition offsetTemp = LDefinition::BogusTemp();
-LDefinition maskTemp = LDefinition::BogusTemp();
+LDefinition uint32Temp = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32) {
MOZ_ASSERT(ins->type() == MIRType::Double);
-outTemp = temp();
-}
-if (Scalar::byteSize(ins->arrayType()) < 4) {
-valueTemp = temp();
-offsetTemp = temp();
-maskTemp = temp();
+uint32Temp = temp();
}
LAtomicExchangeTypedArrayElement* lir =
-new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, outTemp,
-valueTemp, offsetTemp, maskTemp);
+new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, uint32Temp,
+/* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
define(lir, ins);
}
@@ -660,31 +622,19 @@ LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeType
void
LIRGeneratorMIPSShared::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins)
{
-MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-MOZ_ASSERT(ins->access().offset() == 0);
-if (ins->access().type() == Scalar::Int64) {
-auto* lir = new(alloc()) LWasmCompareExchangeI64(useRegister(ins->base()),
-useInt64Register(ins->oldValue()),
-useInt64Register(ins->newValue()));
-defineInt64(lir, ins);
-return;
-}
-LDefinition valueTemp = LDefinition::BogusTemp();
-LDefinition offsetTemp = LDefinition::BogusTemp();
-LDefinition maskTemp = LDefinition::BogusTemp();
-if (ins->access().byteSize() < 4) {
-valueTemp = temp();
-offsetTemp = temp();
-maskTemp = temp();
-}
+MDefinition* base = ins->base();
+MOZ_ASSERT(base->type() == MIRType::Int32);
LWasmCompareExchangeHeap* lir =
-new(alloc()) LWasmCompareExchangeHeap(useRegister(ins->base()),
+new(alloc()) LWasmCompareExchangeHeap(useRegister(base),
useRegister(ins->oldValue()),
useRegister(ins->newValue()),
-valueTemp, offsetTemp, maskTemp);
+/* valueTemp= */ temp(),
+/* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
define(lir, ins);
}
@@ -693,70 +643,52 @@ void
LIRGeneratorMIPSShared::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins)
{
MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
-MOZ_ASSERT(ins->access().offset() == 0);
-if (ins->access().type() == Scalar::Int64) {
-auto* lir = new(alloc()) LWasmAtomicExchangeI64(useRegister(ins->base()),
-useInt64Register(ins->value()));
-defineInt64(lir, ins);
-return;
-}
+const LAllocation base = useRegister(ins->base());
+const LAllocation value = useRegister(ins->value());
-LDefinition valueTemp = LDefinition::BogusTemp();
-LDefinition offsetTemp = LDefinition::BogusTemp();
-LDefinition maskTemp = LDefinition::BogusTemp();
-if (ins->access().byteSize() < 4) {
-valueTemp = temp();
-offsetTemp = temp();
-maskTemp = temp();
-}
+// The output may not be used but will be clobbered regardless,
+// so ignore the case where we're not using the value and just
+// use the output register as a temp.
LWasmAtomicExchangeHeap* lir =
-new(alloc()) LWasmAtomicExchangeHeap(useRegister(ins->base()),
-useRegister(ins->value()),
-valueTemp, offsetTemp, maskTemp);
+new(alloc()) LWasmAtomicExchangeHeap(base, value,
+/* valueTemp= */ temp(),
+/* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
define(lir, ins);
}
void
LIRGeneratorMIPSShared::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins)
{
-MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-MOZ_ASSERT(ins->access().offset() == 0);
-if (ins->access().type() == Scalar::Int64) {
-auto* lir = new(alloc()) LWasmAtomicBinopI64(useRegister(ins->base()),
-useInt64Register(ins->value()));
-lir->setTemp(0, temp());
-#ifdef JS_CODEGEN_MIPS32
-lir->setTemp(1, temp());
-#endif
-defineInt64(lir, ins);
-return;
-}
-LDefinition valueTemp = LDefinition::BogusTemp();
-LDefinition offsetTemp = LDefinition::BogusTemp();
-LDefinition maskTemp = LDefinition::BogusTemp();
-if (ins->access().byteSize() < 4) {
-valueTemp = temp();
-offsetTemp = temp();
-maskTemp = temp();
-}
+MDefinition* base = ins->base();
+MOZ_ASSERT(base->type() == MIRType::Int32);
if (!ins->hasUses()) {
LWasmAtomicBinopHeapForEffect* lir =
-new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(ins->base()),
+new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
useRegister(ins->value()),
-valueTemp, offsetTemp, maskTemp);
+/* flagTemp= */ temp(),
+/* valueTemp= */ temp(),
+/* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
add(lir, ins);
return;
}
LWasmAtomicBinopHeap* lir =
-new(alloc()) LWasmAtomicBinopHeap(useRegister(ins->base()),
+new(alloc()) LWasmAtomicBinopHeap(useRegister(base),
useRegister(ins->value()),
-valueTemp, offsetTemp, maskTemp);
+/* temp= */ LDefinition::BogusTemp(),
+/* flagTemp= */ temp(),
+/* valueTemp= */ temp(),
+/* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
define(lir, ins);
}
@@ -775,20 +707,13 @@ LIRGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayEleme
const LAllocation index = useRegisterOrConstant(ins->index());
const LAllocation value = useRegister(ins->value());
-LDefinition valueTemp = LDefinition::BogusTemp();
-LDefinition offsetTemp = LDefinition::BogusTemp();
-LDefinition maskTemp = LDefinition::BogusTemp();
-if (Scalar::byteSize(ins->arrayType()) < 4) {
-valueTemp = temp();
-offsetTemp = temp();
-maskTemp = temp();
-}
if (!ins->hasUses()) {
LAtomicTypedArrayElementBinopForEffect* lir =
new(alloc()) LAtomicTypedArrayElementBinopForEffect(elements, index, value,
-valueTemp, offsetTemp, maskTemp);
+/* flagTemp= */ temp(),
+/* valueTemp= */ temp(),
+/* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
add(lir, ins);
return;
}
@@ -796,14 +721,18 @@ LIRGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayEleme
// For a Uint32Array with a known double result we need a temp for
// the intermediate output.
+LDefinition flagTemp = temp();
LDefinition outTemp = LDefinition::BogusTemp();
if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
outTemp = temp();
+// On mips, map flagTemp to temp1 and outTemp to temp2, at least for now.
LAtomicTypedArrayElementBinop* lir =
-new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, outTemp,
-valueTemp, offsetTemp, maskTemp);
+new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp,
+/* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+/* maskTemp= */ temp());
define(lir, ins);
}

View file

@@ -1010,7 +1010,14 @@ MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
void
MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
{
-as_sync();
+if (barrier == MembarLoadLoad)
+as_sync(19);
+else if (barrier == MembarStoreStore)
+as_sync(4);
+else if (barrier & MembarSynchronizing)
+as_sync();
+else if (barrier)
+as_sync(16);
}
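The restored dispatch selects a MIPS SYNC stype per barrier kind. As a hedged reading (these are the MIPS32R2 SYNC encodings; verify against the architecture manual for a given core): 19 (0x13) is SYNC_RMB, 4 is SYNC_WMB, 16 (0x10) is the lighter full ordering barrier SYNC_MB, and plain as_sync() emits stype 0, the strongest completion barrier. A standalone sketch of the mapping:

// Illustrative stand-ins; MemoryBarrierBits in the real code is a bit set.
enum class Barrier { LoadLoad, StoreStore, Synchronizing, Other };

static unsigned SyncStype(Barrier b) {
    switch (b) {
      case Barrier::LoadLoad:      return 0x13;  // SYNC_RMB: earlier loads before later loads
      case Barrier::StoreStore:    return 0x04;  // SYNC_WMB: earlier stores before later stores
      case Barrier::Synchronizing: return 0x00;  // SYNC: full completion barrier (strongest)
      default:                     return 0x10;  // SYNC_MB: ordering barrier for all accesses
    }
}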
// ===============================================================

View file

@@ -1317,6 +1317,270 @@ MacroAssemblerMIPSShared::asMasm() const
return *static_cast<const MacroAssembler*>(this);
}
void
MacroAssemblerMIPSShared::atomicEffectOpMIPSr2(int nbytes, AtomicOp op,
const Register& value, const Register& addr,
Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp)
{
atomicFetchOpMIPSr2(nbytes, false, op, value, addr, flagTemp,
valueTemp, offsetTemp, maskTemp, InvalidReg);
}
void
MacroAssemblerMIPSShared::atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value,
const Register& addr, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
Label again;
as_andi(offsetTemp, addr, 3);
asMasm().subPtr(offsetTemp, addr);
as_sll(offsetTemp, offsetTemp, 3);
ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
as_sllv(maskTemp, maskTemp, offsetTemp);
bind(&again);
as_sync(16);
as_ll(flagTemp, addr, 0);
as_sllv(valueTemp, value, offsetTemp);
if (output != InvalidReg) {
as_and(output, flagTemp, maskTemp);
as_srlv(output, output, offsetTemp);
if (signExtend) {
switch (nbytes) {
case 1:
ma_seb(output, output);
break;
case 2:
ma_seh(output, output);
break;
case 4:
break;
default:
MOZ_CRASH("NYI");
}
}
}
switch (op) {
case AtomicFetchAddOp:
as_addu(valueTemp, flagTemp, valueTemp);
break;
case AtomicFetchSubOp:
as_subu(valueTemp, flagTemp, valueTemp);
break;
case AtomicFetchAndOp:
as_and(valueTemp, flagTemp, valueTemp);
break;
case AtomicFetchOrOp:
as_or(valueTemp, flagTemp, valueTemp);
break;
case AtomicFetchXorOp:
as_xor(valueTemp, flagTemp, valueTemp);
break;
default:
MOZ_CRASH("NYI");
}
as_and(valueTemp, valueTemp, maskTemp);
as_or(flagTemp, flagTemp, maskTemp);
as_xor(flagTemp, flagTemp, maskTemp);
as_or(flagTemp, flagTemp, valueTemp);
as_sc(flagTemp, addr, 0);
ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
as_sync(0);
}
void
MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
const Address& address, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
ma_li(SecondScratchReg, value);
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp);
}
void
MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
const BaseIndex& address, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
ma_li(SecondScratchReg, value);
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp);
}
void
MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
const Address& address, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp);
}
void
MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
const BaseIndex& address, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp);
}
void
MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
const Address& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
ma_li(SecondScratchReg, value);
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
const BaseIndex& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
ma_li(SecondScratchReg, value);
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
const Address& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
const BaseIndex& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssemblerMIPSShared::compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr,
Register oldval, Register newval, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp,
Register output)
{
Label again, end;
as_andi(offsetTemp, addr, 3);
asMasm().subPtr(offsetTemp, addr);
as_sll(offsetTemp, offsetTemp, 3);
ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
as_sllv(maskTemp, maskTemp, offsetTemp);
bind(&again);
as_sync(16);
as_ll(flagTemp, addr, 0);
as_and(output, flagTemp, maskTemp);
// If oldval is a valid register, do a compareExchange; callers pass
// InvalidReg as oldval to get an unconditional exchange instead.
if (InvalidReg != oldval) {
as_sllv(valueTemp, oldval, offsetTemp);
as_and(valueTemp, valueTemp, maskTemp);
ma_b(output, valueTemp, &end, NotEqual, ShortJump);
}
as_sllv(valueTemp, newval, offsetTemp);
as_and(valueTemp, valueTemp, maskTemp);
as_or(flagTemp, flagTemp, maskTemp);
as_xor(flagTemp, flagTemp, maskTemp);
as_or(flagTemp, flagTemp, valueTemp);
as_sc(flagTemp, addr, 0);
ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
as_sync(0);
bind(&end);
as_srlv(output, output, offsetTemp);
if (signExtend) {
switch (nbytes) {
case 1:
ma_seb(output, output);
break;
case 2:
ma_seh(output, output);
break;
case 4:
break;
default:
MOZ_CRASH("NYI");
}
}
}
void
MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const Address& address,
Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const BaseIndex& address,
Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const Address& address,
Register value, Register valueTemp, Register offsetTemp,
Register maskTemp, Register output)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const BaseIndex& address,
Register value, Register valueTemp, Register offsetTemp,
Register maskTemp, Register output)
{
asMasm().computeEffectiveAddress(address, ScratchRegister);
compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
valueTemp, offsetTemp, maskTemp, output);
}
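All of the MIPSr2 helpers above share one shape: an ll/sc retry loop bracketed by sync instructions. A C-level model of that loop for a full word, using a GCC/Clang weak compare-exchange as a portable stand-in for ll/sc (a sketch, not the emitted code):

#include <stdint.h>

static uint32_t FetchAdd32(uint32_t* addr, uint32_t value) {
    uint32_t old = __atomic_load_n(addr, __ATOMIC_RELAXED);       // as_ll
    while (!__atomic_compare_exchange_n(addr, &old, old + value,  // as_sc
                                        /* weak= */ 1,
                                        __ATOMIC_ACQ_REL,         // as_sync(16) ... as_sync(0)
                                        __ATOMIC_RELAXED)) {
        // 'old' was refreshed by the failed exchange; retry, like ma_b(... &again).
    }
    return old;  // fetch semantics: the value seen before the update
}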
//{{{ check_macroassembler_style
// ===============================================================
// MacroAssembler high-level usage.
@@ -1640,648 +1904,4 @@ MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
}
// ========================================================================
// Primitive atomic operations.
template<typename T>
static void
CompareExchange(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync, const T& mem,
Register oldval, Register newval, Register valueTemp, Register offsetTemp,
Register maskTemp, Register output)
{
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
switch (nbytes) {
case 1:
case 2:
break;
case 4:
MOZ_ASSERT(valueTemp == InvalidReg);
MOZ_ASSERT(offsetTemp == InvalidReg);
MOZ_ASSERT(maskTemp == InvalidReg);
break;
default:
MOZ_CRASH();
}
Label again, end;
masm.computeEffectiveAddress(mem, SecondScratchReg);
if (nbytes == 4) {
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(output, SecondScratchReg, 0);
masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
masm.ma_move(ScratchRegister, newval);
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
masm.bind(&end);
return;
}
masm.as_andi(offsetTemp, SecondScratchReg, 3);
masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN
masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
masm.as_sll(offsetTemp, offsetTemp, 3);
masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
masm.as_sllv(maskTemp, maskTemp, offsetTemp);
masm.as_nor(maskTemp, zero, maskTemp);
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(ScratchRegister, SecondScratchReg, 0);
masm.as_srlv(output, ScratchRegister, offsetTemp);
switch (nbytes) {
case 1:
if (signExtend) {
masm.ma_seb(valueTemp, oldval);
masm.ma_seb(output, output);
} else {
masm.as_andi(valueTemp, oldval, 0xff);
masm.as_andi(output, output, 0xff);
}
break;
case 2:
if (signExtend) {
masm.ma_seh(valueTemp, oldval);
masm.ma_seh(output, output);
} else {
masm.as_andi(valueTemp, oldval, 0xffff);
masm.as_andi(output, output, 0xffff);
}
break;
}
masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
masm.as_sllv(valueTemp, newval, offsetTemp);
masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
masm.bind(&end);
}
void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
CompareExchange(*this, type, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
output);
}
void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
CompareExchange(*this, type, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
output);
}
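CompareExchange above handles 1- and 2-byte accesses by operating on the containing aligned word. A sketch of the shift/mask arithmetic it emits, assuming a little-endian layout; the #if !MOZ_LITTLE_ENDIAN xori is what flips the byte offset on big-endian targets:

#include <stdint.h>

// New value of the aligned word after replacing the byte that lives at 'addr'.
static uint32_t InsertByte(uint32_t word, uintptr_t addr, uint8_t value) {
    unsigned byte  = addr & 3;                 // as_andi(offsetTemp, SecondScratchReg, 3)
    unsigned shift = byte * 8;                 // as_sll(offsetTemp, offsetTemp, 3)
    uint32_t mask  = 0xffu << shift;           // ma_li + as_sllv
    uint32_t keep  = word & ~mask;             // as_nor + as_and
    return keep | ((uint32_t)value << shift);  // as_sllv + as_or
}
// Big-endian (hedged): byte = (addr & 3) ^ 3 before computing the shift.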
template<typename T>
static void
AtomicExchange(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync, const T& mem,
Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
Register output)
{
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
switch (nbytes) {
case 1:
case 2:
break;
case 4:
MOZ_ASSERT(valueTemp == InvalidReg);
MOZ_ASSERT(offsetTemp == InvalidReg);
MOZ_ASSERT(maskTemp == InvalidReg);
break;
default:
MOZ_CRASH();
}
Label again;
masm.computeEffectiveAddress(mem, SecondScratchReg);
if (nbytes == 4) {
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(output, SecondScratchReg, 0);
masm.ma_move(ScratchRegister, value);
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
return;
}
masm.as_andi(offsetTemp, SecondScratchReg, 3);
masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN
masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
masm.as_sll(offsetTemp, offsetTemp, 3);
masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
masm.as_sllv(maskTemp, maskTemp, offsetTemp);
masm.as_nor(maskTemp, zero, maskTemp);
switch (nbytes) {
case 1:
masm.as_andi(valueTemp, value, 0xff);
break;
case 2:
masm.as_andi(valueTemp, value, 0xffff);
break;
}
masm.as_sllv(valueTemp, valueTemp, offsetTemp);
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(output, SecondScratchReg, 0);
masm.as_and(ScratchRegister, output, maskTemp);
masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
masm.as_srlv(output, output, offsetTemp);
switch (nbytes) {
case 1:
if (signExtend) {
masm.ma_seb(output, output);
} else {
masm.as_andi(output, output, 0xff);
}
break;
case 2:
if (signExtend) {
masm.ma_seh(output, output);
} else {
masm.as_andi(output, output, 0xffff);
}
break;
}
masm.memoryBarrierAfter(sync);
}
void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
Register value, Register valueTemp, Register offsetTemp,
Register maskTemp, Register output)
{
AtomicExchange(*this, type, sync, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
Register value, Register valueTemp, Register offsetTemp,
Register maskTemp, Register output)
{
AtomicExchange(*this, type, sync, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
static void
AtomicFetchOp(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync,
AtomicOp op, const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
bool signExtend = Scalar::isSignedIntType(type);
unsigned nbytes = Scalar::byteSize(type);
switch (nbytes) {
case 1:
case 2:
break;
case 4:
MOZ_ASSERT(valueTemp == InvalidReg);
MOZ_ASSERT(offsetTemp == InvalidReg);
MOZ_ASSERT(maskTemp == InvalidReg);
break;
default:
MOZ_CRASH();
}
Label again;
masm.computeEffectiveAddress(mem, SecondScratchReg);
if (nbytes == 4) {
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(output, SecondScratchReg, 0);
switch (op) {
case AtomicFetchAddOp:
masm.as_addu(ScratchRegister, output, value);
break;
case AtomicFetchSubOp:
masm.as_subu(ScratchRegister, output, value);
break;
case AtomicFetchAndOp:
masm.as_and(ScratchRegister, output, value);
break;
case AtomicFetchOrOp:
masm.as_or(ScratchRegister, output, value);
break;
case AtomicFetchXorOp:
masm.as_xor(ScratchRegister, output, value);
break;
default:
MOZ_CRASH();
}
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
return;
}
masm.as_andi(offsetTemp, SecondScratchReg, 3);
masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN
masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
masm.as_sll(offsetTemp, offsetTemp, 3);
masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
masm.as_sllv(maskTemp, maskTemp, offsetTemp);
masm.as_nor(maskTemp, zero, maskTemp);
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(ScratchRegister, SecondScratchReg, 0);
masm.as_srlv(output, ScratchRegister, offsetTemp);
switch (op) {
case AtomicFetchAddOp:
masm.as_addu(valueTemp, output, value);
break;
case AtomicFetchSubOp:
masm.as_subu(valueTemp, output, value);
break;
case AtomicFetchAndOp:
masm.as_and(valueTemp, output, value);
break;
case AtomicFetchOrOp:
masm.as_or(valueTemp, output, value);
break;
case AtomicFetchXorOp:
masm.as_xor(valueTemp, output, value);
break;
default:
MOZ_CRASH();
}
switch (nbytes) {
case 1:
masm.as_andi(valueTemp, valueTemp, 0xff);
break;
case 2:
masm.as_andi(valueTemp, valueTemp, 0xffff);
break;
}
masm.as_sllv(valueTemp, valueTemp, offsetTemp);
masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
switch (nbytes) {
case 1:
if (signExtend) {
masm.ma_seb(output, output);
} else {
masm.as_andi(output, output, 0xff);
}
break;
case 2:
if (signExtend) {
masm.ma_seh(output, output);
} else {
masm.as_andi(output, output, 0xffff);
}
break;
}
masm.memoryBarrierAfter(sync);
}
void
MacroAssembler::atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
AtomicFetchOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
void
MacroAssembler::atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
AtomicFetchOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
static void
AtomicEffectOp(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync, AtomicOp op,
const T& mem, Register value, Register valueTemp, Register offsetTemp, Register maskTemp)
{
unsigned nbytes = Scalar::byteSize(type);
switch (nbytes) {
case 1:
case 2:
break;
case 4:
MOZ_ASSERT(valueTemp == InvalidReg);
MOZ_ASSERT(offsetTemp == InvalidReg);
MOZ_ASSERT(maskTemp == InvalidReg);
break;
default:
MOZ_CRASH();
}
Label again;
masm.computeEffectiveAddress(mem, SecondScratchReg);
if (nbytes == 4) {
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(ScratchRegister, SecondScratchReg, 0);
switch (op) {
case AtomicFetchAddOp:
masm.as_addu(ScratchRegister, ScratchRegister, value);
break;
case AtomicFetchSubOp:
masm.as_subu(ScratchRegister, ScratchRegister, value);
break;
case AtomicFetchAndOp:
masm.as_and(ScratchRegister, ScratchRegister, value);
break;
case AtomicFetchOrOp:
masm.as_or(ScratchRegister, ScratchRegister, value);
break;
case AtomicFetchXorOp:
masm.as_xor(ScratchRegister, ScratchRegister, value);
break;
default:
MOZ_CRASH();
}
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
return;
}
masm.as_andi(offsetTemp, SecondScratchReg, 3);
masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN
masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
masm.as_sll(offsetTemp, offsetTemp, 3);
masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
masm.as_sllv(maskTemp, maskTemp, offsetTemp);
masm.as_nor(maskTemp, zero, maskTemp);
masm.memoryBarrierBefore(sync);
masm.bind(&again);
masm.as_ll(ScratchRegister, SecondScratchReg, 0);
masm.as_srlv(valueTemp, ScratchRegister, offsetTemp);
switch (op) {
case AtomicFetchAddOp:
masm.as_addu(valueTemp, valueTemp, value);
break;
case AtomicFetchSubOp:
masm.as_subu(valueTemp, valueTemp, value);
break;
case AtomicFetchAndOp:
masm.as_and(valueTemp, valueTemp, value);
break;
case AtomicFetchOrOp:
masm.as_or(valueTemp, valueTemp, value);
break;
case AtomicFetchXorOp:
masm.as_xor(valueTemp, valueTemp, value);
break;
default:
MOZ_CRASH();
}
switch (nbytes) {
case 1:
masm.as_andi(valueTemp, valueTemp, 0xff);
break;
case 2:
masm.as_andi(valueTemp, valueTemp, 0xffff);
break;
}
masm.as_sllv(valueTemp, valueTemp, offsetTemp);
masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
masm.as_sc(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
}
void
MacroAssembler::atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register valueTemp,
Register offsetTemp, Register maskTemp)
{
AtomicEffectOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp);
}
void
MacroAssembler::atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register valueTemp,
Register offsetTemp, Register maskTemp)
{
AtomicEffectOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp);
}
// ========================================================================
// JS atomic operations.
template<typename T>
static void
CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
output.gpr());
}
}
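The Uint32 special case exists because the result becomes a JS number: values of 2^31 and above do not fit a signed int32 payload, so the old element value is routed through a GPR temp and converted to double (the bug 1077305 comments further down record the same constraint). A tiny illustration:

#include <stdint.h>

// 0x80000000 reinterpreted as int32 would be -2147483648; as a JS number it
// must be 2147483648, hence convertUInt32ToDouble on the Uint32 path.
static double Uint32ElementToNumber(uint32_t oldval) {
    return (double)oldval;  // exact: a double holds every 32-bit integer
}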
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
temp, output);
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register oldval, Register newval,
Register valueTemp, Register offsetTemp, Register maskTemp,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
temp, output);
}
template<typename T>
static void
AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp,
output.gpr());
}
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp, temp,
output);
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp, temp, output);
}
template<typename T>
static void
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
AtomicOp op, Register value, const T& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp,
output.gpr());
}
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp, temp,
output);
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register valueTemp,
Register offsetTemp, Register maskTemp, Register temp,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp, temp,
output);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register valueTemp,
Register offsetTemp, Register maskTemp)
{
atomicEffectOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register valueTemp,
Register offsetTemp, Register maskTemp)
{
atomicEffectOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp);
}
//}}} check_macroassembler_style

View file

@@ -216,6 +216,53 @@ class MacroAssemblerMIPSShared : public Assembler
// Handle NaN specially if handleNaN is true.
void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
private:
void atomicEffectOpMIPSr2(int nbytes, AtomicOp op, const Register& value, const Register& addr,
Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
void atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value, const Register& addr,
Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp,
Register output);
void compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr, Register oldval,
Register newval, Register flagTemp, Register valueTemp, Register offsetTemp,
Register maskTemp, Register output);
protected:
void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const Address& address,
Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const BaseIndex& address,
Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const Address& address,
Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const BaseIndex& address,
Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
const Address& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output);
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
const BaseIndex& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output);
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
const Address& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output);
void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
const BaseIndex& address, Register flagTemp, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output);
void compareExchange(int nbytes, bool signExtend, const Address& address, Register oldval,
Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
Register output);
void compareExchange(int nbytes, bool signExtend, const BaseIndex& address, Register oldval,
Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
Register output);
void atomicExchange(int nbytes, bool signExtend, const Address& address, Register value,
Register valueTemp, Register offsetTemp, Register maskTemp,
Register output);
void atomicExchange(int nbytes, bool signExtend, const BaseIndex& address, Register value,
Register valueTemp, Register offsetTemp, Register maskTemp,
Register output);
};
} // namespace jit

View file

@@ -822,28 +822,3 @@ CodeGeneratorMIPS::setReturnDoubleRegs(LiveRegisterSet* regs)
regs->add(ReturnFloat32Reg);
regs->add(ReturnDoubleReg);
}
void
CodeGeneratorMIPS::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir)
{
Register ptr = ToRegister(lir->ptr());
Register64 output = ToOutRegister64(lir);
uint32_t offset = lir->mir()->access().offset();
BaseIndex addr(HeapReg, ptr, TimesOne, offset);
masm.atomicLoad64(Synchronization::Full(), addr, Register64::Invalid(), output);
}
void
CodeGeneratorMIPS::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir)
{
Register ptr = ToRegister(lir->ptr());
Register64 value = ToRegister64(lir->value());
Register tmp = ToRegister(lir->tmp());
uint32_t offset = lir->mir()->access().offset();
BaseIndex addr(HeapReg, ptr, TimesOne, offset);
masm.atomicStore64(addr, tmp, value);
}

View file

@@ -85,8 +85,6 @@ class CodeGeneratorMIPS : public CodeGeneratorMIPSShared
void visitBoxFloatingPoint(LBoxFloatingPoint* box);
void visitUnbox(LUnbox* unbox);
void setReturnDoubleRegs(LiveRegisterSet* regs);
void visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir);
void visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir);
};
typedef CodeGeneratorMIPS CodeGeneratorSpecific;

View file

@@ -177,49 +177,6 @@ class LInt64ToFloatingPoint : public LCallInstructionHelper<1, INT64_PIECES, 0>
}
};
class LWasmAtomicLoadI64 : public LInstructionHelper<INT64_PIECES, 1, 0>
{
public:
LIR_HEADER(WasmAtomicLoadI64);
LWasmAtomicLoadI64(const LAllocation& ptr)
{
setOperand(0, ptr);
}
const LAllocation* ptr() {
return getOperand(0);
}
const MWasmLoad* mir() const {
return mir_->toWasmLoad();
}
};
class LWasmAtomicStoreI64 : public LInstructionHelper<0, 1 + INT64_PIECES, 1>
{
public:
LIR_HEADER(WasmAtomicStoreI64);
LWasmAtomicStoreI64(const LAllocation& ptr, const LInt64Allocation& value, const LDefinition& tmp)
{
setOperand(0, ptr);
setInt64Operand(1, value);
setTemp(0, tmp);
}
const LAllocation* ptr() {
return getOperand(0);
}
const LInt64Allocation value() {
return getInt64Operand(1);
}
const LDefinition* tmp() {
return getTemp(0);
}
const MWasmStore* mir() const {
return mir_->toWasmStore();
}
};
} // namespace jit
} // namespace js

View file

@@ -20,11 +20,6 @@
_(WasmUnalignedLoadI64) \
_(WasmUnalignedStoreI64) \
_(WasmTruncateToInt64) \
-_(Int64ToFloatingPoint) \
-_(WasmCompareExchangeI64) \
-_(WasmAtomicExchangeI64) \
-_(WasmAtomicBinopI64) \
-_(WasmAtomicLoadI64) \
-_(WasmAtomicStoreI64) \
+_(Int64ToFloatingPoint)
#endif // jit_mips32_LOpcodes_mips32_h__

View file

@@ -1942,6 +1942,99 @@ MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(void* handler, Label* pro
ret();
}
template<typename T>
void
MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
Register oldval, Register newval,
Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output)
{
switch (arrayType) {
case Scalar::Int8:
compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8:
compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint16:
compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int32:
compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
convertUInt32ToDouble(temp, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
Register oldval, Register newval, Register temp,
Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
template void
MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register oldval, Register newval, Register temp,
Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
template<typename T>
void
MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
Register value, Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output)
{
switch (arrayType) {
case Scalar::Int8:
atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint16:
atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int32:
atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
convertUInt32ToDouble(temp, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
Register value, Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output);
template void
MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register value, Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output);
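The template void ... lines above are explicit instantiations: the member template is defined in this .cpp file, so every memory-operand type used from other translation units (Address, BaseIndex) must be instantiated here or the link would fail. A minimal self-contained sketch of the pattern, with hypothetical names:

#include <cstdio>

struct Address   { int base; int offset; };
struct BaseIndex { int base; int index; };

template<typename T>
void Describe(const T& mem) {   // in the real code this body lives in a .cpp
    std::printf("base=%d\n", mem.base);
}

// Force code generation for exactly these operand types.
template void Describe<Address>(const Address&);
template void Describe<BaseIndex>(const BaseIndex&);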
CodeOffset
MacroAssemblerMIPSCompat::toggledJump(Label* label)
{
@@ -2394,214 +2487,6 @@ MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output
bind(&done);
}
static void
EnterAtomic64Region(MacroAssembler& masm, Register addr, Register spinlock, Register scratch)
{
masm.movePtr(wasm::SymbolicAddress::js_jit_gAtomic64Lock, spinlock);
masm.as_lbu(zero, addr, 7); // Force memory trap on invalid access before we enter the spinlock.
Label tryLock;
masm.memoryBarrier(MembarFull);
masm.bind(&tryLock);
masm.as_ll(scratch, spinlock, 0);
masm.ma_b(scratch, scratch, &tryLock, Assembler::NonZero, ShortJump);
masm.ma_li(scratch, Imm32(1));
masm.as_sc(scratch, spinlock, 0);
masm.ma_b(scratch, scratch, &tryLock, Assembler::Zero, ShortJump);
masm.memoryBarrier(MembarFull);
}
static void
ExitAtomic64Region(MacroAssembler& masm, Register spinlock)
{
masm.memoryBarrier(MembarFull);
masm.as_sw(zero, spinlock, 0);
masm.memoryBarrier(MembarFull);
}
template <typename T>
static void
AtomicLoad64(MacroAssembler& masm, const T& mem, Register64 temp, Register64 output)
{
MOZ_ASSERT(temp.low == InvalidReg && temp.high == InvalidReg);
masm.computeEffectiveAddress(mem, SecondScratchReg);
EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
/* scratch= */ output.low);
masm.load64(Address(SecondScratchReg, 0), output);
ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
void
MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem, Register64 temp,
Register64 output)
{
AtomicLoad64(*this, mem, temp, output);
}
void
MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem, Register64 temp,
Register64 output)
{
AtomicLoad64(*this, mem, temp, output);
}
template<typename T>
void
MacroAssemblerMIPSCompat::atomicStore64(const T& mem, Register temp, Register64 value)
{
computeEffectiveAddress(mem, SecondScratchReg);
EnterAtomic64Region(asMasm(), /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
/* scratch= */ temp);
store64(value, Address(SecondScratchReg, 0));
ExitAtomic64Region(asMasm(), /* spinlock= */ ScratchRegister);
}
template void
MacroAssemblerMIPSCompat::atomicStore64(const Address& mem, Register temp, Register64 value);
template void
MacroAssemblerMIPSCompat::atomicStore64(const BaseIndex& mem, Register temp, Register64 value);
template <typename T>
static void
CompareExchange64(MacroAssembler& masm, const T& mem, Register64 expect, Register64 replace,
Register64 output)
{
MOZ_ASSERT(output != expect);
MOZ_ASSERT(output != replace);
Label exit;
masm.computeEffectiveAddress(mem, SecondScratchReg);
Address addr(SecondScratchReg, 0);
EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
/* scratch= */ output.low);
masm.load64(addr, output);
masm.ma_b(output.low, expect.low, &exit, Assembler::NotEqual, ShortJump);
masm.ma_b(output.high, expect.high, &exit, Assembler::NotEqual, ShortJump);
masm.store64(replace, addr);
masm.bind(&exit);
ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
void
MacroAssembler::compareExchange64(const Synchronization&, const Address& mem, Register64 expect,
Register64 replace, Register64 output)
{
CompareExchange64(*this, mem, expect, replace, output);
}
void
MacroAssembler::compareExchange64(const Synchronization&, const BaseIndex& mem, Register64 expect,
Register64 replace, Register64 output)
{
CompareExchange64(*this, mem, expect, replace, output);
}
template <typename T>
static void
AtomicExchange64(MacroAssembler& masm, const T& mem, Register64 src, Register64 output)
{
masm.computeEffectiveAddress(mem, SecondScratchReg);
Address addr(SecondScratchReg, 0);
EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
/* scratch= */ output.low);
masm.load64(addr, output);
masm.store64(src, addr);
ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
void
MacroAssembler::atomicExchange64(const Synchronization&, const Address& mem, Register64 src,
Register64 output)
{
AtomicExchange64(*this, mem, src, output);
}
void
MacroAssembler::atomicExchange64(const Synchronization&, const BaseIndex& mem, Register64 src,
Register64 output)
{
AtomicExchange64(*this, mem, src, output);
}
template<typename T>
static void
AtomicFetchOp64(MacroAssembler& masm, AtomicOp op, Register64 value, const T& mem,
Register64 temp, Register64 output)
{
masm.computeEffectiveAddress(mem, SecondScratchReg);
EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
/* scratch= */ output.low);
masm.load64(Address(SecondScratchReg, 0), output);
switch (op) {
case AtomicFetchAddOp:
masm.as_addu(temp.low, output.low, value.low);
masm.as_sltu(temp.high, temp.low, output.low);
masm.as_addu(temp.high, temp.high, output.high);
masm.as_addu(temp.high, temp.high, value.high);
break;
case AtomicFetchSubOp:
masm.as_sltu(temp.high, output.low, value.low);
masm.as_subu(temp.high, output.high, temp.high);
masm.as_subu(temp.low, output.low, value.low);
masm.as_subu(temp.high, temp.high, value.high);
break;
case AtomicFetchAndOp:
masm.as_and(temp.low, output.low, value.low);
masm.as_and(temp.high, output.high, value.high);
break;
case AtomicFetchOrOp:
masm.as_or(temp.low, output.low, value.low);
masm.as_or(temp.high, output.high, value.high);
break;
case AtomicFetchXorOp:
masm.as_xor(temp.low, output.low, value.low);
masm.as_xor(temp.high, output.high, value.high);
break;
default:
MOZ_CRASH();
}
masm.store64(temp, Address(SecondScratchReg, 0));
ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
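The AtomicFetchAddOp/SubOp cases above synthesize 64-bit arithmetic from 32-bit halves, using sltu to recover the carry or borrow. The same computation in C, as a sketch of the add case:

#include <stdint.h>

// lo:hi += vlo:vhi with manual carry propagation, mirroring the emitted code.
static void Add64(uint32_t* lo, uint32_t* hi, uint32_t vlo, uint32_t vhi) {
    uint32_t oldLo = *lo;
    *lo = oldLo + vlo;               // as_addu(temp.low, output.low, value.low)
    uint32_t carry = (*lo < oldLo);  // as_sltu(temp.high, temp.low, output.low)
    *hi = *hi + carry + vhi;         // the two as_addu into temp.high
}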
void
MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, Register64 value,
const Address& mem, Register64 temp, Register64 output)
{
AtomicFetchOp64(*this, op, value, mem, temp, output);
}
void
MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, Register64 value,
const BaseIndex& mem, Register64 temp, Register64 output)
{
AtomicFetchOp64(*this, op, value, mem, temp, output);
}
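Everything in this block routes 64-bit atomics through one process-wide spinlock (js_jit_gAtomic64Lock) because MIPS32 has no 64-bit ll/sc pair; the scheme is only correct if every 64-bit atomic access takes the same lock. A C-level sketch of the protocol, assuming GCC/Clang builtins:

#include <stdint.h>

static uint32_t gAtomic64Lock;  // stand-in for the global spinlock word

static uint64_t SpinlockedFetchAdd64(uint64_t* addr, uint64_t v) {
    // EnterAtomic64Region: spin until we own the lock (acquire ordering).
    while (__atomic_exchange_n(&gAtomic64Lock, 1u, __ATOMIC_ACQUIRE)) {
        // spin
    }
    uint64_t old = *addr;  // plain 64-bit accesses are safe only under the lock
    *addr = old + v;
    // ExitAtomic64Region: store zero with release ordering.
    __atomic_store_n(&gAtomic64Lock, 0u, __ATOMIC_RELEASE);
    return old;
}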
// ========================================================================
// Convert floating point.
@@ -2625,4 +2510,3 @@ MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest, Regist
}
//}}} check_macroassembler_style

View file

@@ -426,7 +426,6 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
uint32_t getType(const Value& val);
void moveData(const Value& val, Register data);
public:
void moveValue(const Value& val, Register type, Register data);
@@ -553,16 +552,329 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void handleFailureWithHandlerTail(void* handler, Label* profilerExitTail);
template <typename T>
void atomicStore64(const T& mem, Register temp, Register64 value);
/////////////////////////////////////////////////////////////////
// Common interface.
/////////////////////////////////////////////////////////////////
public:
// The following functions are exposed for use in platform-shared code.
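// (In these wrappers, valueTemp, offsetTemp and maskTemp are scratch
// registers for the shared sub-word helpers, which perform 8- and 16-bit
// operations on the containing aligned word via a shift and a field mask.)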
template<typename T>
void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange32(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicAdd8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAdd16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAdd32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicSub8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicSub16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicSub32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicAnd8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAnd16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAnd32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicOr8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicOr16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicOr32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicXor8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicXor16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicXor32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T>
void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
template<typename T>
void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
inline void incrementInt32Value(const Address& addr);
void move32(Imm32 imm, Register dest);
@@ -714,12 +1026,127 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
// convert it to double. Else, branch to failure.
void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
template<typename T>
void atomicFetchAdd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchSub8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAnd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchOr8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchXor8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval,
Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAdd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchSub16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAnd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchOr16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchXor16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval,
Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAdd32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchSub32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAnd32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchOr32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchXor32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
MOZ_CRASH();
}
template<typename T> void atomicExchange32(const T& mem, Register value, Register output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchAdd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchSub64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchAnd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchOr64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchXor64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicExchange64(const T& mem, Register64 src, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void compareExchange64(const T& mem, Register64 expect, Register64 replace, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicLoad64(const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
protected:
bool buildOOLFakeExitFrame(void* fakeReturnAddr);
void enterAtomic64Region(Register addr, Register spinlock, Register tmp);
void exitAtomic64Region(Register spinlock);
public:
CodeOffset labelForPatch() {
return CodeOffset(nextOffset().getOffset());

View file

@@ -1925,10 +1925,6 @@ int
Simulator::loadLinkedW(uint32_t addr, SimInstruction* instr)
{
if ((addr & kPointerAlignmentMask) == 0) {
if (handleWasmFault(addr, 1))
return -1;
volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
int32_t value = *ptr;
lastLLValue_ = value;
@@ -2027,10 +2023,6 @@ typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2,
int32_t arg4, int32_t arg5, int32_t arg6);
typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
int32_t arg4, int32_t arg5, int32_t arg6, int32_t arg7);
typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int32_t arg0, int32_t arg1, int32_t arg2,
int64_t arg3);
typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int32_t arg0, int32_t arg1, int64_t arg2,
int64_t arg3);
typedef double (*Prototype_Double_None)();
typedef double (*Prototype_Double_Double)(double arg0);
@@ -2054,13 +2046,6 @@ typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
double arg2, double arg3);
static int64_t
MakeInt64(int32_t first, int32_t second)
{
// Little-endian order.
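// For example, MakeInt64(0xDDCCBBAA, 0x11223344) == 0x11223344DDCCBBAA.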
return ((int64_t)second << 32) | (uint32_t)first;
}
// Software interrupt instructions are used by the simulator to call into C++.
void
Simulator::softwareInterrupt(SimInstruction* instr)
@@ -2169,21 +2154,6 @@ Simulator::softwareInterrupt(SimInstruction* instr)
setRegister(v0, res);
break;
}
case Args_Int_GeneralGeneralGeneralInt64: {
Prototype_GeneralGeneralGeneralInt64 target =
reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
// The int64 arg is not split across register and stack
int64_t result = target(arg0, arg1, arg2, MakeInt64(arg4, arg5));
setCallResult(result);
break;
}
case Args_Int_GeneralGeneralInt64Int64: {
Prototype_GeneralGeneralInt64Int64 target =
reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
int64_t result = target(arg0, arg1, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5));
setCallResult(result);
break;
}
case Args_Int64_Double: {
double dval0, dval1;
int32_t ival;

View file

@@ -156,8 +156,6 @@ class Simulator {
Simulator();
~Simulator();
static bool supportsAtomics() { return true; }
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* Current();

View file

@@ -482,6 +482,8 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
{
const MWasmStore* mir = lir->mir();
MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
uint32_t offset = mir->access().offset();
MOZ_ASSERT(offset < wasm::OffsetGuardLimit);

View file

@@ -19,9 +19,6 @@
_(WasmUnalignedLoadI64) \
_(WasmUnalignedStoreI64) \
_(WasmTruncateToInt64) \
_(Int64ToFloatingPoint) \
_(WasmCompareExchangeI64) \
_(WasmAtomicExchangeI64) \
_(WasmAtomicBinopI64) \
_(Int64ToFloatingPoint)
#endif // jit_mips64_LOpcodes_mips64_h__

View file

@@ -2056,6 +2056,99 @@ MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler, Label* p
ret();
}
template<typename T>
void
MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
Register oldval, Register newval,
Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output)
{
switch (arrayType) {
case Scalar::Int8:
compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8:
compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint16:
compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int32:
compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
convertUInt32ToDouble(temp, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
Register oldval, Register newval, Register temp,
Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
template void
MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register oldval, Register newval, Register temp,
Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
template<typename T>
void
MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
Register value, Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output)
{
switch (arrayType) {
case Scalar::Int8:
atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint8:
atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int16:
atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint16:
atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Int32:
atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
convertUInt32ToDouble(temp, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
Register value, Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output);
template void
MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register value, Register temp, Register valueTemp,
Register offsetTemp, Register maskTemp,
AnyRegister output);
CodeOffset
MacroAssemblerMIPS64Compat::toggledJump(Label* label)
{
@@ -2445,135 +2538,6 @@ MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input, Register output
}
template <typename T>
static void
CompareExchange64(MacroAssembler& masm, const Synchronization& sync, const T& mem,
Register64 expect, Register64 replace, Register64 output)
{
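// lld/scd form a load-linked/store-conditional pair: scd writes back only
// if the location has not been written since the lld, leaving 1 in its
// register on success and 0 on failure, so the Zero branch below retries.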
masm.computeEffectiveAddress(mem, SecondScratchReg);
Label tryAgain;
Label exit;
masm.memoryBarrierBefore(sync);
masm.bind(&tryAgain);
masm.as_lld(output.reg, SecondScratchReg, 0);
masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
masm.movePtr(replace.reg, ScratchRegister);
masm.as_scd(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
masm.bind(&exit);
}
void
MacroAssembler::compareExchange64(const Synchronization& sync, const Address& mem,
Register64 expect, Register64 replace, Register64 output)
{
CompareExchange64(*this, sync, mem, expect, replace, output);
}
void
MacroAssembler::compareExchange64(const Synchronization& sync, const BaseIndex& mem,
Register64 expect, Register64 replace, Register64 output)
{
CompareExchange64(*this, sync, mem, expect, replace, output);
}
template <typename T>
static void
AtomicExchange64(MacroAssembler& masm, const Synchronization& sync, const T& mem,
Register64 src, Register64 output)
{
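// Same lld/scd retry loop as CompareExchange64 above, except that the new
// value is stored unconditionally.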
masm.computeEffectiveAddress(mem, SecondScratchReg);
Label tryAgain;
masm.memoryBarrierBefore(sync);
masm.bind(&tryAgain);
masm.as_lld(output.reg, SecondScratchReg, 0);
masm.movePtr(src.reg, ScratchRegister);
masm.as_scd(ScratchRegister, SecondScratchReg, 0);
masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
}
void
MacroAssembler::atomicExchange64(const Synchronization& sync, const Address& mem, Register64 src,
Register64 output)
{
AtomicExchange64(*this, sync, mem, src, output);
}
void
MacroAssembler::atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 src,
Register64 output)
{
AtomicExchange64(*this, sync, mem, src, output);
}
template<typename T>
static void
AtomicFetchOp64(MacroAssembler& masm, const Synchronization& sync, AtomicOp op, Register64 value,
const T& mem, Register64 temp, Register64 output)
{
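// The operation is computed into temp between the lld and the scd; if the
// location was written concurrently, the scd fails and the loop retries.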
masm.computeEffectiveAddress(mem, SecondScratchReg);
Label tryAgain;
masm.memoryBarrierBefore(sync);
masm.bind(&tryAgain);
masm.as_lld(output.reg, SecondScratchReg, 0);
switch (op) {
case AtomicFetchAddOp:
masm.as_daddu(temp.reg, output.reg, value.reg);
break;
case AtomicFetchSubOp:
masm.as_dsubu(temp.reg, output.reg, value.reg);
break;
case AtomicFetchAndOp:
masm.as_and(temp.reg, output.reg, value.reg);
break;
case AtomicFetchOrOp:
masm.as_or(temp.reg, output.reg, value.reg);
break;
case AtomicFetchXorOp:
masm.as_xor(temp.reg, output.reg, value.reg);
break;
default:
MOZ_CRASH();
}
masm.as_scd(temp.reg, SecondScratchReg, 0);
masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
masm.memoryBarrierAfter(sync);
}
void
MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
const Address& mem, Register64 temp, Register64 output)
{
AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
}
void
MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
const BaseIndex& mem, Register64 temp, Register64 output)
{
AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
}
// ========================================================================
// Convert floating point.

View file

@@ -590,6 +590,326 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
public:
// The following functions are exposed for use in platform-shared code.
// TODO: These are no longer used in platform code.
private:
template<typename T>
void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T>
void atomicExchange32(const T& mem, Register value, Register valueTemp,
Register offsetTemp, Register maskTemp, Register output)
{
atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicAdd8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAdd16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAdd32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchSub32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicSub8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicSub16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicSub32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicAnd8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAnd16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicAnd32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchOr32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicOr8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicOr16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicOr32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template<typename T, typename S>
void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template<typename T, typename S>
void atomicFetchXor32(const S& value, const T& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
{
atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
}
template <typename T, typename S>
void atomicXor8(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicXor16(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
template <typename T, typename S>
void atomicXor32(const T& value, const S& mem, Register flagTemp,
Register valueTemp, Register offsetTemp, Register maskTemp)
{
atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
}
public:
template<typename T>
void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
template<typename T>
void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
AnyRegister output);
inline void incrementInt32Value(const Address& addr);
void move32(Imm32 imm, Register dest);
@@ -747,6 +1067,124 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
ma_cmp_set(dest, lhs, rhs, cond);
}
template<typename T>
void atomicFetchAdd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchSub8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAnd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchOr8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchXor8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval,
Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAdd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchSub16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAnd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchOr16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchXor16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval,
Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAdd32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchSub32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchAnd32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchOr32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void atomicFetchXor32(Register value, const T& mem, Register temp, Register output) {
MOZ_CRASH();
}
template<typename T>
void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
MOZ_CRASH();
}
template<typename T> void atomicExchange32(const T& mem, Register value, Register output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchAdd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchSub64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchAnd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchOr64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicFetchXor64(Register64 value, const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicExchange64(const T& mem, Register64 src, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void compareExchange64(const T& mem, Register64 expect, Register64 replace, Register64 output) {
MOZ_CRASH();
}
template <typename T>
void atomicLoad64(const T& mem, Register64 temp, Register64 output) {
MOZ_CRASH();
}
protected:
bool buildOOLFakeExitFrame(void* fakeReturnAddr);

View file

@@ -37,7 +37,6 @@
#include <float.h>
#include "jit/AtomicOperations.h"
#include "jit/mips64/Assembler-mips64.h"
#include "threading/LockGuard.h"
#include "vm/Runtime.h"
@@ -466,7 +465,6 @@ SimInstruction::instructionType() const
case op_lwl:
case op_lwr:
case op_ll:
case op_lld:
case op_ld:
case op_ldl:
case op_ldr:
@@ -476,7 +474,6 @@
case op_swl:
case op_swr:
case op_sc:
case op_scd:
case op_sd:
case op_sdl:
case op_sdr:
@@ -1154,10 +1151,10 @@ GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache, void* page)
SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
if (p)
return p->value();
AutoEnterOOMUnsafeRegion oomUnsafe;
CachePage* new_page = js_new<CachePage>();
if (!new_page || !i_cache.add(p, page, new_page))
oomUnsafe.crash("Simulator CachePage");
if (!i_cache.add(p, page, new_page))
return nullptr;
return new_page;
}
@@ -1286,9 +1283,6 @@ Simulator::Simulator()
for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++)
FPUregisters_[i] = 0;
FCSR_ = 0;
LLBit_ = false;
LLAddr_ = 0;
lastLLValue_ = 0;
// The ra and pc are initialized to a known bad value that will cause an
// access violation if the simulator ever tries to execute it.
@@ -1364,10 +1358,11 @@ class Redirection
}
}
AutoEnterOOMUnsafeRegion oomUnsafe;
Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
if (!redir) {
oomUnsafe.crash("Simulator redirection");
MOZ_ReportAssertionFailure("[unhandlable oom] Simulator redirection",
__FILE__, __LINE__);
MOZ_CRASH();
}
new(redir) Redirection(nativeFunction, type);
return redir;
@@ -1620,83 +1615,13 @@ Simulator::get_pc() const
void
Simulator::startInterrupt(JitActivation* activation)
{
JS::ProfilingFrameIterator::RegisterState state;
state.pc = (void*) get_pc();
state.fp = (void*) getRegister(fp);
state.sp = (void*) getRegister(sp);
state.lr = (void*) getRegister(ra);
activation->startWasmInterrupt(state);
MOZ_CRASH("NIY");
}
// The signal handler only redirects the PC to the interrupt stub when the PC is
// in function code. However, this guard is racy for the simulator since the
// signal handler samples PC in the middle of simulating an instruction and thus
// the current PC may have advanced once since the signal handler's guard. So we
// re-check here.
void
Simulator::handleWasmInterrupt()
{
if (!wasm::CodeExists)
return;
void* pc = (void*)get_pc();
void* fp = (void*)getRegister(Register::fp);
JitActivation* activation = TlsContext.get()->activation()->asJit();
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment || !segment->containsCodePC(pc))
return;
// fp can be null during the prologue/epilogue of the entry function.
if (!fp)
return;
startInterrupt(activation);
set_pc(int64_t(segment->interruptCode()));
}
// WebAssembly memories contain an extra region of guard pages (see
// WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
// using a signal handler that redirects PC to a stub that safely reports an
// error. However, if the handler is hit by the simulator, the PC is in C++ code
// and cannot be redirected. Therefore, we must avoid hitting the handler by
// redirecting in the simulator before the real handler would have been hit.
bool
Simulator::handleWasmFault(uint64_t addr, unsigned numBytes)
{
if (!wasm::CodeExists)
return false;
JSContext* cx = TlsContext.get();
if (!cx->activation() || !cx->activation()->isJit())
return false;
JitActivation* act = cx->activation()->asJit();
void* pc = reinterpret_cast<void*>(get_pc());
uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp));
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
return false;
wasm::Instance* instance = wasm::LookupFaultingInstance(*segment, pc, fp);
if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
return false;
LLBit_ = false;
const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess) {
startInterrupt(act);
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
set_pc(int64_t(segment->outOfBoundsCode()));
return true;
}
MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
set_pc(int64_t(memoryAccess->trapOutOfLineCode(segment->base())));
return true;
MOZ_CRASH("NIY");
}
bool
@@ -1727,22 +1652,17 @@ Simulator::handleWasmTrapFault()
return true;
}
// MIPS memory instructions (except lw(d)l/r, sw(d)l/r) trap on unaligned memory
// access, enabling the OS to handle them via trap-and-emulate.
// Note that simulator runs have the runtime system running directly on the host
// system and only generated code is executed in the simulator.
// Since the host is typically IA32 it will not trap on unaligned memory access.
// We assume that executing correct generated code will not produce unaligned
// memory access, so we explicitly check for address alignment and trap.
// Note that trapping does not occur when executing wasm code, which requires that
// unaligned memory accesses produce correct results.
// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we
// simply disallow unaligned reads, but at some point we may want to move to
// emulating the rotate behaviour. Note that simulator runs have the runtime
// system running directly on the host system and only generated code is
// executed in the simulator. Since the host is typically IA32 we will not
// get the correct MIPS-like behaviour on unaligned accesses.
uint8_t
Simulator::readBU(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 1))
return 0xff;
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
}
@@ -1750,9 +1670,6 @@ Simulator::readBU(uint64_t addr, SimInstruction* instr)
int8_t
Simulator::readB(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 1))
return -1;
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
return *ptr;
}
@@ -1760,9 +1677,6 @@ Simulator::readB(uint64_t addr, SimInstruction* instr)
void
Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 1))
return;
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
}
@@ -1770,9 +1684,6 @@ Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr)
void
Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 1))
return;
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
*ptr = value;
}
@@ -1780,10 +1691,7 @@ Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr)
uint16_t
Simulator::readHU(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return 0xffff;
if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
@@ -1796,10 +1704,7 @@ Simulator::readHU(uint64_t addr, SimInstruction* instr)
int16_t
Simulator::readH(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return -1;
if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
@@ -1812,12 +1717,8 @@ Simulator::readH(uint64_t addr, SimInstruction* instr)
void
Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return;
if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
LLBit_ = false;
*ptr = value;
return;
}
@@ -1829,12 +1730,8 @@ Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr)
void
Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return;
if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
LLBit_ = false;
*ptr = value;
return;
}
@@ -1846,10 +1743,13 @@ Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr)
uint32_t
Simulator::readWU(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 4))
return -1;
if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if (addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
}
if ((addr & 3) == 0) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
return *ptr;
}
@@ -1862,10 +1762,13 @@ Simulator::readWU(uint64_t addr, SimInstruction* instr)
int32_t
Simulator::readW(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 4))
return -1;
if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if (addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
}
if ((addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return *ptr;
}
@@ -1878,12 +1781,14 @@ Simulator::readW(uint64_t addr, SimInstruction* instr)
void
Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 4))
return;
if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if (addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
}
if ((addr & 3) == 0) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
LLBit_ = false;
*ptr = value;
return;
}
@ -1895,12 +1800,14 @@ Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr)
void
Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 4))
return;
if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if (addr < 0x400) {
        // This has to be a NULL dereference; drop into the debugger.
printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
}
if ((addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
LLBit_ = false;
*ptr = value;
return;
}
@ -1912,12 +1819,15 @@ Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr)
int64_t
Simulator::readDW(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 8))
return -1;
if ((addr & kPointerAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
if (addr < 0x400) {
        // This has to be a NULL dereference; drop into the debugger.
printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
}
if ((addr & kPointerAlignmentMask) == 0) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
        return *ptr;
}
printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
@ -1928,12 +1838,14 @@ Simulator::readDW(uint64_t addr, SimInstruction* instr)
void
Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 8))
return;
if ((addr & kPointerAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if (addr < 0x400) {
        // This has to be a NULL dereference; drop into the debugger.
printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
}
if ((addr & kPointerAlignmentMask) == 0) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
LLBit_ = false;
*ptr = value;
return;
}
@ -1945,10 +1857,7 @@ Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr)
double
Simulator::readD(uint64_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 8))
return NAN;
if ((addr & kDoubleAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if ((addr & kDoubleAlignmentMask) == 0) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
@ -1961,12 +1870,8 @@ Simulator::readD(uint64_t addr, SimInstruction* instr)
void
Simulator::writeD(uint64_t addr, double value, SimInstruction* instr)
{
if (handleWasmFault(addr, 8))
return;
if ((addr & kDoubleAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
if ((addr & kDoubleAlignmentMask) == 0) {
double* ptr = reinterpret_cast<double*>(addr);
LLBit_ = false;
*ptr = value;
return;
}
@ -1975,110 +1880,6 @@ Simulator::writeD(uint64_t addr, double value, SimInstruction* instr)
MOZ_CRASH();
}
int
Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr)
{
if ((addr & 3) == 0) {
if (handleWasmFault(addr, 4))
return -1;
volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
int32_t value = *ptr;
lastLLValue_ = value;
LLAddr_ = addr;
        // Note that any memory write or "external" interrupt should reset LLBit_ to false.
LLBit_ = true;
return value;
}
printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
return 0;
}
int
Simulator::storeConditionalW(uint64_t addr, int value, SimInstruction* instr)
{
    // Correct behavior in this case, as defined by the architecture, is to just return 0,
    // but there is no point in allowing that. It is certainly an indicator of a bug.
if (addr != LLAddr_) {
printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIx64 ", expected: 0x%016" PRIx64 "\n",
addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
MOZ_CRASH();
}
if ((addr & 3) == 0) {
SharedMem<int32_t*> ptr = SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
if (!LLBit_) {
return 0;
}
LLBit_ = false;
LLAddr_ = 0;
int32_t expected = int32_t(lastLLValue_);
int32_t old = AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
        return (old == expected) ? 1 : 0;
}
printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
return 0;
}
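
Taken together, loadLinkedW and storeConditionalW above emulate MIPS LL/SC: the load records LLAddr_ and lastLLValue_ and sets LLBit_, any ordinary write clears LLBit_, and the conditional store commits only if the bit survived and a compare-exchange still sees the recorded value. A simplified sketch of the same protocol, with hypothetical names (a single link register, no alignment or fault handling):

    #include <atomic>
    #include <cstdint>

    struct LinkState {
        bool bit = false;     // LLBit_: cleared by any intervening write.
        uint64_t addr = 0;    // LLAddr_
        int32_t value = 0;    // lastLLValue_
    };

    static int32_t LoadLinked(std::atomic<int32_t>* mem, uint64_t addr, LinkState& ll) {
        int32_t v = mem->load();
        ll.bit = true;
        ll.addr = addr;
        ll.value = v;
        return v;
    }

    static int StoreConditional(std::atomic<int32_t>* mem, int32_t v, LinkState& ll) {
        if (!ll.bit)
            return 0;                          // Link broken: SC fails.
        ll.bit = false;
        int32_t expected = ll.value;
        // SC succeeds only if memory still holds the linked value.
        return mem->compare_exchange_strong(expected, v) ? 1 : 0;
    }
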
int64_t
Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr)
{
if ((addr & kPointerAlignmentMask) == 0) {
if (handleWasmFault(addr, 8))
return -1;
volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
int64_t value = *ptr;
lastLLValue_ = value;
LLAddr_ = addr;
        // Note that any memory write or "external" interrupt should reset LLBit_ to false.
LLBit_ = true;
return value;
}
printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
return 0;
}
int
Simulator::storeConditionalD(uint64_t addr, int64_t value, SimInstruction* instr)
{
    // Correct behavior in this case, as defined by the architecture, is to just return 0,
    // but there is no point in allowing that. It is certainly an indicator of a bug.
if (addr != LLAddr_) {
printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIx64 ", expected: 0x%016" PRIx64 "\n",
addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
MOZ_CRASH();
}
if ((addr & kPointerAlignmentMask) == 0) {
SharedMem<int64_t*> ptr = SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
if (!LLBit_) {
return 0;
}
LLBit_ = false;
LLAddr_ = 0;
int64_t expected = lastLLValue_;
int64_t old = AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
        return (old == expected) ? 1 : 0;
}
printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
MOZ_CRASH();
return 0;
}
uintptr_t
Simulator::stackLimit() const
{
@ -2131,10 +1932,7 @@ typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2,
int64_t arg4, int64_t arg5, int64_t arg6);
typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);
typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0, int64_t arg1, int64_t arg2,
int64_t arg3);
typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0, int64_t arg1, int64_t arg2,
int64_t arg3);
typedef double (*Prototype_Double_None)();
typedef double (*Prototype_Double_Double)(double arg0);
typedef double (*Prototype_Double_Int)(int64_t arg0);
@ -2143,8 +1941,6 @@ typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1, int64_t
typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1, int64_t arg2,
int64_t arg3);
typedef float (*Prototype_Float32_Float32)(float arg0);
typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
typedef float (*Prototype_Float32_IntInt)(int arg0, int arg1);
typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
@ -2212,9 +2008,6 @@ Simulator::softwareInterrupt(SimInstruction* instr)
case Args_General3: {
Prototype_General3 target = reinterpret_cast<Prototype_General3>(external);
int64_t result = target(arg0, arg1, arg2);
        if (external == intptr_t(&js::wasm::Instance::wake)) {
result = int32_t(result);
}
setCallResult(result);
break;
}
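
Each Args_* case follows the same dispatch pattern: the raw callee address is reinterpreted as a function pointer of the tagged prototype, simulator registers are marshalled into native arguments, and the result is written back. Callees that truly return an int32 (such as the wake/wait hooks whose special cases are removed here) must have their result explicitly truncated first. A reduced sketch of the pattern, with made-up names:

    #include <cstdint>

    typedef int64_t (*Proto_General2)(int64_t, int64_t);

    // Hypothetical reduction of the softwareInterrupt() dispatch.
    static int64_t CallExternal2(intptr_t external, int64_t a0, int64_t a1,
                                 bool returnsInt32) {
        Proto_General2 target = reinterpret_cast<Proto_General2>(external);
        int64_t result = target(a0, a1);
        if (returnsInt32)
            result = int32_t(result);  // Keep only the sign-extended low 32 bits.
        return result;
    }
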
@ -2264,26 +2057,6 @@ Simulator::softwareInterrupt(SimInstruction* instr)
setRegister(v0, res);
break;
}
case Args_Int_GeneralGeneralGeneralInt64: {
Prototype_GeneralGeneralGeneralInt64 target =
reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
int64_t result = target(arg0, arg1, arg2, arg3);
        if (external == intptr_t(&js::wasm::Instance::wait_i32)) {
result = int32_t(result);
}
setCallResult(result);
break;
}
case Args_Int_GeneralGeneralInt64Int64: {
Prototype_GeneralGeneralInt64Int64 target =
reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
int64_t result = target(arg0, arg1, arg2, arg3);
        if (external == intptr_t(&js::wasm::Instance::wait_i64)) {
result = int32_t(result);
}
setCallResult(result);
break;
}
case Args_Int_DoubleIntInt: {
double dval = getFpuRegisterDouble(12);
Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
@ -2313,22 +2086,6 @@ Simulator::softwareInterrupt(SimInstruction* instr)
setCallResultFloat(fresult);
break;
}
case Args_Float32_Float32Float32: {
        float fval0 = getFpuRegisterFloat(12);
        float fval1 = getFpuRegisterFloat(13);
Prototype_Float32_Float32Float32 target = reinterpret_cast<Prototype_Float32_Float32Float32>(external);
float fresult = target(fval0, fval1);
setCallResultFloat(fresult);
break;
}
case Args_Float32_IntInt: {
Prototype_Float32_IntInt target = reinterpret_cast<Prototype_Float32_IntInt>(external);
float fresult = target(arg0, arg1);
setCallResultFloat(fresult);
break;
}
case Args_Double_Int: {
Prototype_Double_Int target = reinterpret_cast<Prototype_Double_Int>(external);
double dresult = target(arg0);
@ -3177,21 +2934,6 @@ Simulator::decodeTypeRegister(SimInstruction* instr)
case ff_c_f_fmt:
MOZ_CRASH();
break;
case ff_movf_fmt:
if (testFCSRBit(fcsr_cc)) {
setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
}
break;
case ff_movz_fmt:
if (rt == 0) {
setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
}
break;
case ff_movn_fmt:
if (rt != 0) {
setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
}
break;
default:
MOZ_CRASH();
}
@ -3369,8 +3111,7 @@ Simulator::decodeTypeRegister(SimInstruction* instr)
setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
break;
case ff_cvt_s_fmt:
i64 = getFpuRegister(fs_reg);
setFpuRegisterFloat(fd_reg, static_cast<float>(i64));
MOZ_CRASH();
break;
default:
MOZ_CRASH();
@ -3737,11 +3478,7 @@ Simulator::decodeTypeImmediate(SimInstruction* instr)
}
case op_ll:
addr = rs + se_imm16;
alu_out = loadLinkedW(addr, instr);
break;
case op_lld:
addr = rs + se_imm16;
alu_out = loadLinkedD(addr, instr);
alu_out = readW(addr, instr);
break;
case op_ld:
addr = rs + se_imm16;
@ -3798,9 +3535,6 @@ Simulator::decodeTypeImmediate(SimInstruction* instr)
case op_sc:
addr = rs + se_imm16;
break;
case op_scd:
addr = rs + se_imm16;
break;
case op_sd:
addr = rs + se_imm16;
break;
@ -3882,7 +3616,6 @@ Simulator::decodeTypeImmediate(SimInstruction* instr)
case op_lwl:
case op_lwr:
case op_ll:
case op_lld:
case op_ld:
case op_ldl:
case op_ldr:
@ -3904,10 +3637,8 @@ Simulator::decodeTypeImmediate(SimInstruction* instr)
writeW(addr, I32(mem_value), instr);
break;
case op_sc:
setRegister(rt_reg, storeConditionalW(addr, I32(rt), instr));
break;
case op_scd:
setRegister(rt_reg, storeConditionalD(addr, rt, instr));
writeW(addr, I32(rt), instr);
setRegister(rt_reg, 1);
break;
case op_sd:
writeDW(addr, rt, instr);


@ -160,8 +160,6 @@ class Simulator {
Simulator();
~Simulator();
static bool supportsAtomics() { return true; }
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* Current();
@ -278,12 +276,6 @@ class Simulator {
inline double readD(uint64_t addr, SimInstruction* instr);
inline void writeD(uint64_t addr, double value, SimInstruction* instr);
inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
inline int storeConditionalW(uint64_t addr, int32_t value, SimInstruction* instr);
inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
inline int storeConditionalD(uint64_t addr, int64_t value, SimInstruction* instr);
// Helper function for decodeTypeRegister.
void configureTypeRegister(SimInstruction* instr,
int64_t& alu_out,
@ -316,8 +308,6 @@ class Simulator {
void handleWasmInterrupt();
void startInterrupt(JitActivation* act);
// Handle any wasm faults, returning true if the fault was handled.
bool handleWasmFault(uint64_t addr, unsigned numBytes);
bool handleWasmTrapFault();
// Executes one instruction.
@ -360,10 +350,6 @@ class Simulator {
// FPU control register.
uint32_t FCSR_;
bool LLBit_;
uintptr_t LLAddr_;
int64_t lastLLValue_;
// Simulator support.
char* stack_;
uintptr_t stackLimit_;


@ -6410,7 +6410,6 @@ class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 4>
public:
LIR_HEADER(CompareExchangeTypedArrayElement)
// ARM, ARM64, x86, x64
LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
const LAllocation& oldval, const LAllocation& newval,
const LDefinition& temp)
@ -6421,7 +6420,6 @@ class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 4>
setOperand(3, newval);
setTemp(0, temp);
}
// MIPS32, MIPS64
LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
const LAllocation& oldval, const LAllocation& newval,
const LDefinition& temp, const LDefinition& valueTemp,
@ -6474,7 +6472,6 @@ class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 4>
public:
LIR_HEADER(AtomicExchangeTypedArrayElement)
// ARM, ARM64, x86, x64
LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
const LAllocation& value, const LDefinition& temp)
{
@ -6483,7 +6480,6 @@ class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 4>
setOperand(2, value);
setTemp(0, temp);
}
// MIPS32, MIPS64
LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
const LAllocation& value, const LDefinition& temp,
const LDefinition& valueTemp, const LDefinition& offsetTemp,
@ -6534,7 +6530,6 @@ class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 5>
static const int32_t valueOp = 2;
// ARM, ARM64, x86, x64
LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
const LAllocation& value, const LDefinition& temp1,
const LDefinition& temp2)
@ -6545,16 +6540,15 @@ class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 5>
setTemp(0, temp1);
setTemp(1, temp2);
}
// MIPS32, MIPS64
LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
const LAllocation& value, const LDefinition& temp2,
const LDefinition& valueTemp, const LDefinition& offsetTemp,
const LDefinition& maskTemp)
const LAllocation& value, const LDefinition& temp1,
const LDefinition& temp2, const LDefinition& valueTemp,
const LDefinition& offsetTemp, const LDefinition& maskTemp)
{
setOperand(0, elements);
setOperand(1, index);
setOperand(2, value);
setTemp(0, LDefinition::BogusTemp());
setTemp(0, temp1);
setTemp(1, temp2);
setTemp(2, valueTemp);
setTemp(3, offsetTemp);
@ -6600,7 +6594,6 @@ class LAtomicTypedArrayElementBinopForEffect : public LInstructionHelper<0, 3, 4
public:
LIR_HEADER(AtomicTypedArrayElementBinopForEffect)
// ARM, ARM64, x86, x64
LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
const LAllocation& value,
const LDefinition& flagTemp = LDefinition::BogusTemp())
@ -6610,15 +6603,15 @@ class LAtomicTypedArrayElementBinopForEffect : public LInstructionHelper<0, 3, 4
setOperand(2, value);
setTemp(0, flagTemp);
}
// MIPS32, MIPS64
LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
const LAllocation& value, const LDefinition& valueTemp,
const LDefinition& offsetTemp, const LDefinition& maskTemp)
const LAllocation& value, const LDefinition& flagTemp,
const LDefinition& valueTemp, const LDefinition& offsetTemp,
const LDefinition& maskTemp)
{
setOperand(0, elements);
setOperand(1, index);
setOperand(2, value);
setTemp(0, LDefinition::BogusTemp());
setTemp(0, flagTemp);
setTemp(1, valueTemp);
setTemp(2, offsetTemp);
setTemp(3, maskTemp);
@ -8739,15 +8732,16 @@ class LWasmAtomicBinopHeap : public LInstructionHelper<1, 3, 6>
}
// MIPS32, MIPS64
LWasmAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
const LDefinition& temp, const LDefinition& flagTemp,
const LDefinition& valueTemp, const LDefinition& offsetTemp,
const LDefinition& maskTemp)
{
setOperand(0, ptr);
setOperand(1, value);
setOperand(2, LAllocation());
setTemp(0, LDefinition::BogusTemp());
setTemp(0, temp);
setTemp(1, LDefinition::BogusTemp());
setTemp(2, LDefinition::BogusTemp());
setTemp(2, flagTemp);
setTemp(3, valueTemp);
setTemp(4, offsetTemp);
setTemp(5, maskTemp);
@ -8812,14 +8806,14 @@ class LWasmAtomicBinopHeapForEffect : public LInstructionHelper<0, 3, 5>
}
// MIPS32, MIPS64
LWasmAtomicBinopHeapForEffect(const LAllocation& ptr, const LAllocation& value,
const LDefinition& valueTemp, const LDefinition& offsetTemp,
const LDefinition& maskTemp)
const LDefinition& flagTemp, const LDefinition& valueTemp,
const LDefinition& offsetTemp, const LDefinition& maskTemp)
{
setOperand(0, ptr);
setOperand(1, value);
setOperand(2, LAllocation());
setTemp(0, LDefinition::BogusTemp());
setTemp(1, LDefinition::BogusTemp());
setTemp(1, flagTemp);
setTemp(2, valueTemp);
setTemp(3, offsetTemp);
setTemp(4, maskTemp);


@ -1145,122 +1145,6 @@ MacroAssembler::atomicEffectOp(Scalar::Type arrayType, const Synchronization&, A
// ========================================================================
// JS atomic operations.
template<typename T>
static void
CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
}
}
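
The Uint32 branch above exists because a fetched uint32 can exceed INT32_MAX and therefore cannot be represented as an int32 JS value; it is staged in a temp register and converted to a double instead. A standalone illustration of why that conversion is safe (not SpiderMonkey API):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t old = 0x80000000u;                  // 2147483648: too big for int32_t.
        double jsNumber = static_cast<double>(old);  // Exact: doubles hold every uint32 value.
        std::printf("%u -> %.1f\n", old, jsNumber);  // 2147483648 -> 2147483648.0
        return 0;
    }
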
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
void
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register oldval, Register newval,
Register temp, AnyRegister output)
{
CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
}
template<typename T>
static void
AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
const T& mem, Register value, Register temp, AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicExchange(arrayType, sync, mem, value, temp);
masm.convertUInt32ToDouble(temp, output.fpu());
} else {
masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
}
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const Address& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
void
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
const BaseIndex& mem, Register value, Register temp,
AnyRegister output)
{
AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
}
template<typename T>
static void
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
AnyRegister output)
{
if (arrayType == Scalar::Uint32) {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
masm.convertUInt32ToDouble(temp1, output.fpu());
} else {
masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
}
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp1, Register temp2,
AnyRegister output)
{
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const BaseIndex& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Register value, const Address& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Imm32 value, const Address& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Imm32 value, const BaseIndex& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
template<typename T>
static void
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
@ -1291,4 +1175,18 @@ MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& s
AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Imm32 value, const Address& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
void
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
Imm32 value, const BaseIndex& mem, Register temp)
{
atomicEffectOp(arrayType, sync, op, value, mem, temp);
}
//}}} check_macroassembler_style


@ -24,7 +24,6 @@
#include "fdlibm.h"
#include "jslibmath.h"
#include "jit/AtomicOperations.h"
#include "jit/InlinableNatives.h"
#include "jit/MacroAssembler.h"
#include "threading/Mutex.h"
@ -594,12 +593,8 @@ AddressOf(SymbolicAddress imm, ABIFunctionType* abiType)
*abiType = Args_Int_GeneralGeneralInt64Int64;
return FuncCast(Instance::wait_i64, *abiType);
case SymbolicAddress::Wake:
*abiType = Args_General3;
*abiType = Args_General2;
return FuncCast(Instance::wake, *abiType);
#if defined(JS_CODEGEN_MIPS32)
case SymbolicAddress::js_jit_gAtomic64Lock:
return &js::jit::gAtomic64Lock;
#endif
case SymbolicAddress::Limit:
break;
}
@ -626,9 +621,6 @@ wasm::NeedsBuiltinThunk(SymbolicAddress sym)
case SymbolicAddress::CallImport_F64:
case SymbolicAddress::CoerceInPlace_ToInt32: // GenerateImportJitExit
case SymbolicAddress::CoerceInPlace_ToNumber:
#if defined(JS_CODEGEN_MIPS32)
case SymbolicAddress::js_jit_gAtomic64Lock:
#endif
return false;
case SymbolicAddress::ToInt32:
case SymbolicAddress::DivI64:


@ -1039,10 +1039,6 @@ ThunkedNativeToDescription(SymbolicAddress func)
return "call to native i64.wait (in wasm)";
case SymbolicAddress::Wake:
return "call to native wake (in wasm)";
#if defined(JS_CODEGEN_MIPS32)
case SymbolicAddress::js_jit_gAtomic64Lock:
MOZ_CRASH();
#endif
case SymbolicAddress::Limit:
break;
}


@ -1388,9 +1388,6 @@ enum class SymbolicAddress
WaitI32,
WaitI64,
Wake,
#if defined(JS_CODEGEN_MIPS32)
js_jit_gAtomic64Lock,
#endif
Limit
};