Bug 1421244: Remove support for atomics on ARM < v7; r=lth, sr=luke

MozReview-Commit-ID: LQKX0y49mlq

--HG--
extra : rebase_source : da2704c5f9d064e940f30a709a1d107da3fec446
Benjamin Bouvier 2017-11-29 13:24:10 +01:00
Parent 8904b64329
Commit 7c8fe68f05
14 changed files with 16 additions and 450 deletions


@@ -516,163 +516,6 @@ js::atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp)
return true;
}
// asm.js callouts for platforms that do not have non-word-sized
// atomics where we don't want to inline the logic for the atomics.
//
// Memory will always be shared since the callouts are only called from
// code that checks that the memory is shared.
//
// To test this, either run on eg Raspberry Pi Model 1, or invoke the ARM
// simulator build with ARMHWCAP=vfp set. Do not set any other flags; other
// vfp/neon flags force ARMv7 to be set.
int32_t
js::atomics_add_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
if (size_t(offset) >= instance->memory()->volatileMemoryLength())
return 0;
SharedMem<void*> heap = instance->memoryBase().cast<void*>();
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformAdd::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformAdd::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformAdd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformAdd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
}
int32_t
js::atomics_sub_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
if (size_t(offset) >= instance->memory()->volatileMemoryLength())
return 0;
SharedMem<void*> heap = instance->memoryBase().cast<void*>();
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformSub::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformSub::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformSub::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformSub::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
}
int32_t
js::atomics_and_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
if (size_t(offset) >= instance->memory()->volatileMemoryLength())
return 0;
SharedMem<void*> heap = instance->memoryBase().cast<void*>();
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformAnd::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformAnd::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformAnd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformAnd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
}
int32_t
js::atomics_or_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
if (size_t(offset) >= instance->memory()->volatileMemoryLength())
return 0;
SharedMem<void*> heap = instance->memoryBase().cast<void*>();
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformOr::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformOr::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformOr::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformOr::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
}
int32_t
js::atomics_xor_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
if (size_t(offset) >= instance->memory()->volatileMemoryLength())
return 0;
SharedMem<void*> heap = instance->memoryBase().cast<void*>();
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return PerformXor::operate(heap.cast<int8_t*>() + offset, value);
case Scalar::Uint8:
return PerformXor::operate(heap.cast<uint8_t*>() + offset, value);
case Scalar::Int16:
return PerformXor::operate(heap.cast<int16_t*>() + (offset >> 1), value);
case Scalar::Uint16:
return PerformXor::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
default:
MOZ_CRASH("Invalid size");
}
}
int32_t
js::atomics_xchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
{
if (size_t(offset) >= instance->memory()->volatileMemoryLength())
return 0;
SharedMem<void*> heap = instance->memoryBase().cast<void*>();
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return ExchangeOrStore<DoExchange>(Scalar::Int8, value, heap, offset);
case Scalar::Uint8:
return ExchangeOrStore<DoExchange>(Scalar::Uint8, value, heap, offset);
case Scalar::Int16:
return ExchangeOrStore<DoExchange>(Scalar::Int16, value, heap, offset>>1);
case Scalar::Uint16:
return ExchangeOrStore<DoExchange>(Scalar::Uint16, value, heap, offset>>1);
default:
MOZ_CRASH("Invalid size");
}
}
int32_t
js::atomics_cmpxchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t oldval, int32_t newval)
{
if (size_t(offset) >= instance->memory()->volatileMemoryLength())
return 0;
SharedMem<void*> heap = instance->memoryBase().cast<void*>();
switch (Scalar::Type(vt)) {
case Scalar::Int8:
return CompareExchange(Scalar::Int8, oldval, newval, heap, offset);
case Scalar::Uint8:
return CompareExchange(Scalar::Uint8, oldval, newval, heap, offset);
case Scalar::Int16:
return CompareExchange(Scalar::Int16, oldval, newval, heap, offset>>1);
case Scalar::Uint16:
return CompareExchange(Scalar::Uint16, oldval, newval, heap, offset>>1);
default:
MOZ_CRASH("Invalid size");
}
}
namespace js {
// Represents one waiting worker.


@@ -38,16 +38,6 @@ MOZ_MUST_USE bool atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp);
MOZ_MUST_USE bool atomics_wait(JSContext* cx, unsigned argc, Value* vp);
MOZ_MUST_USE bool atomics_wake(JSContext* cx, unsigned argc, Value* vp);
/* asm.js callouts */
namespace wasm { class Instance; }
int32_t atomics_add_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
int32_t atomics_sub_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
int32_t atomics_and_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
int32_t atomics_or_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
int32_t atomics_xor_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
int32_t atomics_cmpxchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t oldval, int32_t newval);
int32_t atomics_xchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
class FutexThread
{
friend class AutoLockFutexAPI;


@@ -2548,33 +2548,6 @@ CodeGeneratorARM::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins)
ToAnyRegister(ins->output()));
}
void
CodeGeneratorARM::visitWasmCompareExchangeCallout(LWasmCompareExchangeCallout* ins)
{
const MWasmCompareExchangeHeap* mir = ins->mir();
MOZ_ASSERT(mir->access().offset() == 0);
Register ptr = ToRegister(ins->ptr());
Register oldval = ToRegister(ins->oldval());
Register newval = ToRegister(ins->newval());
Register tls = ToRegister(ins->tls());
Register instance = ToRegister(ins->getTemp(0));
Register viewType = ToRegister(ins->getTemp(1));
MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
masm.ma_mov(Imm32(mir->access().type()), viewType);
masm.setupWasmABICall();
masm.passABIArg(instance);
masm.passABIArg(viewType);
masm.passABIArg(ptr);
masm.passABIArg(oldval);
masm.passABIArg(newval);
masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::AtomicCmpXchg);
}
void
CodeGeneratorARM::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)
{
@@ -2590,31 +2563,6 @@ CodeGeneratorARM::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)
srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
}
void
CodeGeneratorARM::visitWasmAtomicExchangeCallout(LWasmAtomicExchangeCallout* ins)
{
const MWasmAtomicExchangeHeap* mir = ins->mir();
MOZ_ASSERT(mir->access().offset() == 0);
Register ptr = ToRegister(ins->ptr());
Register value = ToRegister(ins->value());
Register tls = ToRegister(ins->tls());
Register instance = ToRegister(ins->getTemp(0));
Register viewType = ToRegister(ins->getTemp(1));
MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
masm.ma_mov(Imm32(mir->access().type()), viewType);
masm.setupWasmABICall();
masm.passABIArg(instance);
masm.passABIArg(viewType);
masm.passABIArg(ptr);
masm.passABIArg(value);
masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::AtomicXchg);
}
void
CodeGeneratorARM::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
{
@@ -2662,49 +2610,6 @@ CodeGeneratorARM::visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffect* ins)
atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
}
void
CodeGeneratorARM::visitWasmAtomicBinopCallout(LWasmAtomicBinopCallout* ins)
{
const MWasmAtomicBinopHeap* mir = ins->mir();
MOZ_ASSERT(mir->access().offset() == 0);
Register ptr = ToRegister(ins->ptr());
Register value = ToRegister(ins->value());
Register tls = ToRegister(ins->tls());
Register instance = ToRegister(ins->getTemp(0));
Register viewType = ToRegister(ins->getTemp(1));
masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
masm.move32(Imm32(mir->access().type()), viewType);
masm.setupWasmABICall();
masm.passABIArg(instance);
masm.passABIArg(viewType);
masm.passABIArg(ptr);
masm.passABIArg(value);
wasm::BytecodeOffset bytecodeOffset = mir->bytecodeOffset();
switch (mir->operation()) {
case AtomicFetchAddOp:
masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchAdd);
break;
case AtomicFetchSubOp:
masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchSub);
break;
case AtomicFetchAndOp:
masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchAnd);
break;
case AtomicFetchOrOp:
masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchOr);
break;
case AtomicFetchXorOp:
masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchXor);
break;
default:
MOZ_CRASH("Unknown op");
}
}
void
CodeGeneratorARM::visitWasmStackArg(LWasmStackArg* ins)
{


@@ -249,12 +249,9 @@ class CodeGeneratorARM : public CodeGeneratorShared
void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
void visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins);
void visitWasmCompareExchangeCallout(LWasmCompareExchangeCallout* ins);
void visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins);
void visitWasmAtomicExchangeCallout(LWasmAtomicExchangeCallout* ins);
void visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins);
void visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffect* ins);
void visitWasmAtomicBinopCallout(LWasmAtomicBinopCallout* ins);
void visitWasmStackArg(LWasmStackArg* ins);
void visitWasmTruncateToInt32(LWasmTruncateToInt32* ins);
void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);


@@ -462,98 +462,6 @@ class LSoftUDivOrMod : public LBinaryCallInstructionHelper<1, 0>
}
};
class LWasmCompareExchangeCallout : public LCallInstructionHelper<1, 4, 2>
{
public:
LIR_HEADER(WasmCompareExchangeCallout)
LWasmCompareExchangeCallout(const LAllocation& ptr, const LAllocation& oldval,
const LAllocation& newval, const LAllocation& tls,
const LDefinition& temp1, const LDefinition& temp2)
{
setOperand(0, ptr);
setOperand(1, oldval);
setOperand(2, newval);
setOperand(3, tls);
setTemp(0, temp1);
setTemp(1, temp2);
}
const LAllocation* ptr() {
return getOperand(0);
}
const LAllocation* oldval() {
return getOperand(1);
}
const LAllocation* newval() {
return getOperand(2);
}
const LAllocation* tls() {
return getOperand(3);
}
const MWasmCompareExchangeHeap* mir() const {
return mir_->toWasmCompareExchangeHeap();
}
};
class LWasmAtomicExchangeCallout : public LCallInstructionHelper<1, 3, 2>
{
public:
LIR_HEADER(WasmAtomicExchangeCallout)
LWasmAtomicExchangeCallout(const LAllocation& ptr, const LAllocation& value,
const LAllocation& tls, const LDefinition& temp1,
const LDefinition& temp2)
{
setOperand(0, ptr);
setOperand(1, value);
setOperand(2, tls);
setTemp(0, temp1);
setTemp(1, temp2);
}
const LAllocation* ptr() {
return getOperand(0);
}
const LAllocation* value() {
return getOperand(1);
}
const LAllocation* tls() {
return getOperand(2);
}
const MWasmAtomicExchangeHeap* mir() const {
return mir_->toWasmAtomicExchangeHeap();
}
};
class LWasmAtomicBinopCallout : public LCallInstructionHelper<1, 3, 2>
{
public:
LIR_HEADER(WasmAtomicBinopCallout)
LWasmAtomicBinopCallout(const LAllocation& ptr, const LAllocation& value,
const LAllocation& tls, const LDefinition& temp1,
const LDefinition& temp2)
{
setOperand(0, ptr);
setOperand(1, value);
setOperand(2, tls);
setTemp(0, temp1);
setTemp(1, temp2);
}
const LAllocation* ptr() {
return getOperand(0);
}
const LAllocation* value() {
return getOperand(1);
}
const LAllocation* tls() {
return getOperand(2);
}
const MWasmAtomicBinopHeap* mir() const {
return mir_->toWasmAtomicBinopHeap();
}
};
class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0>
{
public:


@@ -17,9 +17,6 @@
_(UDiv) \
_(UMod) \
_(SoftUDivOrMod) \
_(WasmCompareExchangeCallout) \
_(WasmAtomicExchangeCallout) \
_(WasmAtomicBinopCallout) \
_(DivOrModI64) \
_(UDivOrModI64) \
_(WasmTruncateToInt64) \


@@ -932,19 +932,7 @@ LIRGeneratorARM::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins)
}
MOZ_ASSERT(ins->access().type() < Scalar::Float32);
if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
MOZ_ASSERT(ins->access().offset() == 0);
LWasmCompareExchangeCallout* lir =
new(alloc()) LWasmCompareExchangeCallout(useFixedAtStart(base, IntArgReg2),
useFixedAtStart(ins->oldValue(), IntArgReg3),
useFixedAtStart(ins->newValue(), CallTempReg0),
useFixedAtStart(ins->tls(), WasmTlsReg),
tempFixed(IntArgReg0),
tempFixed(IntArgReg1));
defineReturn(lir, ins);
return;
}
MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
LWasmCompareExchangeHeap* lir =
new(alloc()) LWasmCompareExchangeHeap(useRegister(base),
@@ -971,17 +959,7 @@ LIRGeneratorARM::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins)
}
MOZ_ASSERT(ins->access().type() < Scalar::Float32);
if (byteSize(ins->access().type()) < 4 && !HasLDSTREXBHD()) {
MOZ_ASSERT(ins->access().offset() == 0);
// Call out on ARMv6.
defineReturn(new(alloc()) LWasmAtomicExchangeCallout(useFixedAtStart(ins->base(), IntArgReg2),
useFixedAtStart(ins->value(), IntArgReg3),
useFixedAtStart(ins->tls(), WasmTlsReg),
tempFixed(IntArgReg0),
tempFixed(IntArgReg1)), ins);
return;
}
MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
const LAllocation base = useRegister(ins->base());
const LAllocation value = useRegister(ins->value());
@@ -1004,22 +982,11 @@ LIRGeneratorARM::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins)
}
MOZ_ASSERT(ins->access().type() < Scalar::Float32);
MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType::Int32);
if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
MOZ_ASSERT(ins->access().offset() == 0);
LWasmAtomicBinopCallout* lir =
new(alloc()) LWasmAtomicBinopCallout(useFixedAtStart(base, IntArgReg2),
useFixedAtStart(ins->value(), IntArgReg3),
useFixedAtStart(ins->tls(), WasmTlsReg),
tempFixed(IntArgReg0),
tempFixed(IntArgReg1));
defineReturn(lir, ins);
return;
}
if (!ins->hasUses()) {
LWasmAtomicBinopHeapForEffect* lir =
new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),


@@ -112,11 +112,13 @@ class Simulator
explicit Simulator(JSContext* cx);
~Simulator();
static bool supportsAtomics() { return HasLDSTREXBHD(); }
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* Current();
static inline uintptr_t StackLimit() {
static uintptr_t StackLimit() {
return Simulator::Current()->stackLimit();
}


@@ -716,9 +716,12 @@ class Simulator : public DecoderVisitor {
void setFP32Result(float result);
void setFP64Result(double result);
void VisitCallRedirection(const Instruction* instr);
static inline uintptr_t StackLimit() {
static uintptr_t StackLimit() {
return Simulator::Current()->stackLimit();
}
static bool supportsAtomics() {
return true;
}
void ResetState();


@@ -506,27 +506,6 @@ AddressOf(SymbolicAddress imm, ABIFunctionType* abiType)
case SymbolicAddress::aeabi_uidivmod:
*abiType = Args_General2;
return FuncCast(__aeabi_uidivmod, *abiType);
case SymbolicAddress::AtomicCmpXchg:
*abiType = Args_General5;
return FuncCast(atomics_cmpxchg_asm_callout, *abiType);
case SymbolicAddress::AtomicXchg:
*abiType = Args_General4;
return FuncCast(atomics_xchg_asm_callout, *abiType);
case SymbolicAddress::AtomicFetchAdd:
*abiType = Args_General4;
return FuncCast(atomics_add_asm_callout, *abiType);
case SymbolicAddress::AtomicFetchSub:
*abiType = Args_General4;
return FuncCast(atomics_sub_asm_callout, *abiType);
case SymbolicAddress::AtomicFetchAnd:
*abiType = Args_General4;
return FuncCast(atomics_and_asm_callout, *abiType);
case SymbolicAddress::AtomicFetchOr:
*abiType = Args_General4;
return FuncCast(atomics_or_asm_callout, *abiType);
case SymbolicAddress::AtomicFetchXor:
*abiType = Args_General4;
return FuncCast(atomics_xor_asm_callout, *abiType);
#endif
case SymbolicAddress::ModD:
*abiType = Args_Double_DoubleDouble;
@@ -640,13 +619,6 @@ wasm::NeedsBuiltinThunk(SymbolicAddress sym)
#if defined(JS_CODEGEN_ARM)
case SymbolicAddress::aeabi_idivmod:
case SymbolicAddress::aeabi_uidivmod:
case SymbolicAddress::AtomicCmpXchg:
case SymbolicAddress::AtomicXchg:
case SymbolicAddress::AtomicFetchAdd:
case SymbolicAddress::AtomicFetchSub:
case SymbolicAddress::AtomicFetchAnd:
case SymbolicAddress::AtomicFetchOr:
case SymbolicAddress::AtomicFetchXor:
#endif
case SymbolicAddress::ModD:
case SymbolicAddress::SinD:


@@ -983,20 +983,6 @@ ThunkedNativeToDescription(SymbolicAddress func)
return "call to native i32.div_s (in wasm)";
case SymbolicAddress::aeabi_uidivmod:
return "call to native i32.div_u (in wasm)";
case SymbolicAddress::AtomicCmpXchg:
return "call to native atomic compare exchange (in wasm)";
case SymbolicAddress::AtomicXchg:
return "call to native atomic exchange (in wasm)";
case SymbolicAddress::AtomicFetchAdd:
return "call to native atomic fetch add (in wasm)";
case SymbolicAddress::AtomicFetchSub:
return "call to native atomic fetch sub (in wasm)";
case SymbolicAddress::AtomicFetchAnd:
return "call to native atomic fetch and (in wasm)";
case SymbolicAddress::AtomicFetchOr:
return "call to native atomic fetch or (in wasm)";
case SymbolicAddress::AtomicFetchXor:
return "call to native atomic fetch xor (in wasm)";
#endif
case SymbolicAddress::ModD:
return "call to asm.js native f64 % (mod)";


@@ -801,9 +801,7 @@ class FunctionCompiler
// Fold a constant base into the offset (so the base is 0 in which case
// the codegen is optimized), if it doesn't wrap or trigger an
// MWasmAddOffset.
if (!access->isAtomic() && !env_.isAsmJS() && // TODO bug 1421244
(*base)->isConstant())
{
if ((*base)->isConstant()) {
uint32_t basePtr = (*base)->toConstant()->toInt32();
uint32_t offset = access->offset();
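For context, the fold being relaxed here adds a constant base pointer into the access offset so that codegen can treat the base as 0; it is only legal when the 32-bit sum does not wrap, otherwise an explicit MWasmAddOffset is still required. A minimal sketch of that check, with simplified names rather than the actual SpiderMonkey helpers:

    #include <cstdint>

    // Sketch only: fold basePtr into *offset when the 32-bit sum cannot wrap;
    // return false to keep the explicit base + offset addition.
    static bool tryFoldConstantBase(uint32_t basePtr, uint32_t* offset) {
        uint64_t folded = uint64_t(basePtr) + uint64_t(*offset);
        if (folded > UINT32_MAX)
            return false;
        *offset = uint32_t(folded);  // base can now be treated as 0
        return true;
    }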


@@ -74,6 +74,11 @@ wasm::HasCompilerSupport(JSContext* cx)
return false;
#endif
#ifdef JS_SIMULATOR
if (!Simulator::supportsAtomics())
return false;
#endif
#if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
return false;
#else
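With the callouts gone, a platform either has inline sub-word atomics or wasm compiler support is simply reported as absent; on simulator builds the same question is delegated to Simulator::supportsAtomics() (HasLDSTREXBHD() on ARM, unconditionally true on ARM64). A simplified sketch of the resulting gate, not the verbatim WasmJS.cpp code:

    // Sketch of the compiler-support gate after this patch (simplified):
    static bool HasCompilerSupportSketch() {
    #ifdef JS_SIMULATOR
        if (!Simulator::supportsAtomics())  // e.g. ARM simulator with ARMHWCAP=vfp
            return false;
    #endif
    #if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
        return false;
    #else
        return true;
    #endif
    }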


@@ -1312,13 +1312,6 @@ enum class SymbolicAddress
#if defined(JS_CODEGEN_ARM)
aeabi_idivmod,
aeabi_uidivmod,
AtomicCmpXchg,
AtomicXchg,
AtomicFetchAdd,
AtomicFetchSub,
AtomicFetchAnd,
AtomicFetchOr,
AtomicFetchXor,
#endif
ModD,
SinD,