Mirror of https://github.com/mozilla/gecko-dev.git
Backed out changeset efe724729178 (bug 1175556) for mass build bustage in assembler code CLOSED TREE
Parent: b1a0212509
Commit: 535b1965fa

@@ -131,11 +131,6 @@ static const unsigned PushedRetAddr = 4;
static const unsigned PushedFP = 16;
static const unsigned StoredFP = 20;
static const unsigned PostStorePrePopFP = 4;
#elif defined(JS_CODEGEN_ARM64)
static const unsigned PushedRetAddr = 0;
static const unsigned PushedFP = 0;
static const unsigned StoredFP = 0;
static const unsigned PostStorePrePopFP = 0;
#elif defined(JS_CODEGEN_MIPS)
static const unsigned PushedRetAddr = 8;
static const unsigned PushedFP = 24;

@@ -221,7 +216,7 @@ GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, AsmJSExit:
Label* profilingReturn)
{
Register scratch = ABIArgGenerator::NonReturn_VolatileReg0;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS)
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
Register scratch2 = ABIArgGenerator::NonReturn_VolatileReg1;
#endif

@@ -245,11 +240,11 @@ GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, AsmJSExit:
// and the async interrupt exit. Since activation.fp can be read at any
// time and still points to the current frame, be careful to only update
// sp after activation.fp has been repointed to the caller's frame.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS)
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
masm.loadPtr(Address(masm.getStackPointer(), 0), scratch2);
masm.storePtr(scratch2, Address(scratch, AsmJSActivation::offsetOfFP()));
DebugOnly<uint32_t> prePop = masm.currentOffset();
masm.addToStackPtr(Imm32(sizeof(void *)));
masm.add32(Imm32(4), masm.getStackPointer());
MOZ_ASSERT(PostStorePrePopFP == masm.currentOffset() - prePop);
#else
masm.pop(Address(scratch, AsmJSActivation::offsetOfFP()));
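
The comment in this hunk states the invariant both versions of the epilogue preserve: a sampling profiler may read activation.fp between any two instructions, so fp has to be repointed at the caller's frame before sp moves past the saved slot. A minimal sketch of that ordering in plain C++, with hypothetical names standing in for the real AsmJSActivation layout:

```cpp
// Hypothetical stand-ins for illustration; not the real SpiderMonkey types.
struct Activation { void* fp; };

void profilingEpilogue(Activation* activation, void**& sp) {
    void* callerFP = *sp;        // saved caller frame pointer at the top of the stack
    activation->fp = callerFP;   // 1) repoint fp first: a sample taken here still
                                 //    sees a valid frame (the caller's)
    sp += 1;                     // 2) only then pop the slot; doing this first would
                                 //    open a window where activation->fp points into
                                 //    stack memory that sp has already released
}
```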

@@ -1787,9 +1787,6 @@ AsmJSModule::setProfilingEnabled(bool enabled, JSContext* cx)
BOffImm calleeOffset;
callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
void* callee = calleeOffset.getDest(callerInsn);
#elif defined(JS_CODEGEN_ARM64)
MOZ_CRASH();
void* callee = nullptr;
#elif defined(JS_CODEGEN_MIPS)
Instruction* instr = (Instruction*)(callerRetAddr - 4 * sizeof(uint32_t));
void* callee = (void*)Assembler::ExtractLuiOriValue(instr, instr->next());

@@ -1814,8 +1811,6 @@ AsmJSModule::setProfilingEnabled(bool enabled, JSContext* cx)
X86Encoding::SetRel32(callerRetAddr, newCallee);
#elif defined(JS_CODEGEN_ARM)
new (caller) InstBLImm(BOffImm(newCallee - caller), Assembler::Always);
#elif defined(JS_CODEGEN_ARM64)
MOZ_CRASH();
#elif defined(JS_CODEGEN_MIPS)
Assembler::WriteLuiOriInstructions(instr, instr->next(),
ScratchRegister, (uint32_t)newCallee);

@@ -1879,8 +1874,6 @@ AsmJSModule::setProfilingEnabled(bool enabled, JSContext* cx)
MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
new (jump) InstNOP();
}
#elif defined(JS_CODEGEN_ARM64)
MOZ_CRASH();
#elif defined(JS_CODEGEN_MIPS)
Instruction* instr = (Instruction*)jump;
if (enabled) {

@@ -1164,8 +1164,8 @@ RedirectJitCodeToInterruptCheck(JSRuntime* rt, CONTEXT* context)
const AsmJSModule& module = activation->module();

#ifdef JS_SIMULATOR
if (module.containsFunctionPC(rt->simulator()->get_pc_as<void*>()))
rt->simulator()->set_resume_pc(module.interruptExit());
if (module.containsFunctionPC((void*)rt->simulator()->get_pc()))
rt->simulator()->set_resume_pc(int32_t(module.interruptExit()));
#endif

uint8_t** ppc = ContextToPC(context);
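
Both variants in this hunk do the same thing with slightly different simulator signatures: if the simulated PC currently sits inside the module's generated code, park the resume PC at the module's interrupt exit so the pending interrupt is serviced before execution continues. A rough, self-contained sketch of that control flow, using hypothetical types rather than the actual SpiderMonkey/simulator API:

```cpp
#include <cstdint>

// Hypothetical types for illustration only.
struct Module {
    uintptr_t codeStart, codeEnd, interruptExit;
    bool containsFunctionPC(uintptr_t pc) const { return pc >= codeStart && pc < codeEnd; }
};

struct Sim {
    uintptr_t pc = 0;
    uintptr_t resumePC = 0;   // checked by the simulator before each step
};

// Mirrors the #ifdef JS_SIMULATOR branch above: redirect simulated execution
// to the interrupt exit instead of patching the real machine context.
void redirectToInterruptCheck(Sim& sim, const Module& module) {
    if (module.containsFunctionPC(sim.pc))
        sim.resumePC = module.interruptExit;
}
```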

@@ -9080,8 +9080,7 @@ GenerateAsyncInterruptExit(ModuleCompiler& m, Label* throwLabel)
masm.transferReg(lr);
masm.finishDataTransfer();
masm.ret();
#elif defined(JS_CODEGEN_ARM64)
MOZ_CRASH();
#elif defined (JS_CODEGEN_NONE)
MOZ_CRASH();
#else

@@ -112,14 +112,6 @@ GetBuildConfiguration(JSContext* cx, unsigned argc, jsval* vp)
if (!JS_SetProperty(cx, info, "arm-simulator", value))
return false;
#ifdef JS_SIMULATOR_ARM64
value = BooleanValue(true);
#else
value = BooleanValue(false);
#endif
if (!JS_SetProperty(cx, info, "arm64-simulator", value))
return false;
#ifdef MOZ_ASAN
value = BooleanValue(true);
#else

@@ -120,12 +120,6 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext* cx, bool match_only)
// registers we need.
masm.bind(&entry_label_);
#ifdef JS_CODEGEN_ARM64
// ARM64 communicates stack address via sp, but uses a pseudo-sp for addressing.
MOZ_ASSERT(!masm.GetStackPointer64().Is(sp));
masm.moveStackPtrTo(masm.getStackPointer());
#endif
// Push non-volatile registers which might be modified by jitcode.
size_t pushedNonVolatileRegisters = 0;
for (GeneralRegisterForwardIterator iter(savedNonVolatileRegisters); iter.more(); ++iter) {

@@ -393,7 +387,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext* cx, bool match_only)
// Save registers before calling C function
LiveGeneralRegisterSet volatileRegs(GeneralRegisterSet::Volatile());
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
#if defined(JS_CODEGEN_ARM)
volatileRegs.add(Register::FromCode(Registers::lr));
#elif defined(JS_CODEGEN_MIPS)
volatileRegs.add(Register::FromCode(Registers::ra));

@@ -9,8 +9,6 @@
#if defined(JS_CODEGEN_ARM)
# include "jit/arm/AtomicOperations-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/AtomicOperations-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/AtomicOperations-mips.h"
#elif defined(JS_CODEGEN_NONE)

@@ -21,4 +19,4 @@
# error "Atomic operations must be defined for this platform"
#endif
#endif // jit_AtomicOperations_inl_h
#endif // jit_AtomicOperations_inl_h

@@ -18,8 +18,6 @@
# include "jit/x64/CodeGenerator-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/CodeGenerator-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/CodeGenerator-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/CodeGenerator-mips.h"
#elif defined(JS_CODEGEN_NONE)
@@ -9,8 +9,6 @@
#if defined(JS_CODEGEN_ARM)
# include "jit/arm/Assembler-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/Assembler-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/Assembler-mips.h"
#endif

@@ -11,13 +11,11 @@
#if defined(JS_SIMULATOR_ARM)
#include "jit/arm/Simulator-arm.h"
#elif defined(JS_SIMULATOR_ARM64)
# include "jit/arm64/vixl/Simulator-vixl.h"
#elif defined(JS_SIMULATOR_MIPS)
#include "jit/mips/Simulator-mips.h"
#endif
#ifdef JS_SIMULATOR
#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS)
// Call into cross-jitted code by following the ABI of the simulated architecture.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(js::jit::Simulator::Current()->call( \

@@ -2593,12 +2593,6 @@ MachineState::FromBailout(RegisterDump::GPRArray& regs, RegisterDump::FPUArray&
machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Int32x4), &fpregs[i]);
machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Float32x4), &fpregs[i]);
}
#elif defined(JS_CODEGEN_ARM64)
for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
}
#elif defined(JS_CODEGEN_NONE)
MOZ_CRASH();
#else

@@ -1825,8 +1825,6 @@ LAllocation::toRegister() const
# include "jit/x86-shared/LIR-x86-shared.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/LIR-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/LIR-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/LIR-mips.h"
#elif defined(JS_CODEGEN_NONE)

@@ -359,8 +359,6 @@
# include "jit/x64/LOpcodes-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/LOpcodes-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/LOpcodes-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/LOpcodes-mips.h"
#elif defined(JS_CODEGEN_NONE)

@@ -17,8 +17,6 @@
# include "jit/x64/Lowering-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/Lowering-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/Lowering-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/Lowering-mips.h"
#elif defined(JS_CODEGEN_NONE)

@@ -1594,7 +1594,8 @@ MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
regs.take(bailoutInfo);
// Reset SP to the point where clobbering starts.
loadStackPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));
loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
BaselineStackReg);
Register copyCur = regs.takeAny();
Register copyEnd = regs.takeAny();

@@ -1609,7 +1610,7 @@ MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
bind(&copyLoop);
branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
subPtr(Imm32(4), copyCur);
subFromStackPtr(Imm32(4));
subPtr(Imm32(4), BaselineStackReg);
load32(Address(copyCur, 0), temp);
store32(temp, Address(BaselineStackReg, 0));
jump(&copyLoop);

@@ -2509,12 +2510,9 @@ MacroAssembler::MacroAssembler(JSContext* cx, IonScript* ion,
jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
alloc_.emplace(cx);
moveResolver_.setAllocator(*jitContext_->temp);
#if defined(JS_CODEGEN_ARM)
#ifdef JS_CODEGEN_ARM
initWithAllocator();
m_buffer.id = GetJitContext()->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
initWithAllocator();
armbuffer_.id = GetJitContext()->getNextAssemblerId();
#endif
if (ion) {
setFramePushed(ion->frameSize());

@@ -2709,12 +2707,12 @@ MacroAssembler::freeStack(uint32_t amount)
{
MOZ_ASSERT(amount <= framePushed_);
if (amount)
addToStackPtr(Imm32(amount));
addPtr(Imm32(amount), StackPointer);
framePushed_ -= amount;
}

void
MacroAssembler::freeStack(Register amount)
{
addToStackPtr(amount);
addPtr(amount, StackPointer);
}
@@ -17,8 +17,6 @@
# include "jit/x64/MacroAssembler-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MacroAssembler-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/MacroAssembler-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/MacroAssembler-mips.h"
#elif defined(JS_CODEGEN_NONE)

@@ -46,8 +44,6 @@
# define ONLY_X86_X64
#elif defined(JS_CODEGEN_ARM)
# define ONLY_X86_X64 = delete
#elif defined(JS_CODEGEN_ARM64)
# define ONLY_X86_X64 = delete
#elif defined(JS_CODEGEN_MIPS)
# define ONLY_X86_X64 = delete
#elif defined(JS_CODEGEN_NONE)

@@ -237,13 +233,9 @@ class MacroAssembler : public MacroAssemblerSpecific
}
moveResolver_.setAllocator(*jcx->temp);
#if defined(JS_CODEGEN_ARM)
#ifdef JS_CODEGEN_ARM
initWithAllocator();
m_buffer.id = jcx->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
initWithAllocator();
armbuffer_.id = jcx->getNextAssemblerId();
#endif
}

@@ -258,12 +250,9 @@ class MacroAssembler : public MacroAssemblerSpecific
: emitProfilingInstrumentation_(false),
framePushed_(0)
{
#if defined(JS_CODEGEN_ARM)
#ifdef JS_CODEGEN_ARM
initWithAllocator();
m_buffer.id = 0;
#elif defined(JS_CODEGEN_ARM64)
initWithAllocator();
armbuffer_.id = 0;
#endif
}

@@ -583,7 +572,7 @@ class MacroAssembler : public MacroAssemblerSpecific
}
#elif defined(JS_PUNBOX64)
if (dest.valueReg() != JSReturnReg)
mov(JSReturnReg, dest.valueReg());
movq(JSReturnReg, dest.valueReg());
#else
#error "Bad architecture"
#endif

@@ -11,8 +11,6 @@
# include "jit/x86-shared/MoveEmitter-x86-shared.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MoveEmitter-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/MoveEmitter-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/MoveEmitter-mips.h"
#elif defined(JS_CODEGEN_NONE)

@@ -278,10 +278,6 @@ class RegisterAllocator
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
allRegisters_.take(AnyRegister(HeapReg));
allRegisters_.take(AnyRegister(GlobalReg));
#elif defined(JS_CODEGEN_ARM64)
allRegisters_.take(AnyRegister(HeapReg));
allRegisters_.take(AnyRegister(HeapLenReg));
allRegisters_.take(AnyRegister(GlobalReg));
#endif
} else {
if (FramePointer != InvalidReg && mir->instrumentedProfiling())

@@ -14,8 +14,6 @@
# include "jit/x86-shared/Architecture-x86-shared.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/Architecture-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/Architecture-arm64.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/Architecture-mips.h"
#elif defined(JS_CODEGEN_NONE)

@@ -44,7 +42,8 @@ struct Register {
Register r = { Encoding(code) };
return r;
}
MOZ_CONSTEXPR Code code() const {
Code code() const {
MOZ_ASSERT(Code(reg_) < Registers::Total);
return Code(reg_);
}
Encoding encoding() const {

@@ -161,11 +161,8 @@ class Simulator
void set_pc(int32_t value);
int32_t get_pc() const;
template <typename T>
T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
void set_resume_pc(void* value) {
resume_pc_ = int32_t(value);
void set_resume_pc(int32_t value) {
resume_pc_ = value;
}
void enable_single_stepping(SingleStepCallback cb, void* arg);

@@ -323,7 +323,7 @@ Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc)
}

void
PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
{
MOZ_CRASH("PatchJump");
}

@@ -553,10 +553,10 @@ GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
return false;
*out = CallTempNonArgRegs[usedIntArgs];
return true;
}

void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
ReprotectCode reprotect = DontReprotect);
void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);

static inline void
PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
@@ -6,7 +6,7 @@
#include "jit/arm64/MacroAssembler-arm64.h"

#include "jit/arm64/MoveEmitter-arm64.h"
// TODO #include "jit/arm64/MoveEmitter-arm64.h"
#include "jit/arm64/SharedICRegisters-arm64.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"

@@ -179,6 +179,9 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
vixl::MacroAssembler::Push(scratch64);
}
}
void push(ImmMaybeNurseryPtr imm) {
push(noteMaybeNurseryPtr(imm));
}
void push(ARMRegister reg) {
vixl::MacroAssembler::Push(reg);
}

@@ -806,19 +809,13 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
writeDataRelocation(imm, load);
}
void movePtr(ImmMaybeNurseryPtr imm, Register dest) {
movePtr(noteMaybeNurseryPtr(imm), dest);
}
void mov(ImmWord imm, Register dest) {
movePtr(imm, dest);
}
void mov(ImmPtr imm, Register dest) {
movePtr(imm, dest);
}
void mov(AsmJSImmPtr imm, Register dest) {
movePtr(imm, dest);
}
void mov(Register src, Register dest) {
movePtr(src, dest);
}
void move32(Imm32 imm, Register dest) {
Mov(ARMRegister(dest, 32), (int64_t)imm.value);

@@ -1221,6 +1218,9 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
movePtr(rhs, scratch);
cmpPtr(lhs, scratch);
}
void cmpPtr(Register lhs, ImmMaybeNurseryPtr rhs) {
cmpPtr(lhs, noteMaybeNurseryPtr(rhs));
}
void cmpPtr(const Address& lhs, Register rhs) {
vixl::UseScratchRegisterScope temps(this);

@@ -1794,6 +1794,9 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
B(cond, label);
}
void branchPtr(Condition cond, Address lhs, ImmMaybeNurseryPtr ptr, Label* label) {
branchPtr(cond, lhs, noteMaybeNurseryPtr(ptr), label);
}
void branchPtr(Condition cond, Register lhs, Register rhs, Label* label) {
Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
B(label, cond);

@@ -215,12 +215,6 @@ class Register : public CPURegister {
return IsValidRegister();
}
js::jit::Register asUnsized() const {
if (code_ == kSPRegInternalCode)
return js::jit::Register::FromCode((js::jit::Register::Code)kZeroRegCode);
return js::jit::Register::FromCode((js::jit::Register::Code)code_);
}
static const Register& WRegFromCode(unsigned code);
static const Register& XRegFromCode(unsigned code);

@@ -503,19 +497,6 @@ class Operand {
// <shift_amount> is uint2_t.
explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
// FIXME: Temporary constructors for compilation.
// FIXME: These should be removed -- Operand should not leak into shared code.
// FIXME: Something like an LAllocationUnion for {gpreg, fpreg, Address} is wanted.
explicit Operand(js::jit::Register) {
MOZ_CRASH("Operand with Register");
}
explicit Operand(js::jit::FloatRegister) {
MOZ_CRASH("Operand with FloatRegister");
}
explicit Operand(js::jit::Register, int32_t) {
MOZ_CRASH("Operand with implicit Address");
}
bool IsImmediate() const;
bool IsShiftedRegister() const;
bool IsExtendedRegister() const;

@@ -584,10 +565,8 @@ class MemOperand {
AddrMode addrmode = Offset);
// Adapter constructors using C++11 delegating.
// TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary.
explicit MemOperand(js::jit::Address addr)
: MemOperand(addr.base.code() == 31 ? sp : Register(addr.base, 64),
(ptrdiff_t)addr.offset) {
: MemOperand(Register(addr.base, 64), (ptrdiff_t)addr.offset) {
}
const Register& base() const {
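
The TODO and the `code() == 31` test in the adapter above reflect an AArch64 encoding quirk: register number 31 names either the zero register (xzr) or the stack pointer (sp) depending on where it appears, and as the base of a memory operand it has to mean sp. A small sketch of that disambiguation with made-up names (the real VIXL classes are richer than this):

```cpp
// Illustration only; hypothetical enum, not the VIXL API.
enum class A64Reg { X0_TO_X30, SP, XZR };

// Register number 31 is context dependent in AArch64 encodings: as a memory
// base register it means sp, while most other operand positions mean xzr.
A64Reg regFromCode(unsigned code, bool isMemoryBase) {
    if (code == 31)
        return isMemoryBase ? A64Reg::SP : A64Reg::XZR;
    return A64Reg::X0_TO_X30;  // stands in for the ordinary register x<code>
}
```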

@@ -694,29 +673,6 @@ class Assembler : public MozBaseAssembler {
// called before executing or copying code from the buffer.
void FinalizeCode();
#define COPYENUM(v) static const Condition v = vixl::v
#define COPYENUM_(v) static const Condition v = vixl::v##_
COPYENUM(Equal);
COPYENUM(Zero);
COPYENUM(NotEqual);
COPYENUM(NonZero);
COPYENUM(AboveOrEqual);
COPYENUM(Below);
COPYENUM(Signed);
COPYENUM(NotSigned);
COPYENUM(Overflow);
COPYENUM(NoOverflow);
COPYENUM(Above);
COPYENUM(BelowOrEqual);
COPYENUM_(GreaterThanOrEqual);
COPYENUM_(LessThan);
COPYENUM_(GreaterThan);
COPYENUM_(LessThanOrEqual);
COPYENUM(Always);
COPYENUM(Never);
#undef COPYENUM
#undef COPYENUM_
// Bit set when a DoubleCondition does not map to a single ARM condition.
// The MacroAssembler must special-case these conditions, or else
// ConditionFromDoubleCondition will complain.

@@ -1204,7 +1204,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format, const CPURegister& ar
Adr(x0, &format_address);
// Emit the format string directly in the instruction stream.
{
{
flushBuffer();
Label after_data;
B(&after_data);

@@ -61,16 +61,16 @@ ptrdiff_t Assembler::LinkAndGetOffsetTo(BufferOffset branch, Label* label) {
if (armbuffer_.oom())
return js::jit::LabelBase::INVALID_OFFSET;
// The label is bound: all uses are already linked.
if (label->bound()) {
// The label is bound: all uses are already linked.
ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() / element_size);
ptrdiff_t label_offset = ptrdiff_t(label->offset() / element_size);
return label_offset - branch_offset;
}
// The label is unbound and unused: store the offset in the label itself
// for patching by bind().
if (!label->used()) {
// The label is unbound and unused: store the offset in the label itself
// for patching by bind().
label->use(branch.getOffset());
return js::jit::LabelBase::INVALID_OFFSET;
}

@@ -120,7 +120,7 @@ void Assembler::b(Instruction* at, int imm19, Condition cond) {
BufferOffset Assembler::b(Label* label) {
// Flush the instruction buffer if necessary before getting an offset.
// Flush the instruction buffer before calculating relative offset.
BufferOffset branch = b(0);
Instruction* ins = getInstructionAt(branch);
VIXL_ASSERT(ins->IsUncondBranchImm());

@@ -132,7 +132,7 @@ BufferOffset Assembler::b(Label* label) {
BufferOffset Assembler::b(Label* label, Condition cond) {
// Flush the instruction buffer if necessary before getting an offset.
// Flush the instruction buffer before calculating relative offset.
BufferOffset branch = b(0, Always);
Instruction* ins = getInstructionAt(branch);
VIXL_ASSERT(ins->IsCondBranchImm());

@@ -154,7 +154,7 @@ void Assembler::bl(Instruction* at, int imm26) {
void Assembler::bl(Label* label) {
// Flush the instruction buffer if necessary before getting an offset.
// Flush the instruction buffer before calculating relative offset.
BufferOffset branch = b(0);
Instruction* ins = getInstructionAt(branch);

@@ -174,7 +174,7 @@ void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
void Assembler::cbz(const Register& rt, Label* label) {
// Flush the instruction buffer if necessary before getting an offset.
// Flush the instruction buffer before calculating relative offset.
BufferOffset branch = b(0);
Instruction* ins = getInstructionAt(branch);

@@ -194,7 +194,7 @@ void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
void Assembler::cbnz(const Register& rt, Label* label) {
// Flush the instruction buffer if necessary before getting an offset.
// Flush the instruction buffer before calculating relative offset.
BufferOffset branch = b(0);
Instruction* ins = getInstructionAt(branch);

@@ -216,7 +216,7 @@ void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int i
void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
// Flush the instruction buffer if necessary before getting an offset.
// Flush the instruction buffer before calculating relative offset.
BufferOffset branch = b(0);
Instruction* ins = getInstructionAt(branch);

@@ -238,7 +238,7 @@ void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int
void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
// Flush the instruction buffer if necessary before getting an offset.
// Flush the instruction buffer before calculating relative offset.
BufferOffset branch = b(0);
Instruction* ins = getInstructionAt(branch);

@@ -260,8 +260,8 @@ void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
void Assembler::adr(const Register& rd, Label* label) {
// Flush the instruction buffer if necessary before getting an offset.
// Note that ADR is not a branch, but it encodes an offset like a branch.
// Flush the instruction buffer before calculating relative offset.
// ADR is not a branch.
BufferOffset offset = Emit(0);
Instruction* ins = getInstructionAt(offset);

@@ -285,7 +285,6 @@ void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
void Assembler::adrp(const Register& rd, Label* label) {
VIXL_ASSERT(AllowPageOffsetDependentCode());
// Flush the instruction buffer if necessary before getting an offset.
BufferOffset offset = Emit(0);
Instruction* ins = getInstructionAt(offset);

@@ -402,8 +401,7 @@ bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr
// as written by InsertIndexIntoTag().
uint32_t index = load->ImmLLiteral();
// Each entry in the literal pool is uint32_t-sized,
// but literals may use multiple entries.
// Each entry in the literal pool is uint32_t-sized.
uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);

@@ -426,10 +424,6 @@ struct PoolHeader {
union {
struct {
uint32_t size : 15;
// "Natural" guards are part of the normal instruction stream,
// while "non-natural" guards are inserted for the sole purpose
// of skipping around a pool.
bool isNatural : 1;
uint32_t ONES : 16;
};

@@ -475,13 +469,14 @@ void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool is
JS_STATIC_ASSERT(sizeof(PoolHeader) == 4);
// Get the total size of the pool.
const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
const uintptr_t totalPoolInstructions = totalPoolSize / sizeof(Instruction);
uint8_t* pool = start + sizeof(PoolHeader) + p->getPoolSize();
VIXL_ASSERT((totalPoolSize & 0x3) == 0);
VIXL_ASSERT(totalPoolInstructions < (1 << 15));
uintptr_t size = pool - start;
VIXL_ASSERT((size & 3) == 0);
size = size >> 2;
VIXL_ASSERT(size < (1 << 15));
PoolHeader header(totalPoolInstructions, isNatural);
PoolHeader header(size, isNatural);
*(PoolHeader*)start = header;
}
@@ -157,7 +157,7 @@ Simulator* Simulator::Create() {
sim->init(decoder, stdout);
return sim;
}
}

void Simulator::Destroy(Simulator* sim) {

@@ -207,8 +207,8 @@ bool Simulator::overRecursedWithExtra(uint32_t extra) const {
}

void Simulator::set_resume_pc(void* new_resume_pc) {
resume_pc_ = AddressUntag(reinterpret_cast<Instruction*>(new_resume_pc));
void Simulator::set_resume_pc(const Instruction* new_resume_pc) {
resume_pc_ = AddressUntag(new_resume_pc);
}

@@ -29,7 +29,6 @@
#include "mozilla/Vector.h"
#include "js-config.h"
#include "jsalloc.h"
#include "jit/arm64/vixl/Assembler-vixl.h"

@@ -41,8 +40,6 @@
#include "jit/IonTypes.h"
#include "vm/PosixNSPR.h"
#ifdef JS_SIMULATOR_ARM64
#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \
JS_BEGIN_MACRO \
if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) { \

@@ -341,26 +338,17 @@ class Simulator : public DecoderVisitor {
void ResetState();
static inline uintptr_t StackLimit() {
return Simulator::Current()->stackLimit();
}
// Run the simulator.
virtual void Run();
void RunFrom(const Instruction* first);
// Simulation helpers.
const Instruction* pc() const { return pc_; }
const Instruction* get_pc() const { return pc_; }
template <typename T>
T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); }
void set_pc(const Instruction* new_pc) {
pc_ = AddressUntag(new_pc);
pc_modified_ = true;
}
void set_resume_pc(void* new_resume_pc);
void set_resume_pc(const Instruction* new_resume_pc);
void increment_pc() {
if (!pc_modified_) {

@@ -942,8 +930,9 @@ class Simulator : public DecoderVisitor {
// Stack
byte* stack_;
static const int stack_protection_size_ = 128 * KBytes;
static const int stack_size_ = (2 * MBytes) + (2 * stack_protection_size_);
static const int stack_protection_size_ = 256;
// 2 KB stack.
static const int stack_size_ = 2 * 1024 + 2 * stack_protection_size_;
byte* stack_limit_;
Decoder* decoder_;

@@ -985,5 +974,4 @@ class Simulator : public DecoderVisitor {
};
} // namespace vixl
#endif // JS_SIMULATOR_ARM64
#endif // VIXL_A64_SIMULATOR_A64_H_

@@ -184,11 +184,8 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
template <typename T>
T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
void set_resume_pc(void* value) {
resume_pc_ = int32_t(value);
void set_resume_pc(int32_t value) {
resume_pc_ = value;
}
// Accessor to the internal simulator stack area.

@@ -18,17 +18,15 @@
#include "jit/RegisterSets.h"
#include "vm/HelperThreads.h"
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
// Push return addresses callee-side.
# define JS_USE_LINK_REGISTER
#if defined(JS_CODEGEN_ARM)
#define JS_USE_LINK_REGISTER
#endif
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
// JS_SMALL_BRANCH means the range on a branch instruction
// is smaller than the whole address space
# define JS_SMALL_BRANCH
# define JS_SMALL_BRANCH
#endif
namespace js {
namespace jit {

@@ -708,13 +706,9 @@ static const uint32_t AsmJSFrameBytesAfterReturnAddress = sizeof(void*);
// everywhere. Values are asserted in AsmJSModule.h.
static const unsigned AsmJSActivationGlobalDataOffset = 0;
static const unsigned AsmJSHeapGlobalDataOffset = sizeof(void*);
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
static const unsigned AsmJSNaN64GlobalDataOffset = 3 * sizeof(void*);
static const unsigned AsmJSNaN32GlobalDataOffset = 3 * sizeof(void*) + sizeof(double);
#else
static const unsigned AsmJSNaN64GlobalDataOffset = 4 * sizeof(void*);
static const unsigned AsmJSNaN32GlobalDataOffset = 4 * sizeof(void*) + sizeof(double);
#endif
static const unsigned AsmJSNaN64GlobalDataOffset = 2 * sizeof(void*);
static const unsigned AsmJSNaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
// Summarizes a heap access made by asm.js code that needs to be patched later
// and/or looked up by the asm.js signal handlers. Different architectures need
// to know different things (x64: offset and length, ARM: where to patch in

@@ -775,7 +769,7 @@ class AsmJSHeapAccess
cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
}
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS)
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
explicit AsmJSHeapAccess(uint32_t insnOffset)
{
mozilla::PodZero(this); // zero padding for Valgrind
@@ -221,7 +221,7 @@ CodeGeneratorShared::ToOperand(const LAllocation& a)
return Operand(a.toGeneralReg()->reg());
if (a.isFloatReg())
return Operand(a.toFloatReg()->reg());
return Operand(masm.getStackPointer(), ToStackOffset(&a));
return Operand(StackPointer, ToStackOffset(&a));
}

Operand

@@ -1385,12 +1385,12 @@ CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
Register dest = ool->dest();
saveVolatile(dest);
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
#if defined(JS_CODEGEN_ARM)
if (ool->needFloat32Conversion()) {
masm.convertFloat32ToDouble(src, ScratchDoubleReg);
src = ScratchDoubleReg;
}
#else
FloatRegister srcSingle = src.asSingle();
if (ool->needFloat32Conversion()) {

@@ -1400,7 +1400,6 @@ CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
src = src.asDouble();
}
#endif
masm.setupUnalignedABICall(1, dest);
masm.passABIArg(src, MoveOp::DOUBLE);
if (gen->compilingAsmJS())

@@ -1409,12 +1408,12 @@ CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
masm.storeCallResult(dest);
#if !defined(JS_CODEGEN_ARM) && !defined(JS_CODEGEN_ARM64)
#if !defined(JS_CODEGEN_ARM)
if (ool->needFloat32Conversion())
masm.pop(srcSingle);
#endif
restoreVolatile(dest);
masm.jump(ool->rejoin());
}

@@ -823,7 +823,7 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
return;
unsigned destOffset = branch.getOffset() + offset;
if (offset > 0) {
while (curpool < numDumps_ && poolInfo_[curpool].offset <= (size_t)destOffset) {
while (curpool < numDumps_ && poolInfo_[curpool].offset <= destOffset) {
offset += poolInfo_[curpool].size;
curpool++;
}

@@ -311,7 +311,7 @@ LIRGeneratorShared::useRegisterOrNonDoubleConstant(MDefinition* mir)
return useRegister(mir);
}
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
#if defined(JS_CODEGEN_ARM)
LAllocation
LIRGeneratorShared::useAnyOrConstant(MDefinition* mir)
{

@@ -32,7 +32,6 @@
#include "asmjs/AsmJSSignalHandlers.h"
#include "jit/arm/Simulator-arm.h"
#include "jit/arm64/vixl/Simulator-vixl.h"
#include "jit/JitCompartment.h"
#include "jit/mips/Simulator-mips.h"
#include "jit/PcScriptCache.h"

@@ -65,12 +65,6 @@ extern mozilla::ThreadLocal<PerThreadData*> TlsPerThreadData;
struct DtoaState;
#ifdef JS_SIMULATOR_ARM64
namespace vixl {
class Simulator;
}
#endif
namespace js {
extern MOZ_COLD void

@@ -92,14 +86,9 @@ namespace jit {
class JitRuntime;
class JitActivation;
struct PcScriptCache;
class Simulator;
struct AutoFlushICache;
class CompileRuntime;
#ifdef JS_SIMULATOR_ARM64
typedef vixl::Simulator Simulator;
#elif defined(JS_SIMULATOR)
class Simulator;
#endif
}
/*