diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp index c8cf32f36b1e..9ca1f4f79c9d 100644 --- a/js/src/jit/MacroAssembler.cpp +++ b/js/src/jit/MacroAssembler.cpp @@ -1551,7 +1551,7 @@ MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo) { // Prepare a register set for use in this case. AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); - MOZ_ASSERT(!regs.has(getStackPointer())); + MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()), !regs.has(AsRegister(getStackPointer()))); regs.take(bailoutInfo); // Reset SP to the point where clobbering starts. @@ -3141,12 +3141,18 @@ MacroAssembler::wasmEmitOldTrapOutOfLineCode() } void -MacroAssembler::wasmEmitStackCheck(Register sp, Register scratch, Label* onOverflow) +MacroAssembler::wasmEmitStackCheck(RegisterOrSP sp, Register scratch, Label* onOverflow) { - branchPtr(Assembler::AboveOrEqual, - Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)), - sp, - onOverflow); + if (IsHiddenSP(sp)) { + branchStackPtrRhs(Assembler::AboveOrEqual, + Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)), + onOverflow); + } else { + branchPtr(Assembler::AboveOrEqual, + Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)), + AsRegister(sp), + onOverflow); + } } void diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h index 6d586bb8b093..d84f9cfb1385 100644 --- a/js/src/jit/MacroAssembler.h +++ b/js/src/jit/MacroAssembler.h @@ -421,6 +421,10 @@ class MacroAssembler : public MacroAssemblerSpecific return size(); } +#ifdef JS_HAS_HIDDEN_SP + void Push(RegisterOrSP reg); +#endif + //{{{ check_macroassembler_decl_style public: // =============================================================== @@ -1564,7 +1568,7 @@ class MacroAssembler : public MacroAssemblerSpecific void wasmEmitOldTrapOutOfLineCode(); // Perform a stack-overflow test, branching to the given Label on overflow. 
- void wasmEmitStackCheck(Register sp, Register scratch, Label* onOverflow); + void wasmEmitStackCheck(RegisterOrSP sp, Register scratch, Label* onOverflow); void emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register temp1, Register temp2, Register temp3, Label* noBarrier); diff --git a/js/src/jit/MoveResolver.cpp b/js/src/jit/MoveResolver.cpp index 7af184fbce19..bd71d0acea44 100644 --- a/js/src/jit/MoveResolver.cpp +++ b/js/src/jit/MoveResolver.cpp @@ -35,7 +35,10 @@ MoveOperand::MoveOperand(MacroAssembler& masm, const ABIArg& arg) break; case ABIArg::Stack: kind_ = MEMORY; - code_ = masm.getStackPointer().code(); + if (IsHiddenSP(masm.getStackPointer())) + MOZ_CRASH("Hidden SP cannot be represented as register code on this platform"); + else + code_ = AsRegister(masm.getStackPointer()).code(); disp_ = arg.offsetFromArgBase(); break; case ABIArg::Uninitialized: diff --git a/js/src/jit/Registers.h b/js/src/jit/Registers.h index 1c02aacb1079..572554ffe160 100644 --- a/js/src/jit/Registers.h +++ b/js/src/jit/Registers.h @@ -117,6 +117,82 @@ struct Register { } }; +// Architectures where the stack pointer is not a plain register with a standard +// register encoding must define JS_HAS_HIDDEN_SP and HiddenSPEncoding. + +#ifdef JS_HAS_HIDDEN_SP +struct RegisterOrSP +{ + // The register code -- but possibly one that cannot be represented as a bit + // position in a 32-bit vector. 
+    uint32_t code;
+
+    explicit RegisterOrSP(uint32_t code) : code(code) {}
+    explicit RegisterOrSP(Register r) : code(r.code()) {}
+};
+
+static inline bool
+IsHiddenSP(RegisterOrSP r)
+{
+    return r.code == HiddenSPEncoding;
+}
+
+static inline Register
+AsRegister(RegisterOrSP r)
+{
+    MOZ_ASSERT(!IsHiddenSP(r));
+    return Register::FromCode(r.code);
+}
+
+inline bool
+operator == (Register r, RegisterOrSP e) {
+    return r.code() == e.code;
+}
+
+inline bool
+operator != (Register r, RegisterOrSP e) {
+    return !(r == e);
+}
+
+inline bool
+operator == (RegisterOrSP e, Register r) {
+    return r == e;
+}
+
+inline bool
+operator != (RegisterOrSP e, Register r) {
+    return r != e;
+}
+
+inline bool
+operator == (RegisterOrSP lhs, RegisterOrSP rhs) {
+    return lhs.code == rhs.code;
+}
+
+inline bool
+operator != (RegisterOrSP lhs, RegisterOrSP rhs) {
+    return !(lhs == rhs);
+}
+#else
+// On platforms where there's nothing special about SP, make RegisterOrSP be
+// just Register, and return false for IsHiddenSP(r) for any r so that we use
+// "normal" code for handling the SP. This reduces ifdeffery throughout the
+// jit.
+typedef Register RegisterOrSP; + +static inline bool +IsHiddenSP(RegisterOrSP r) +{ + return false; +} + +static inline Register +AsRegister(RegisterOrSP r) +{ + return r; +} +#endif + template <> inline Register::SetType Register::LiveAsIndexableSet(SetType set) { diff --git a/js/src/jit/arm64/Architecture-arm64.h b/js/src/jit/arm64/Architecture-arm64.h index 18a64b049200..05e72b54fcd8 100644 --- a/js/src/jit/arm64/Architecture-arm64.h +++ b/js/src/jit/arm64/Architecture-arm64.h @@ -10,10 +10,14 @@ #include "mozilla/Assertions.h" #include "mozilla/MathAlgorithms.h" +#include "jit/arm64/vixl/Instructions-vixl.h" #include "jit/shared/Architecture-shared.h" #include "js/Utility.h" +#define JS_HAS_HIDDEN_SP +static const uint32_t HiddenSPEncoding = vixl::kSPRegInternalCode; + namespace js { namespace jit { diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp index 1c7c9b018c2f..7e1f560f7bea 100644 --- a/js/src/jit/arm64/MacroAssembler-arm64.cpp +++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp @@ -219,10 +219,19 @@ MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler, Label* profile void MacroAssemblerCompat::profilerEnterFrame(Register framePtr, Register scratch) +{ + profilerEnterFrame(RegisterOrSP(framePtr), scratch); +} + +void +MacroAssemblerCompat::profilerEnterFrame(RegisterOrSP framePtr, Register scratch) { asMasm().loadJSContext(scratch); loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch); - storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame())); + if (IsHiddenSP(framePtr)) + storeStackPtr(Address(scratch, JitActivation::offsetOfLastProfilingFrame())); + else + storePtr(AsRegister(framePtr), Address(scratch, JitActivation::offsetOfLastProfilingFrame())); storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite())); } @@ -242,6 +251,16 @@ MacroAssembler::reserveStack(uint32_t amount) adjustFrame(amount); } +void 
+MacroAssembler::Push(RegisterOrSP reg)
+{
+    if (IsHiddenSP(reg))
+        push(sp);
+    else
+        push(AsRegister(reg));
+    adjustFrame(sizeof(intptr_t));
+}
+
 //{{{ check_macroassembler_style
 // ===============================================================
 // MacroAssembler high-level usage.
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h
index 1147a6ecca7c..0b59b1c74aef 100644
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -72,11 +72,18 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
     bool oom() const {
         return Assembler::oom() || !enoughMemory_;
     }
+    static ARMRegister toARMRegister(RegisterOrSP r, size_t size) {
+        if (IsHiddenSP(r)) {
+            MOZ_ASSERT(size == 64);
+            return sp;
+        }
+        return ARMRegister(AsRegister(r), size);
+    }
     static MemOperand toMemOperand(const Address& a) {
-        return MemOperand(ARMRegister(a.base, 64), a.offset);
+        return MemOperand(toARMRegister(a.base, 64), a.offset);
     }
     void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr, vixl::LoadStoreOp op) {
-        const ARMRegister base = ARMRegister(addr.base, 64);
+        const ARMRegister base = toARMRegister(addr.base, 64);
         const ARMRegister index = ARMRegister(addr.index, 64);
         const unsigned scale = addr.scale;
 
@@ -175,6 +182,11 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
     void push(Register reg) {
         vixl::MacroAssembler::Push(ARMRegister(reg, 64));
     }
+    void push(RegisterOrSP reg) {
+        if (IsHiddenSP(reg))
+            vixl::MacroAssembler::Push(sp);
+        else vixl::MacroAssembler::Push(toARMRegister(reg, 64));
+    }
     void push(Register r0, Register r1) {
         vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64));
     }
@@ -761,7 +773,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
         Ldr(ARMRegister(dest, 64), MemOperand(address));
     }
     void loadPtr(const BaseIndex& src, Register dest) {
-        Register base = src.base;
+        ARMRegister base = toARMRegister(src.base, 64);
         uint32_t scale = 
Imm32::ShiftOf(src.scale).value; ARMRegister dest64(dest, 64); ARMRegister index64(src.index, 64); @@ -769,16 +781,16 @@ class MacroAssemblerCompat : public vixl::MacroAssembler if (src.offset) { vixl::UseScratchRegisterScope temps(this); const ARMRegister scratch = temps.AcquireX(); - MOZ_ASSERT(!scratch.Is(ARMRegister(base, 64))); + MOZ_ASSERT(!scratch.Is(base)); MOZ_ASSERT(!scratch.Is(dest64)); MOZ_ASSERT(!scratch.Is(index64)); - Add(scratch, ARMRegister(base, 64), Operand(int64_t(src.offset))); + Add(scratch, base, Operand(int64_t(src.offset))); Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale)); return; } - Ldr(dest64, MemOperand(ARMRegister(base, 64), index64, vixl::LSL, scale)); + Ldr(dest64, MemOperand(base, index64, vixl::LSL, scale)); } void loadPrivate(const Address& src, Register dest); @@ -1077,7 +1089,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler Ldr(ARMFPRegister(dest, 64), MemOperand(src)); } void loadDouble(const BaseIndex& src, FloatRegister dest) { - ARMRegister base(src.base, 64); + ARMRegister base = toARMRegister(src.base, 64); ARMRegister index(src.index, 64); if (src.offset == 0) { @@ -1098,7 +1110,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32)); } void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) { - ARMRegister base(src.base, 64); + ARMRegister base = toARMRegister(src.base, 64); ARMRegister index(src.index, 64); if (src.offset == 0) { Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); @@ -1118,7 +1130,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler Ldr(ARMFPRegister(dest, 32), toMemOperand(addr)); } void loadFloat32(const BaseIndex& src, FloatRegister dest) { - ARMRegister base(src.base, 64); + ARMRegister base = toARMRegister(src.base, 64); ARMRegister index(src.index, 64); if (src.offset == 0) { Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); 
@@ -1875,11 +1887,14 @@ class MacroAssemblerCompat : public vixl::MacroAssembler } void computeEffectiveAddress(const Address& address, Register dest) { - Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset)); + Add(ARMRegister(dest, 64), toARMRegister(address.base, 64), Operand(address.offset)); + } + void computeEffectiveAddress(const Address& address, RegisterOrSP dest) { + Add(toARMRegister(dest, 64), toARMRegister(address.base, 64), Operand(address.offset)); } void computeEffectiveAddress(const BaseIndex& address, Register dest) { ARMRegister dest64(dest, 64); - ARMRegister base64(address.base, 64); + ARMRegister base64 = toARMRegister(address.base, 64); ARMRegister index64(address.index, 64); Add(dest64, base64, Operand(index64, vixl::LSL, address.scale)); @@ -1895,6 +1910,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler void handleFailureWithHandlerTail(void* handler, Label* profilerExitTail); void profilerEnterFrame(Register framePtr, Register scratch); + void profilerEnterFrame(RegisterOrSP framePtr, Register scratch); void profilerExitFrame() { jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail()); } @@ -1960,7 +1976,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler #ifdef DEBUG vixl::UseScratchRegisterScope temps(this); const ARMRegister scratch64 = temps.AcquireX(); - MOZ_ASSERT(scratch64.asUnsized() != reg.asUnsized()); + MOZ_ASSERT_IF(!reg.IsSP(), scratch64.asUnsized() != reg.asUnsized()); Label aligned; Mov(scratch64, reg); Tst(scratch64, Operand(StackAlignment - 1)); diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.h b/js/src/jit/arm64/vixl/Assembler-vixl.h index a59eca6e22ff..3f1d659f83aa 100644 --- a/js/src/jit/arm64/vixl/Assembler-vixl.h +++ b/js/src/jit/arm64/vixl/Assembler-vixl.h @@ -265,8 +265,10 @@ class Register : public CPURegister { } js::jit::Register asUnsized() const { - if (code_ == kSPRegInternalCode) - return 
js::jit::Register::FromCode((js::jit::Register::Code)kZeroRegCode); + // asUnsized() is only ever used on temp registers or on registers that + // are known not to be SP, and there should be no risk of it being + // applied to SP. Check anyway. + VIXL_ASSERT(code_ != kSPRegInternalCode); return js::jit::Register::FromCode((js::jit::Register::Code)code_); } @@ -706,6 +708,9 @@ class Operand { explicit Operand(js::jit::Register, int32_t) { MOZ_CRASH("Operand with implicit Address"); } + explicit Operand(js::jit::RegisterOrSP, int32_t) { + MOZ_CRASH("Operand with implicit Address"); + } bool IsImmediate() const; bool IsShiftedRegister() const; @@ -777,7 +782,7 @@ class MemOperand { // Adapter constructors using C++11 delegating. // TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary. explicit MemOperand(js::jit::Address addr) - : MemOperand(addr.base.code() == 31 ? sp : Register(addr.base, 64), + : MemOperand(IsHiddenSP(addr.base) ? sp : Register(AsRegister(addr.base), 64), (ptrdiff_t)addr.offset) { } diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h index 352794432f9a..4fc0aa75a0d8 100644 --- a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h +++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h @@ -2224,12 +2224,8 @@ class MacroAssembler : public js::jit::Assembler { return sp_; } - const js::jit::Register getStackPointer() const { - int code = sp_.code(); - if (code == kSPRegInternalCode) { - code = 31; - } - return js::jit::Register::FromCode(code); + const js::jit::RegisterOrSP getStackPointer() const { + return js::jit::RegisterOrSP(sp_.code()); } CPURegList* TmpList() { return &tmp_list_; } diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h index 7a5243975728..1a38d3a8c3ae 100644 --- a/js/src/jit/shared/Assembler-shared.h +++ b/js/src/jit/shared/Assembler-shared.h @@ -308,13 +308,18 @@ struct PatchedAbsoluteAddress // 32-bit offset. 
struct Address { - Register base; + RegisterOrSP base; int32_t offset; - Address(Register base, int32_t offset) : base(base), offset(offset) + Address(Register base, int32_t offset) : base(RegisterOrSP(base)), offset(offset) { } - Address() : base(Registers::Invalid), offset(0) +#ifdef JS_HAS_HIDDEN_SP + Address(RegisterOrSP base, int32_t offset) : base(base), offset(offset) + { } +#endif + + Address() : base(RegisterOrSP(Registers::Invalid)), offset(0) { } }; @@ -340,17 +345,23 @@ HighWord(const Address& address) { // index with a scale, and a constant, 32-bit offset. struct BaseIndex { - Register base; + RegisterOrSP base; Register index; Scale scale; int32_t offset; BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0) - : base(base), index(index), scale(scale), offset(offset) + : base(RegisterOrSP(base)), index(index), scale(scale), offset(offset) { } +#ifdef JS_HAS_HIDDEN_SP + BaseIndex(RegisterOrSP base, Register index, Scale scale, int32_t offset = 0) + : base(base), index(index), scale(scale), offset(offset) + { } +#endif + BaseIndex() - : base(Registers::Invalid) + : base(RegisterOrSP(Registers::Invalid)) , index(Registers::Invalid) , scale(TimesOne) , offset(0) @@ -384,8 +395,14 @@ HighWord(const BaseIndex& address) { struct BaseValueIndex : BaseIndex { BaseValueIndex(Register base, Register index, int32_t offset = 0) + : BaseIndex(RegisterOrSP(base), index, ValueScale, offset) + { } + +#ifdef JS_HAS_HIDDEN_SP + BaseValueIndex(RegisterOrSP base, Register index, int32_t offset = 0) : BaseIndex(base, index, ValueScale, offset) { } +#endif }; // Specifies the address of an indexed Value within object elements from a @@ -397,6 +414,14 @@ struct BaseObjectElementIndex : BaseValueIndex { NativeObject::elementsSizeMustNotOverflow(); } + +#ifdef JS_HAS_HIDDEN_SP + BaseObjectElementIndex(RegisterOrSP base, Register index, int32_t offset = 0) + : BaseValueIndex(base, index, offset) + { + NativeObject::elementsSizeMustNotOverflow(); + } +#endif 
}; // Like BaseObjectElementIndex, except for object slots. @@ -407,6 +432,14 @@ struct BaseObjectSlotIndex : BaseValueIndex { NativeObject::slotsSizeMustNotOverflow(); } + +#ifdef JS_HAS_HIDDEN_SP + BaseObjectSlotIndex(RegisterOrSP base, Register index) + : BaseValueIndex(base, index) + { + NativeObject::slotsSizeMustNotOverflow(); + } +#endif }; class Relocation { diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp index 0cacb4a9e202..0a3fd2ac13ce 100644 --- a/js/src/wasm/WasmBaselineCompile.cpp +++ b/js/src/wasm/WasmBaselineCompile.cpp @@ -985,7 +985,7 @@ class BaseStackFrame CodeOffset stackAddOffset_; // The stack pointer, cached for brevity. - Register sp_; + RegisterOrSP sp_; public: @@ -1178,7 +1178,7 @@ class BaseStackFrame void allocStack(Register tmp0, Register tmp1, Label* stackOverflowLabel) { stackAddOffset_ = masm.sub32FromStackPtrWithPatch(tmp0); - masm.wasmEmitStackCheck(tmp0, tmp1, stackOverflowLabel); + masm.wasmEmitStackCheck(RegisterOrSP(tmp0), tmp1, stackOverflowLabel); } void patchAllocStack() { diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp index b8c2e4612846..7485d3f074d8 100644 --- a/js/src/wasm/WasmStubs.cpp +++ b/js/src/wasm/WasmStubs.cpp @@ -137,7 +137,7 @@ SetupABIArguments(MacroAssembler& masm, const FuncExport& fe, Register argv, Reg masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase())); break; case MIRType::Int64: { - Register sp = masm.getStackPointer(); + RegisterOrSP sp = masm.getStackPointer(); #if JS_BITS_PER_WORD == 32 masm.load32(LowWord(src), scratch); masm.store32(scratch, LowWord(Address(sp, iter->offsetFromArgBase())));