Bug 1184959 part 5 - Move callWithABI functions to the generic MacroAssembler. r=bbouvier,sstangl,jandem,rankov,h4writer

Nicolas B. Pierron 2015-08-17 11:32:18 +02:00
Parent cf2c493f3e
Commit 34e11e574d
25 changed files with 857 additions and 1596 deletions

View file

@ -25,6 +25,7 @@
#include "jsscriptinlines.h"
#include "jit/MacroAssembler-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/NativeObject-inl.h"

View file

@ -13,6 +13,7 @@
#include "jit/PerfSpewer.h"
#include "jit/JitFrames-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "vm/Stack-inl.h"
using namespace js;

View file

@ -76,6 +76,82 @@ MacroAssembler::call(const CallSiteDesc& desc, Label* label)
append(desc, currentOffset(), framePushed());
}
// ===============================================================
// ABI function calls.
void
MacroAssembler::passABIArg(Register reg)
{
passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}
void
MacroAssembler::passABIArg(FloatRegister reg, MoveOp::Type type)
{
passABIArg(MoveOperand(reg), type);
}
template <typename T> void
MacroAssembler::callWithABI(const T& fun, MoveOp::Type result)
{
profilerPreCall();
callWithABINoProfiler(fun, result);
profilerPostReturn();
}
void
MacroAssembler::appendSignatureType(MoveOp::Type type)
{
#ifdef JS_SIMULATOR
signature_ <<= ArgType_Shift;
switch (type) {
case MoveOp::GENERAL: signature_ |= ArgType_General; break;
case MoveOp::DOUBLE: signature_ |= ArgType_Double; break;
case MoveOp::FLOAT32: signature_ |= ArgType_Float32; break;
default: MOZ_CRASH("Invalid argument type");
}
#endif
}
ABIFunctionType
MacroAssembler::signature() const
{
#ifdef JS_SIMULATOR
#ifdef DEBUG
switch (signature_) {
case Args_General0:
case Args_General1:
case Args_General2:
case Args_General3:
case Args_General4:
case Args_General5:
case Args_General6:
case Args_General7:
case Args_General8:
case Args_Double_None:
case Args_Int_Double:
case Args_Float32_Float32:
case Args_Double_Double:
case Args_Double_Int:
case Args_Double_DoubleInt:
case Args_Double_DoubleDouble:
case Args_Double_IntDouble:
case Args_Int_IntDouble:
case Args_Double_DoubleDoubleDouble:
case Args_Double_DoubleDoubleDoubleDouble:
break;
default:
MOZ_CRASH("Unexpected type");
}
#endif // DEBUG
return ABIFunctionType(signature_);
#else
// No simulator enabled.
MOZ_CRASH("Only available for making calls within a simulator.");
#endif
}
//}}} check_macroassembler_style
// ===============================================================
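Editorial note, not part of the patch: the sketch below shows how a caller now drives this generic API, mirroring the handleFailureWithHandlerTail call sites updated further down in this commit. The callee pointer and the register names are placeholders.
// Illustrative sketch only; 'fun', 'valueReg' and 'scratch' are placeholders.
void* fun = nullptr;                      // address of some C/C++ callee
masm.setupUnalignedABICall(/* args = */ 1, scratch);
masm.passABIArg(valueReg);                // Register overload implies MoveOp::GENERAL
masm.callWithABI(fun, MoveOp::DOUBLE);    // profiler hooks wrap callWithABINoProfiler;
                                          // a double result ends up in ReturnDoubleReg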

View file

@ -2548,6 +2548,9 @@ MacroAssembler::MacroAssembler(JSContext* cx, IonScript* ion,
JSScript* script, jsbytecode* pc)
: emitProfilingInstrumentation_(false),
framePushed_(0)
#ifdef DEBUG
, inCall_(false)
#endif
{
constructRoot(cx);
jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
@ -2764,4 +2767,108 @@ MacroAssembler::freeStack(Register amount)
addToStackPtr(amount);
}
// ===============================================================
// ABI function calls.
void
MacroAssembler::setupABICall()
{
#ifdef DEBUG
MOZ_ASSERT(!inCall_);
inCall_ = true;
#endif
#ifdef JS_SIMULATOR
signature_ = 0;
#endif
// Reinitialize the ABIArg generator.
abiArgs_ = ABIArgGenerator();
#if defined(JS_CODEGEN_ARM)
// On ARM, we need to know what ABI we are using, either in the
// simulator, or based on the configure flags.
#if defined(JS_SIMULATOR_ARM)
abiArgs_.setUseHardFp(UseHardFpABI());
#elif defined(JS_CODEGEN_ARM_HARDFP)
abiArgs_.setUseHardFp(true);
#else
abiArgs_.setUseHardFp(false);
#endif
#endif
#if defined(JS_CODEGEN_MIPS32)
// On MIPS, the system ABI uses general register pairs to encode double
// arguments, after one or two integer-like arguments. Unfortunately, the
// Lowering phase is not capable of expressing this at the moment, so we
// enforce the system ABI here.
abiArgs_.enforceO32ABI();
#endif
}
void
MacroAssembler::setupAlignedABICall(uint32_t args)
{
setupABICall();
dynamicAlignment_ = false;
assertStackAlignment(ABIStackAlignment);
#if defined(JS_CODEGEN_ARM64)
MOZ_CRASH("Not supported on arm64");
#endif
}
void
MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
MOZ_ASSERT(inCall_);
appendSignatureType(type);
ABIArg arg;
switch (type) {
case MoveOp::FLOAT32:
arg = abiArgs_.next(MIRType_Float32);
break;
case MoveOp::DOUBLE:
arg = abiArgs_.next(MIRType_Double);
break;
case MoveOp::GENERAL:
arg = abiArgs_.next(MIRType_Pointer);
break;
default:
MOZ_CRASH("Unexpected argument type");
}
MoveOperand to(*this, arg);
if (from == to)
return;
if (!enoughMemory_)
return;
enoughMemory_ = moveResolver_.addMove(from, to, type);
}
void
MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result)
{
appendSignatureType(result);
#ifdef JS_SIMULATOR
fun = Simulator::RedirectNativeFunction(fun, signature());
#endif
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(ImmPtr(fun));
callWithABIPost(stackAdjust, result);
}
void
MacroAssembler::callWithABINoProfiler(AsmJSImmPtr imm, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust, /* callFromAsmJS = */ true);
call(imm);
callWithABIPost(stackAdjust, result);
}
//}}} check_macroassembler_style
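Editorial note, not part of the patch: passABIArg now delegates slot assignment to ABIArgGenerator and only records a parallel move, so the per-architecture classes no longer track used argument slots by hand. A hedged sketch of that delegation; 'abiArgs', 'moveResolver', 'from' and 'masm' stand in for the members and object used above.
ABIArg arg = abiArgs.next(MIRType_Double);   // the generator picks a register or stack slot
MoveOperand to(masm, arg);                   // GPR, GPR pair, FPU register, or stack location
if (!(from == to))                           // skip the move if the value is already in place
    moveResolver.addMove(from, to, MoveOp::DOUBLE);  // emitted later by callWithABIPre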

View file

@ -332,6 +332,9 @@ class MacroAssembler : public MacroAssemblerSpecific
MacroAssembler()
: emitProfilingInstrumentation_(false),
framePushed_(0)
#ifdef DEBUG
, inCall_(false)
#endif
{
JitContext* jcx = GetJitContext();
JSContext* cx = jcx->cx;
@ -364,6 +367,9 @@ class MacroAssembler : public MacroAssemblerSpecific
explicit MacroAssembler(AsmJSToken)
: emitProfilingInstrumentation_(false),
framePushed_(0)
#ifdef DEBUG
, inCall_(false)
#endif
{
#if defined(JS_CODEGEN_ARM)
initWithAllocator();
@ -473,6 +479,79 @@ class MacroAssembler : public MacroAssemblerSpecific
inline void call(const CallSiteDesc& desc, const Register reg);
inline void call(const CallSiteDesc& desc, Label* label);
public:
// ===============================================================
// ABI function calls.
// Set up a call to C/C++ code, given the assumption that framePushed
// accurately defines the state of the stack, and that the top of the stack
// is properly aligned. Note that this only supports cdecl.
void setupAlignedABICall(uint32_t args); // CRASH_ON(arm64)
// Set up an ABI call for when the alignment is not known. This may need a
// scratch register.
void setupUnalignedABICall(uint32_t args, Register scratch) PER_ARCH;
// Arguments must be assigned to a C/C++ call in order. They are moved
// in parallel immediately before performing the call. This process may
// temporarily use more stack, in which case esp-relative addresses will be
// automatically adjusted. It is extremely important that esp-relative
// addresses are computed *after* setupABICall(). Furthermore, no
// operations should be emitted while setting arguments.
void passABIArg(const MoveOperand& from, MoveOp::Type type);
inline void passABIArg(Register reg);
inline void passABIArg(FloatRegister reg, MoveOp::Type type);
template <typename T>
inline void callWithABI(const T& fun, MoveOp::Type result = MoveOp::GENERAL);
private:
// Reinitialize the variables which have to be cleared before making a call
// with callWithABI.
void setupABICall();
// Reserve the stack space and resolve the argument moves.
void callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS = false) PER_ARCH;
// Emits a call to a C/C++ function, resolving all argument moves.
void callWithABINoProfiler(void* fun, MoveOp::Type result);
void callWithABINoProfiler(AsmJSImmPtr imm, MoveOp::Type result);
void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
// Restore the stack to its state before the setup function call.
void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result) PER_ARCH;
// Create the signature to be able to decode the arguments of a native
// function, when calling a function within the simulator.
inline void appendSignatureType(MoveOp::Type type);
inline ABIFunctionType signature() const;
// Private variables used to handle moves between registers given as
// arguments to passABIArg and the list of ABI registers expected for the
// signature of the function.
MoveResolver moveResolver_;
// Architecture-specific implementation which specifies how registers and
// stack offsets are used for calling a function.
ABIArgGenerator abiArgs_;
#ifdef DEBUG
// Flag used to assert that ABI function calls are made in the right context.
bool inCall_;
#endif
// If set by setupUnalignedABICall, then callWithABI will pop the stack
// pointer which was saved on the stack.
bool dynamicAlignment_;
#ifdef JS_SIMULATOR
// The signature accumulates the types of all arguments passed by the caller.
// It is used by the simulators to decode the arguments properly and to cast
// the function pointer to the right type.
uint32_t signature_;
#endif
//}}} check_macroassembler_style
public:
@ -1002,13 +1081,6 @@ class MacroAssembler : public MacroAssemblerSpecific
// they are returning the offset of the assembler just after the call has
// been made so that a safepoint can be made at that location.
template <typename T>
void callWithABI(const T& fun, MoveOp::Type result = MoveOp::GENERAL) {
profilerPreCall();
MacroAssemblerSpecific::callWithABI(fun, result);
profilerPostReturn();
}
// see above comment for what is returned
uint32_t callJit(Register callee) {
profilerPreCall();
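Editorial note, not part of the patch: the two setup flavors documented above differ only in how the stack gets aligned before the call. A hedged sketch of when each applies; the callee and registers are placeholders.
// When framePushed() is accurate (e.g. in Ion-generated code), the aligned
// flavor asserts the alignment and adds no extra bookkeeping:
masm.setupAlignedABICall(/* args = */ 1);
masm.passABIArg(arg0Reg);
masm.callWithABI(fun);
// When the current alignment is unknown, pass a scratch register so the old
// stack pointer can be saved, and later restored by callWithABIPost:
masm.setupUnalignedABICall(/* args = */ 1, scratch);
masm.passABIArg(arg0Reg);
masm.callWithABI(fun);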

View file

@ -5,11 +5,40 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/MoveResolver.h"
#include "jit/MacroAssembler.h"
#include "jit/RegisterSets.h"
using namespace js;
using namespace js::jit;
MoveOperand::MoveOperand(MacroAssembler& masm, const ABIArg& arg)
{
switch (arg.kind()) {
case ABIArg::GPR:
kind_ = REG;
code_ = arg.gpr().code();
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
kind_ = REG_PAIR;
code_ = arg.evenGpr().code();
MOZ_ASSERT(code_ % 2 == 0);
MOZ_ASSERT(code_ + 1 == arg.oddGpr().code());
break;
#endif
case ABIArg::FPU:
kind_ = FLOAT_REG;
code_ = arg.fpu().code();
break;
case ABIArg::Stack:
kind_ = MEMORY;
code_ = masm.getStackPointer().code();
disp_ = arg.offsetFromArgBase();
break;
}
}
MoveResolver::MoveResolver()
: numCycles_(0), curCycles_(0)
{

View file

@ -14,6 +14,8 @@
namespace js {
namespace jit {
class MacroAssembler;
// This is similar to Operand, but carries more information. We're also not
// guaranteed that Operand looks like this on all ISAs.
class MoveOperand
@ -59,6 +61,7 @@ class MoveOperand
if (disp == 0 && kind_ == EFFECTIVE_ADDRESS)
kind_ = REG;
}
MoveOperand(MacroAssembler& masm, const ABIArg& arg);
MoveOperand(const MoveOperand& other)
: kind_(other.kind_),
code_(other.code_),

View file

@ -18,6 +18,8 @@
#endif
#include "jit/VMFunctions.h"
#include "jit/MacroAssembler-inl.h"
namespace js {
namespace jit {

View file

@ -10,6 +10,8 @@
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;

View file

@ -3751,409 +3751,9 @@ MacroAssemblerARMCompat::breakpoint(Condition cc)
}
void
MacroAssemblerARMCompat::setupABICall(uint32_t args)
MacroAssemblerARMCompat::checkStackAlignment()
{
MOZ_ASSERT(!inCall_);
inCall_ = true;
args_ = args;
passedArgs_ = 0;
passedArgTypes_ = 0;
usedIntSlots_ = 0;
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
usedFloatSlots_ = 0;
usedFloat32_ = false;
padding_ = 0;
#endif
floatArgsInGPR[0] = MoveOperand();
floatArgsInGPR[1] = MoveOperand();
floatArgsInGPR[2] = MoveOperand();
floatArgsInGPR[3] = MoveOperand();
floatArgsInGPRValid[0] = false;
floatArgsInGPRValid[1] = false;
floatArgsInGPRValid[2] = false;
floatArgsInGPRValid[3] = false;
}
void
MacroAssemblerARMCompat::setupAlignedABICall(uint32_t args)
{
setupABICall(args);
dynamicAlignment_ = false;
}
void
MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall(args);
dynamicAlignment_ = true;
ma_mov(sp, scratch);
// Force sp to be aligned.
ma_and(Imm32(~(ABIStackAlignment - 1)), sp, sp);
ma_push(scratch);
}
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
void
MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand& from, MoveOp::Type type)
{
MoveOperand to;
++passedArgs_;
if (!enoughMemory_)
return;
switch (type) {
case MoveOp::FLOAT32: {
FloatRegister fr;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
if (GetFloat32ArgReg(usedIntSlots_, usedFloatSlots_, &fr)) {
if (from.isFloatReg() && from.floatReg() == fr) {
// Nothing to do; the value is in the right register already.
usedFloatSlots_++;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
return;
}
to = MoveOperand(fr);
} else {
// If (and only if) the integer registers have started spilling, do
// we need to take the register's alignment into account.
uint32_t disp = GetFloat32ArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
to = MoveOperand(sp, disp);
}
usedFloatSlots_++;
break;
}
case MoveOp::DOUBLE: {
FloatRegister fr;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
usedFloatSlots_ = (usedFloatSlots_ + 1) & -2;
if (GetDoubleArgReg(usedIntSlots_, usedFloatSlots_, &fr)) {
if (from.isFloatReg() && from.floatReg() == fr) {
// Nothing to do; the value is in the right register already.
usedFloatSlots_ += 2;
return;
}
to = MoveOperand(fr);
} else {
// If (and only if) the integer registers have started spilling, do we
// need to take the register's alignment into account
uint32_t disp = GetDoubleArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
to = MoveOperand(sp, disp);
}
usedFloatSlots_+=2;
break;
}
case MoveOp::GENERAL: {
Register r;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
if (GetIntArgReg(usedIntSlots_, usedFloatSlots_, &r)) {
if (from.isGeneralReg() && from.reg() == r) {
// Nothing to do; the value is in the right register already.
usedIntSlots_++;
return;
}
to = MoveOperand(r);
} else {
uint32_t disp = GetIntArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
to = MoveOperand(sp, disp);
}
usedIntSlots_++;
break;
}
default:
MOZ_CRASH("Unexpected argument type");
}
enoughMemory_ = moveResolver_.addMove(from, to, type);
}
#endif
#if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
void
MacroAssemblerARMCompat::passSoftFpABIArg(const MoveOperand& from, MoveOp::Type type)
{
MoveOperand to;
uint32_t increment = 1;
bool useResolver = true;
++passedArgs_;
switch (type) {
case MoveOp::DOUBLE:
// Double arguments need to be rounded up to the nearest doubleword
// boundary, even if it is in a register!
usedIntSlots_ = (usedIntSlots_ + 1) & ~1;
increment = 2;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
break;
case MoveOp::FLOAT32:
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
break;
case MoveOp::GENERAL:
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
break;
default:
MOZ_CRASH("Unexpected argument type");
}
Register destReg;
MoveOperand dest;
if (GetIntArgReg(usedIntSlots_, 0, &destReg)) {
if (type == MoveOp::DOUBLE || type == MoveOp::FLOAT32) {
floatArgsInGPR[destReg.code()] = from;
floatArgsInGPRValid[destReg.code()] = true;
useResolver = false;
} else if (from.isGeneralReg() && from.reg() == destReg) {
// No need to move anything.
useResolver = false;
} else {
dest = MoveOperand(destReg);
}
} else {
uint32_t disp = GetArgStackDisp(usedIntSlots_);
dest = MoveOperand(sp, disp);
}
if (useResolver)
enoughMemory_ = enoughMemory_ && moveResolver_.addMove(from, dest, type);
usedIntSlots_ += increment;
}
#endif
void
MacroAssemblerARMCompat::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
#if defined(JS_SIMULATOR_ARM)
if (UseHardFpABI())
MacroAssemblerARMCompat::passHardFpABIArg(from, type);
else
MacroAssemblerARMCompat::passSoftFpABIArg(from, type);
#elif defined(JS_CODEGEN_ARM_HARDFP)
MacroAssemblerARMCompat::passHardFpABIArg(from, type);
#else
MacroAssemblerARMCompat::passSoftFpABIArg(from, type);
#endif
}
void
MacroAssemblerARMCompat::passABIArg(Register reg)
{
passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}
void
MacroAssemblerARMCompat::passABIArg(FloatRegister freg, MoveOp::Type type)
{
passABIArg(MoveOperand(freg), type);
}
void MacroAssemblerARMCompat::checkStackAlignment()
{
#ifdef DEBUG
ma_tst(sp, Imm32(ABIStackAlignment - 1));
breakpoint(NonZero);
#endif
}
void
MacroAssemblerARMCompat::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
MOZ_ASSERT(inCall_);
*stackAdjust = ((usedIntSlots_ > NumIntArgRegs) ? usedIntSlots_ - NumIntArgRegs : 0) * sizeof(intptr_t);
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
if (UseHardFpABI())
*stackAdjust += 2*((usedFloatSlots_ > NumFloatArgRegs) ? usedFloatSlots_ - NumFloatArgRegs : 0) * sizeof(intptr_t);
#endif
uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
if (!dynamicAlignment_) {
*stackAdjust += ComputeByteAlignment(asMasm().framePushed() + *stackAdjust + alignmentAtPrologue,
ABIStackAlignment);
} else {
// sizeof(intptr_t) accounts for the saved stack pointer pushed by
// setupUnalignedABICall.
*stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), ABIStackAlignment);
}
asMasm().reserveStack(*stackAdjust);
// Position all arguments.
{
enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(asMasm());
emitter.emit(moveResolver_);
emitter.finish();
}
for (int i = 0; i < 4; i++) {
if (floatArgsInGPRValid[i]) {
MoveOperand from = floatArgsInGPR[i];
Register to0 = Register::FromCode(i);
Register to1;
if (!from.isFloatReg() || from.floatReg().isDouble()) {
// Doubles need to be moved into a pair of aligned registers
// whether they come from the stack, or VFP registers.
to1 = Register::FromCode(i + 1);
MOZ_ASSERT(i % 2 == 0);
}
if (from.isFloatReg()) {
if (from.floatReg().isDouble())
ma_vxfer(from.floatReg(), to0, to1);
else
ma_vxfer(from.floatReg(), to0);
} else {
MOZ_ASSERT(from.isMemory());
// Note: We can safely use the MoveOperand's displacement here,
// even if the base is SP: MoveEmitter::toOperand adjusts
// SP-relative operands by the difference between the current
// stack usage and stackAdjust, which emitter.finish() resets to
// 0.
//
// Warning: if the offset isn't within [-255,+255] then this
// will assert-fail (or, if non-debug, load the wrong words).
// Nothing uses such an offset at the time of this writing.
ma_ldrd(EDtrAddr(from.base(), EDtrOffImm(from.disp())), to0, to1);
}
}
}
checkStackAlignment();
// Save the lr register if we need to preserve it.
if (secondScratchReg_ != lr)
ma_mov(lr, secondScratchReg_);
}
void
MacroAssemblerARMCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
if (secondScratchReg_ != lr)
ma_mov(secondScratchReg_, lr);
switch (result) {
case MoveOp::DOUBLE:
if (!UseHardFpABI()) {
// Move double from r0/r1 to ReturnFloatReg.
ma_vxfer(r0, r1, ReturnDoubleReg);
break;
}
case MoveOp::FLOAT32:
if (!UseHardFpABI()) {
// Move float32 from r0 to ReturnFloatReg.
ma_vxfer(r0, ReturnFloat32Reg);
break;
}
case MoveOp::GENERAL:
break;
default:
MOZ_CRASH("unexpected callWithABI result");
}
asMasm().freeStack(stackAdjust);
if (dynamicAlignment_) {
// While the x86 supports pop esp, on ARM that isn't well defined, so
// just do it manually.
as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
}
MOZ_ASSERT(inCall_);
inCall_ = false;
}
#if defined(DEBUG) && defined(JS_SIMULATOR_ARM)
static void
AssertValidABIFunctionType(uint32_t passedArgTypes)
{
switch (passedArgTypes) {
case Args_General0:
case Args_General1:
case Args_General2:
case Args_General3:
case Args_General4:
case Args_General5:
case Args_General6:
case Args_General7:
case Args_General8:
case Args_Double_None:
case Args_Int_Double:
case Args_Float32_Float32:
case Args_Double_Double:
case Args_Double_Int:
case Args_Double_DoubleInt:
case Args_Double_DoubleDouble:
case Args_Double_IntDouble:
case Args_Int_IntDouble:
case Args_Double_DoubleDoubleDouble:
case Args_Double_DoubleDoubleDoubleDouble:
break;
default:
MOZ_CRASH("Unexpected type");
}
}
#endif
void
MacroAssemblerARMCompat::callWithABI(void* fun, MoveOp::Type result)
{
#ifdef JS_SIMULATOR_ARM
MOZ_ASSERT(passedArgs_ <= 15);
passedArgTypes_ <<= ArgType_Shift;
switch (result) {
case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break;
case MoveOp::DOUBLE: passedArgTypes_ |= ArgType_Double; break;
case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break;
default: MOZ_CRASH("Invalid return type");
}
#ifdef DEBUG
AssertValidABIFunctionType(passedArgTypes_);
#endif
ABIFunctionType type = ABIFunctionType(passedArgTypes_);
fun = Simulator::RedirectNativeFunction(fun, type);
#endif
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
ma_call(ImmPtr(fun));
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerARMCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust, /* callFromAsmJS = */ true);
asMasm().call(imm);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerARMCompat::callWithABI(const Address& fun, MoveOp::Type result)
{
// Load the callee in r12, no instruction between the ldr and call should
// clobber it. Note that we can't use fun.base because it may be one of the
// IntArg registers clobbered before the call.
ma_ldr(fun, r12);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(r12);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerARMCompat::callWithABI(Register fun, MoveOp::Type result)
{
// Load the callee in r12, as above.
ma_mov(fun, r12);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(r12);
callWithABIPost(stackAdjust, result);
asMasm().assertStackAlignment(ABIStackAlignment);
}
void
@ -4166,9 +3766,9 @@ MacroAssemblerARMCompat::handleFailureWithHandlerTail(void* handler)
ma_mov(sp, r0);
// Call the handler.
setupUnalignedABICall(1, r1);
passABIArg(r0);
callWithABI(handler);
asMasm().setupUnalignedABICall(1, r1);
asMasm().passABIArg(r0);
asMasm().callWithABI(handler);
Label entryFrame;
Label catch_;
@ -5380,4 +4980,122 @@ MacroAssembler::call(JitCode* c)
ma_callJitHalfPush(ScratchRegister);
}
// ===============================================================
// ABI function calls.
void
MacroAssembler::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall();
dynamicAlignment_ = true;
ma_mov(sp, scratch);
// Force sp to be aligned.
ma_and(Imm32(~(ABIStackAlignment - 1)), sp, sp);
ma_push(scratch);
}
void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
MOZ_ASSERT(inCall_);
uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
if (dynamicAlignment_) {
// sizeof(intptr_t) accounts for the saved stack pointer pushed by
// setupUnalignedABICall.
stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
ABIStackAlignment);
} else {
uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
ABIStackAlignment);
}
*stackAdjust = stackForCall;
reserveStack(stackForCall);
// Position all arguments.
{
enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(*this);
emitter.emit(moveResolver_);
emitter.finish();
}
assertStackAlignment(ABIStackAlignment);
// Save the lr register if we need to preserve it.
if (secondScratchReg_ != lr)
ma_mov(lr, secondScratchReg_);
}
void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
if (secondScratchReg_ != lr)
ma_mov(secondScratchReg_, lr);
switch (result) {
case MoveOp::DOUBLE:
if (!UseHardFpABI()) {
// Move double from r0/r1 to ReturnFloatReg.
ma_vxfer(r0, r1, ReturnDoubleReg);
break;
}
case MoveOp::FLOAT32:
if (!UseHardFpABI()) {
// Move float32 from r0 to ReturnFloatReg.
ma_vxfer(r0, ReturnFloat32Reg.singleOverlay());
break;
}
case MoveOp::GENERAL:
break;
default:
MOZ_CRASH("unexpected callWithABI result");
}
freeStack(stackAdjust);
if (dynamicAlignment_) {
// While the x86 supports pop esp, on ARM that isn't well defined, so
// just do it manually.
as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
}
#ifdef DEBUG
MOZ_ASSERT(inCall_);
inCall_ = false;
#endif
}
void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
// Load the callee in r12, as above.
ma_mov(fun, r12);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(r12);
callWithABIPost(stackAdjust, result);
}
void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
// Load the callee in r12; no instruction between the ldr and the call should
// clobber it. Note that we can't use fun.base because it may be one of the
// IntArg registers clobbered before the call.
ma_ldr(fun, r12);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(r12);
callWithABIPost(stackAdjust, result);
}
//}}} check_macroassembler_style

View file

@ -498,48 +498,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
private:
bool inCall_;
// Number of bytes the stack is adjusted inside a call to C. Calls to C may
// not be nested.
uint32_t args_;
// The actual number of arguments that were passed, used to assert that the
// initial number of arguments declared was correct.
uint32_t passedArgs_;
uint32_t passedArgTypes_;
// ARM treats arguments as a vector in registers/memory, that looks like:
// { r0, r1, r2, r3, [sp], [sp,+4], [sp,+8] ... }
// usedIntSlots_ keeps track of how many of these have been used. It bears a
// passing resemblance to passedArgs_, but a single argument can effectively
// use between one and three slots depending on its size and alignment
// requirements.
uint32_t usedIntSlots_;
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
uint32_t usedFloatSlots_;
bool usedFloat32_;
uint32_t padding_;
#endif
bool dynamicAlignment_;
// Used to work around the move resolver's lack of support for moving into
// register pairs, which the softfp ABI needs.
mozilla::Array<MoveOperand, 4> floatArgsInGPR;
mozilla::Array<bool, 4> floatArgsInGPRValid;
// Compute space needed for the function call and set the properties of the
// callee. It returns the space which has to be allocated for calling the
// function.
//
// arg Number of arguments of the function.
void setupABICall(uint32_t arg);
protected:
MoveResolver moveResolver_;
public:
MacroAssemblerARMCompat()
: inCall_(false)
{ }
public:
@ -1757,47 +1717,10 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
emitSet(cond, dest);
}
// Setup a call to C/C++ code, given the number of general arguments it
// takes. Note that this only supports cdecl.
//
// In order for alignment to work correctly, the MacroAssembler must have a
// consistent view of the stack displacement. It is okay to call "push"
// manually, however, if the stack alignment were to change, the macro
// assembler should be notified before starting a call.
void setupAlignedABICall(uint32_t args);
// Sets up an ABI call for when the alignment is not known. This may need a
// scratch register.
void setupUnalignedABICall(uint32_t args, Register scratch);
// Arguments must be assigned in a left-to-right order. This process may
// temporarily use more stack, in which case esp-relative addresses will be
// automatically adjusted. It is extremely important that esp-relative
// addresses are computed *after* setupABICall(). Furthermore, no operations
// should be emitted while setting arguments.
void passABIArg(const MoveOperand& from, MoveOp::Type type);
void passABIArg(Register reg);
void passABIArg(FloatRegister reg, MoveOp::Type type);
void passABIArg(const ValueOperand& regs);
private:
void passHardFpABIArg(const MoveOperand& from, MoveOp::Type type);
void passSoftFpABIArg(const MoveOperand& from, MoveOp::Type type);
protected:
bool buildOOLFakeExitFrame(void* fakeReturnAddr);
private:
void callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS = false);
void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
public:
// Emits a call to a C/C++ function, resolving all argument moves.
void callWithABI(void* fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(const Address& fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
CodeOffsetLabel labelForPatch() {
return CodeOffsetLabel(nextOffset().getOffset());
}

View file

@ -12,6 +12,8 @@
#include "jit/BaselineFrame.h"
#include "jit/MacroAssembler.h"
#include "jit/MacroAssembler-inl.h"
namespace js {
namespace jit {
@ -180,9 +182,9 @@ MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler)
Mov(x0, GetStackPointer64());
// Call the handler.
setupUnalignedABICall(1, r1);
passABIArg(r0);
callWithABI(handler);
asMasm().setupUnalignedABICall(1, r1);
asMasm().passABIArg(r0);
asMasm().callWithABI(handler);
Label entryFrame;
Label catch_;
@ -252,247 +254,6 @@ MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler)
Br(x1);
}
void
MacroAssemblerCompat::setupABICall(uint32_t args)
{
MOZ_ASSERT(!inCall_);
inCall_ = true;
args_ = args;
usedOutParam_ = false;
passedIntArgs_ = 0;
passedFloatArgs_ = 0;
passedArgTypes_ = 0;
stackForCall_ = ShadowStackSpace;
}
void
MacroAssemblerCompat::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall(args);
dynamicAlignment_ = true;
int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
ARMRegister scratch64(scratch, 64);
// Always save LR -- Baseline ICs assume that LR isn't modified.
push(lr);
// Unhandled for sp -- needs slightly different logic.
MOZ_ASSERT(!GetStackPointer64().Is(sp));
// Remember the stack address on entry.
Mov(scratch64, GetStackPointer64());
// Make alignment, including the effective push of the previous sp.
Sub(GetStackPointer64(), GetStackPointer64(), Operand(8));
And(GetStackPointer64(), GetStackPointer64(), Operand(alignment));
// If the PseudoStackPointer is used, sp must be <= psp before a write is valid.
syncStackPtr();
// Store previous sp to the top of the stack, aligned.
Str(scratch64, MemOperand(GetStackPointer64(), 0));
}
void
MacroAssemblerCompat::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
if (!enoughMemory_)
return;
Register activeSP = Register::FromCode(GetStackPointer64().code());
if (type == MoveOp::GENERAL) {
Register dest;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) {
if (!from.isGeneralReg() || from.reg() != dest)
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(dest), type);
return;
}
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(activeSP, stackForCall_), type);
stackForCall_ += sizeof(int64_t);
return;
}
MOZ_ASSERT(type == MoveOp::FLOAT32 || type == MoveOp::DOUBLE);
if (type == MoveOp::FLOAT32)
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
else
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
FloatRegister fdest;
if (GetFloatArgReg(passedIntArgs_, passedFloatArgs_++, &fdest)) {
if (!from.isFloatReg() || from.floatReg() != fdest)
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(fdest), type);
return;
}
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(activeSP, stackForCall_), type);
switch (type) {
case MoveOp::FLOAT32: stackForCall_ += sizeof(float); break;
case MoveOp::DOUBLE: stackForCall_ += sizeof(double); break;
default: MOZ_CRASH("Unexpected float register class argument type");
}
}
void
MacroAssemblerCompat::passABIArg(Register reg)
{
passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}
void
MacroAssemblerCompat::passABIArg(FloatRegister reg, MoveOp::Type type)
{
passABIArg(MoveOperand(reg), type);
}
void
MacroAssemblerCompat::passABIOutParam(Register reg)
{
if (!enoughMemory_)
return;
MOZ_ASSERT(!usedOutParam_);
usedOutParam_ = true;
if (reg == r8)
return;
enoughMemory_ = moveResolver_.addMove(MoveOperand(reg), MoveOperand(r8), MoveOp::GENERAL);
}
void
MacroAssemblerCompat::callWithABIPre(uint32_t* stackAdjust)
{
*stackAdjust = stackForCall_;
// ARM64 /really/ wants the stack to always be aligned. Since we're already tracking it
// getting it aligned for an abi call is pretty easy.
*stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
asMasm().reserveStack(*stackAdjust);
{
moveResolver_.resolve();
MoveEmitter emitter(asMasm());
emitter.emit(moveResolver_);
emitter.finish();
}
// Call boundaries communicate stack via sp.
syncStackPtr();
}
void
MacroAssemblerCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
// Call boundaries communicate stack via sp.
if (!GetStackPointer64().Is(sp))
Mov(GetStackPointer64(), sp);
inCall_ = false;
asMasm().freeStack(stackAdjust);
// Restore the stack pointer from entry.
if (dynamicAlignment_)
Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0));
// Restore LR.
pop(lr);
// TODO: This one shouldn't be necessary -- check that callers
// aren't enforcing the ABI themselves!
syncStackPtr();
// If the ABI's return regs are where ION is expecting them, then
// no other work needs to be done.
}
#if defined(DEBUG) && defined(JS_SIMULATOR_ARM64)
static void
AssertValidABIFunctionType(uint32_t passedArgTypes)
{
switch (passedArgTypes) {
case Args_General0:
case Args_General1:
case Args_General2:
case Args_General3:
case Args_General4:
case Args_General5:
case Args_General6:
case Args_General7:
case Args_General8:
case Args_Double_None:
case Args_Int_Double:
case Args_Float32_Float32:
case Args_Double_Double:
case Args_Double_Int:
case Args_Double_DoubleInt:
case Args_Double_DoubleDouble:
case Args_Double_DoubleDoubleDouble:
case Args_Double_DoubleDoubleDoubleDouble:
case Args_Double_IntDouble:
case Args_Int_IntDouble:
break;
default:
MOZ_CRASH("Unexpected type");
}
}
#endif // DEBUG && JS_SIMULATOR_ARM64
void
MacroAssemblerCompat::callWithABI(void* fun, MoveOp::Type result)
{
#ifdef JS_SIMULATOR_ARM64
MOZ_ASSERT(passedIntArgs_ + passedFloatArgs_ <= 15);
passedArgTypes_ <<= ArgType_Shift;
switch (result) {
case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break;
case MoveOp::DOUBLE: passedArgTypes_ |= ArgType_Double; break;
case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break;
default: MOZ_CRASH("Invalid return type");
}
# ifdef DEBUG
AssertValidABIFunctionType(passedArgTypes_);
# endif
ABIFunctionType type = ABIFunctionType(passedArgTypes_);
fun = vixl::Simulator::RedirectNativeFunction(fun, type);
#endif // JS_SIMULATOR_ARM64
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(ImmPtr(fun));
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerCompat::callWithABI(Register fun, MoveOp::Type result)
{
movePtr(fun, ip0);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(ip0);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(imm);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerCompat::callWithABI(Address fun, MoveOp::Type result)
{
loadPtr(fun, ip0);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(ip0);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerCompat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp,
Label* label)
@ -754,6 +515,116 @@ MacroAssembler::call(JitCode* c)
blr(scratch64);
}
// ===============================================================
// ABI function calls.
void
MacroAssembler::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall();
dynamicAlignment_ = true;
int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
ARMRegister scratch64(scratch, 64);
// Always save LR -- Baseline ICs assume that LR isn't modified.
push(lr);
// Unhandled for sp -- needs slightly different logic.
MOZ_ASSERT(!GetStackPointer64().Is(sp));
// Remember the stack address on entry.
Mov(scratch64, GetStackPointer64());
// Make alignment, including the effective push of the previous sp.
Sub(GetStackPointer64(), GetStackPointer64(), Operand(8));
And(GetStackPointer64(), GetStackPointer64(), Operand(alignment));
// If the PseudoStackPointer is used, sp must be <= psp before a write is valid.
syncStackPtr();
// Store previous sp to the top of the stack, aligned.
Str(scratch64, MemOperand(GetStackPointer64(), 0));
}
void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
MOZ_ASSERT(inCall_);
uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
// ARM64 /really/ wants the stack to always be aligned. Since we're already
// tracking it, getting it aligned for an ABI call is pretty easy.
MOZ_ASSERT(dynamicAlignment_);
stackForCall += ComputeByteAlignment(stackForCall, StackAlignment);
*stackAdjust = stackForCall;
reserveStack(*stackAdjust);
{
moveResolver_.resolve();
MoveEmitter emitter(*this);
emitter.emit(moveResolver_);
emitter.finish();
}
// Call boundaries communicate stack via sp.
syncStackPtr();
}
void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
// Call boundaries communicate stack via sp.
if (!GetStackPointer64().Is(sp))
Mov(GetStackPointer64(), sp);
freeStack(stackAdjust);
// Restore the stack pointer from entry.
if (dynamicAlignment_)
Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0));
// Restore LR.
pop(lr);
// TODO: This one shouldn't be necessary -- check that callers
// aren't enforcing the ABI themselves!
syncStackPtr();
// If the ABI's return regs are where ION is expecting them, then
// no other work needs to be done.
#ifdef DEBUG
MOZ_ASSERT(inCall_);
inCall_ = false;
#endif
}
void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
vixl::UseScratchRegisterScope temps(this);
const Register scratch = temps.AcquireX().asUnsized();
movePtr(fun, scratch);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(scratch);
callWithABIPost(stackAdjust, result);
}
void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
vixl::UseScratchRegisterScope temps(this);
const Register scratch = temps.AcquireX().asUnsized();
loadPtr(fun, scratch);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(scratch);
callWithABIPost(stackAdjust, result);
}
//}}} check_macroassembler_style
} // namespace jit

View file

@ -59,29 +59,10 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
bool enoughMemory_;
uint32_t framePushed_;
// TODO: Can this be moved out of the MacroAssembler and into some shared code?
// TODO: All the code seems to be arch-independent, and it's weird to have this here.
bool inCall_;
bool usedOutParam_;
uint32_t args_;
uint32_t passedIntArgs_;
uint32_t passedFloatArgs_;
uint32_t passedArgTypes_;
uint32_t stackForCall_;
bool dynamicAlignment_;
MacroAssemblerCompat()
: vixl::MacroAssembler(),
enoughMemory_(true),
framePushed_(0),
inCall_(false),
usedOutParam_(false),
args_(0),
passedIntArgs_(0),
passedFloatArgs_(0),
passedArgTypes_(0),
stackForCall_(0),
dynamicAlignment_(false)
framePushed_(0)
{ }
protected:
@ -2650,47 +2631,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
Add(dest64, dest64, Operand(address.offset));
}
private:
void setupABICall(uint32_t args);
public:
// Setup a call to C/C++ code, given the number of general arguments it
// takes. Note that this only supports cdecl.
//
// In order for alignment to work correctly, the MacroAssembler must have a
// consistent view of the stack displacement. It is okay to call "push"
// manually, however, if the stack alignment were to change, the macro
// assembler should be notified before starting a call.
void setupAlignedABICall(uint32_t args) {
MOZ_CRASH("setupAlignedABICall");
}
// Sets up an ABI call for when the alignment is not known. This may need a
// scratch register.
void setupUnalignedABICall(uint32_t args, Register scratch);
// Arguments must be assigned to a C/C++ call in order. They are moved
// in parallel immediately before performing the call. This process may
// temporarily use more stack, in which case sp-relative addresses will be
// automatically adjusted. It is extremely important that sp-relative
// addresses are computed *after* setupABICall(). Furthermore, no
// operations should be emitted while setting arguments.
void passABIArg(const MoveOperand& from, MoveOp::Type type);
void passABIArg(Register reg);
void passABIArg(FloatRegister reg, MoveOp::Type type);
void passABIOutParam(Register reg);
private:
void callWithABIPre(uint32_t* stackAdjust);
void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
public:
// Emits a call to a C/C++ function, resolving all argument moves.
void callWithABI(void* fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(Address fun, MoveOp::Type result = MoveOp::GENERAL);
CodeOffsetLabel labelForPatch() {
return CodeOffsetLabel(nextOffset().getOffset());
}

View file

@ -14,6 +14,8 @@
#include "jit/arm64/SharedICHelpers-arm64.h"
#include "jit/VMFunctions.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;

View file

@ -3148,133 +3148,6 @@ MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source, FloatRegister
bind(&done);
}
void
MacroAssemblerMIPSCompat::setupABICall(uint32_t args)
{
MOZ_ASSERT(!inCall_);
inCall_ = true;
args_ = args;
passedArgs_ = 0;
passedArgTypes_ = 0;
usedArgSlots_ = 0;
firstArgType = MoveOp::GENERAL;
}
void
MacroAssemblerMIPSCompat::setupAlignedABICall(uint32_t args)
{
setupABICall(args);
dynamicAlignment_ = false;
}
void
MacroAssemblerMIPSCompat::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall(args);
dynamicAlignment_ = true;
ma_move(scratch, StackPointer);
// Force sp to be aligned
ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
as_sw(scratch, StackPointer, 0);
}
void
MacroAssemblerMIPSCompat::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
++passedArgs_;
if (!enoughMemory_)
return;
switch (type) {
case MoveOp::FLOAT32:
if (!usedArgSlots_) {
if (from.floatReg() != f12)
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
firstArgType = MoveOp::FLOAT32;
} else if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
(usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
if (from.floatReg() != f14)
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
} else {
Register destReg;
if (GetIntArgReg(usedArgSlots_, &destReg)) {
if (from.isGeneralReg() && from.reg() == destReg) {
// Nothing to do. Value is in the right register already
} else {
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
}
} else {
uint32_t disp = GetArgStackDisp(usedArgSlots_);
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
}
}
usedArgSlots_++;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
break;
case MoveOp::DOUBLE:
if (!usedArgSlots_) {
if (from.floatReg() != f12)
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
usedArgSlots_ = 2;
firstArgType = MoveOp::DOUBLE;
} else if (usedArgSlots_ <= 2) {
if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
(usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
if (from.floatReg() != f14)
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
} else {
// Create two moves so that cycles are found. Move emitter
// will have special case to handle this.
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a2), type);
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a3), type);
}
usedArgSlots_ = 4;
} else {
// Align if necessary
usedArgSlots_ += usedArgSlots_ % 2;
uint32_t disp = GetArgStackDisp(usedArgSlots_);
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
usedArgSlots_ += 2;
}
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
break;
case MoveOp::GENERAL:
Register destReg;
if (GetIntArgReg(usedArgSlots_, &destReg)) {
if (from.isGeneralReg() && from.reg() == destReg) {
// Nothing to do. Value is in the right register already
} else {
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
}
} else {
uint32_t disp = GetArgStackDisp(usedArgSlots_);
enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
}
usedArgSlots_++;
passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
break;
default:
MOZ_CRASH("Unexpected argument type");
}
}
void
MacroAssemblerMIPSCompat::passABIArg(Register reg)
{
passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}
void
MacroAssemblerMIPSCompat::passABIArg(FloatRegister freg, MoveOp::Type type)
{
passABIArg(MoveOperand(freg), type);
}
void
MacroAssemblerMIPSCompat::checkStackAlignment()
{
@ -3322,158 +3195,6 @@ MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
freeStack(aic.alignmentPadding);
}
void
MacroAssemblerMIPSCompat::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
MOZ_ASSERT(inCall_);
// Reserve place for $ra.
*stackAdjust = sizeof(intptr_t);
*stackAdjust += usedArgSlots_ > NumIntArgRegs ?
usedArgSlots_ * sizeof(intptr_t) :
NumIntArgRegs * sizeof(intptr_t);
uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
if (dynamicAlignment_) {
*stackAdjust += ComputeByteAlignment(*stackAdjust, ABIStackAlignment);
} else {
*stackAdjust += ComputeByteAlignment(asMasm().framePushed() + alignmentAtPrologue + *stackAdjust,
ABIStackAlignment);
}
asMasm().reserveStack(*stackAdjust);
// Save $ra because call is going to clobber it. Restore it in
// callWithABIPost. NOTE: This is needed for calls from BaselineIC.
// Maybe we can do this differently.
ma_sw(ra, Address(StackPointer, *stackAdjust - sizeof(intptr_t)));
// Position all arguments.
{
enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(asMasm());
emitter.emit(moveResolver_);
emitter.finish();
}
checkStackAlignment();
}
void
MacroAssemblerMIPSCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
// Restore ra value (as stored in callWithABIPre()).
ma_lw(ra, Address(StackPointer, stackAdjust - sizeof(intptr_t)));
if (dynamicAlignment_) {
// Restore sp value from stack (as stored in setupUnalignedABICall()).
ma_lw(StackPointer, Address(StackPointer, stackAdjust));
// Use adjustFrame instead of freeStack because we already restored sp.
asMasm().adjustFrame(-stackAdjust);
} else {
asMasm().freeStack(stackAdjust);
}
MOZ_ASSERT(inCall_);
inCall_ = false;
}
#if defined(DEBUG) && defined(JS_SIMULATOR_MIPS32)
static void
AssertValidABIFunctionType(uint32_t passedArgTypes)
{
switch (passedArgTypes) {
case Args_General0:
case Args_General1:
case Args_General2:
case Args_General3:
case Args_General4:
case Args_General5:
case Args_General6:
case Args_General7:
case Args_General8:
case Args_Double_None:
case Args_Int_Double:
case Args_Float32_Float32:
case Args_Double_Double:
case Args_Double_Int:
case Args_Double_DoubleInt:
case Args_Double_DoubleDouble:
case Args_Double_IntDouble:
case Args_Int_IntDouble:
case Args_Double_DoubleDoubleDouble:
case Args_Double_DoubleDoubleDoubleDouble:
break;
default:
MOZ_CRASH("Unexpected type");
}
}
#endif
void
MacroAssemblerMIPSCompat::callWithABI(void* fun, MoveOp::Type result)
{
#ifdef JS_SIMULATOR_MIPS32
MOZ_ASSERT(passedArgs_ <= 15);
passedArgTypes_ <<= ArgType_Shift;
switch (result) {
case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break;
case MoveOp::DOUBLE: passedArgTypes_ |= ArgType_Double; break;
case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break;
default: MOZ_CRASH("Invalid return type");
}
#ifdef DEBUG
AssertValidABIFunctionType(passedArgTypes_);
#endif
ABIFunctionType type = ABIFunctionType(passedArgTypes_);
fun = Simulator::RedirectNativeFunction(fun, type);
#endif
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
ma_call(ImmPtr(fun));
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerMIPSCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust, /* callFromAsmJS = */ true);
asMasm().call(imm);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerMIPSCompat::callWithABI(const Address& fun, MoveOp::Type result)
{
// Load the callee in t9, no instruction between the lw and call
// should clobber it. Note that we can't use fun.base because it may
// be one of the IntArg registers clobbered before the call.
ma_lw(t9, Address(fun.base, fun.offset));
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(t9);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerMIPSCompat::callWithABI(Register fun, MoveOp::Type result)
{
// Load the callee in t9, as above.
ma_move(t9, fun);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(t9);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(void* handler)
{
@ -3483,9 +3204,9 @@ MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(void* handler)
ma_move(a0, StackPointer); // Use a0 since it is a first function argument
// Call the handler.
setupUnalignedABICall(1, a1);
passABIArg(a0);
callWithABI(handler);
asMasm().setupUnalignedABICall(1, a1);
asMasm().passABIArg(a0);
asMasm().callWithABI(handler);
Label entryFrame;
Label catch_;
@ -3824,4 +3545,105 @@ MacroAssembler::call(JitCode* c)
ma_callJitHalfPush(ScratchRegister);
}
// ===============================================================
// ABI function calls.
void
MacroAssembler::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall();
dynamicAlignment_ = true;
ma_move(scratch, StackPointer);
// Force sp to be aligned
ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
as_sw(scratch, StackPointer, 0);
}
void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
MOZ_ASSERT(inCall_);
uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
// Reserve place for $ra.
stackForCall += sizeof(intptr_t);
if (dynamicAlignment_) {
stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
} else {
uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
ABIStackAlignment);
}
*stackAdjust = stackForCall;
reserveStack(stackForCall);
// Save $ra because the call is going to clobber it. Restore it in
// callWithABIPost. NOTE: This is needed for calls from BaselineIC.
// Maybe we can do this differently.
ma_sw(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
// Position all arguments.
{
enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(*this);
emitter.emit(moveResolver_);
emitter.finish();
}
assertStackAlignment(ABIStackAlignment);
}
void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
// Restore ra value (as stored in callWithABIPre()).
ma_lw(ra, Address(StackPointer, stackAdjust - sizeof(intptr_t)));
if (dynamicAlignment_) {
// Restore sp value from stack (as stored in setupUnalignedABICall()).
ma_lw(StackPointer, Address(StackPointer, stackAdjust));
// Use adjustFrame instead of freeStack because we already restored sp.
adjustFrame(-stackAdjust);
} else {
freeStack(stackAdjust);
}
#ifdef DEBUG
MOZ_ASSERT(inCall_);
inCall_ = false;
#endif
}
void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
// Load the callee in t9; no instruction between the lw and the call
// should clobber it. Note that we can't use fun.base because it may
// be one of the IntArg registers clobbered before the call.
ma_move(t9, fun);
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(t9);
callWithABIPost(stackAdjust, result);
}
void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
// Load the callee in t9, as above.
ma_lw(t9, Address(fun.base, fun.offset));
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(t9);
callWithABIPost(stackAdjust, result);
}
//}}} check_macroassembler_style

View file

@ -352,34 +352,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
private:
// Number of bytes the stack is adjusted inside a call to C. Calls to C may
// not be nested.
bool inCall_;
uint32_t args_;
// The actual number of arguments that were passed, used to assert that
// the initial number of arguments declared was correct.
uint32_t passedArgs_;
uint32_t passedArgTypes_;
uint32_t usedArgSlots_;
MoveOp::Type firstArgType;
bool dynamicAlignment_;
// Compute space needed for the function call and set the properties of the
// callee. It returns the space which has to be allocated for calling the
// function.
//
// arg Number of arguments of the function.
void setupABICall(uint32_t arg);
protected:
MoveResolver moveResolver_;
public:
MacroAssemblerMIPSCompat()
: inCall_(false)
{ }
public:
@ -1418,43 +1392,10 @@ public:
ma_cmp_set(dest, lhs, rhs, cond);
}
// Setup a call to C/C++ code, given the number of general arguments it
// takes. Note that this only supports cdecl.
//
// In order for alignment to work correctly, the MacroAssembler must have a
// consistent view of the stack displacement. It is okay to call "push"
// manually, however, if the stack alignment were to change, the macro
// assembler should be notified before starting a call.
void setupAlignedABICall(uint32_t args);
// Sets up an ABI call for when the alignment is not known. This may need a
// scratch register.
void setupUnalignedABICall(uint32_t args, Register scratch);
// Arguments must be assigned in a left-to-right order. This process may
// temporarily use more stack, in which case sp-relative addresses will be
// automatically adjusted. It is extremely important that sp-relative
// addresses are computed *after* setupABICall(). Furthermore, no
// operations should be emitted while setting arguments.
void passABIArg(const MoveOperand& from, MoveOp::Type type);
void passABIArg(Register reg);
void passABIArg(FloatRegister reg, MoveOp::Type type);
void passABIArg(const ValueOperand& regs);
protected:
bool buildOOLFakeExitFrame(void* fakeReturnAddr);
private:
void callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS = false);
void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
public:
// Emits a call to a C/C++ function, resolving all argument moves.
void callWithABI(void* fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(const Address& fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
CodeOffsetLabel labelForPatch() {
return CodeOffsetLabel(nextOffset().getOffset());
}

View file

@ -195,13 +195,8 @@ class MacroAssemblerNone : public Assembler
template <typename T> void call(T) { MOZ_CRASH(); }
template <typename T, typename S> void call(T, S) { MOZ_CRASH(); }
template <typename T> void callWithABI(T, MoveOp::Type v = MoveOp::GENERAL) { MOZ_CRASH(); }
void callAndPushReturnAddress(Label* label) { MOZ_CRASH(); }
void setupAlignedABICall(uint32_t) { MOZ_CRASH(); }
void setupUnalignedABICall(uint32_t, Register) { MOZ_CRASH(); }
template <typename T> void passABIArg(T, MoveOp::Type v = MoveOp::GENERAL) { MOZ_CRASH(); }
void callWithExitFrame(Label*) { MOZ_CRASH(); }
void callWithExitFrame(JitCode*) { MOZ_CRASH(); }
void callWithExitFrame(JitCode*, Register) { MOZ_CRASH(); }

View file

@ -11,6 +11,7 @@
#include "jsscriptinlines.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
using namespace js;

View file

@ -183,212 +183,6 @@ MacroAssemblerX64::finish()
MacroAssemblerX86Shared::finish();
}
void
MacroAssemblerX64::setupABICall(uint32_t args)
{
MOZ_ASSERT(!inCall_);
inCall_ = true;
args_ = args;
passedIntArgs_ = 0;
passedFloatArgs_ = 0;
stackForCall_ = ShadowStackSpace;
}
void
MacroAssemblerX64::setupAlignedABICall(uint32_t args)
{
setupABICall(args);
dynamicAlignment_ = false;
}
void
MacroAssemblerX64::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall(args);
dynamicAlignment_ = true;
movq(rsp, scratch);
andq(Imm32(~(ABIStackAlignment - 1)), rsp);
push(scratch);
}
void
MacroAssemblerX64::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
MoveOperand to;
switch (type) {
case MoveOp::FLOAT32:
case MoveOp::DOUBLE: {
FloatRegister dest;
if (GetFloatArgReg(passedIntArgs_, passedFloatArgs_++, &dest)) {
// Convert to the right type of register.
if (type == MoveOp::FLOAT32)
dest = dest.asSingle();
if (from.isFloatReg() && from.floatReg() == dest) {
// Nothing to do; the value is in the right register already
return;
}
to = MoveOperand(dest);
} else {
to = MoveOperand(StackPointer, stackForCall_);
switch (type) {
case MoveOp::FLOAT32: stackForCall_ += sizeof(float); break;
case MoveOp::DOUBLE: stackForCall_ += sizeof(double); break;
default: MOZ_CRASH("Unexpected float register class argument type");
}
}
break;
}
case MoveOp::GENERAL: {
Register dest;
if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) {
if (from.isGeneralReg() && from.reg() == dest) {
// Nothing to do; the value is in the right register already
return;
}
to = MoveOperand(dest);
} else {
to = MoveOperand(StackPointer, stackForCall_);
stackForCall_ += sizeof(int64_t);
}
break;
}
default:
MOZ_CRASH("Unexpected argument type");
}
enoughMemory_ = moveResolver_.addMove(from, to, type);
}
void
MacroAssemblerX64::passABIArg(Register reg)
{
passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}
void
MacroAssemblerX64::passABIArg(FloatRegister reg, MoveOp::Type type)
{
passABIArg(MoveOperand(reg), type);
}
void
MacroAssemblerX64::callWithABIPre(uint32_t* stackAdjust)
{
MOZ_ASSERT(inCall_);
MOZ_ASSERT(args_ == passedIntArgs_ + passedFloatArgs_);
if (dynamicAlignment_) {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
ABIStackAlignment);
} else {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + asMasm().framePushed(),
ABIStackAlignment);
}
asMasm().reserveStack(*stackAdjust);
// Position all arguments.
{
enoughMemory_ &= moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(asMasm());
emitter.emit(moveResolver_);
emitter.finish();
}
#ifdef DEBUG
{
Label good;
testPtr(rsp, Imm32(ABIStackAlignment - 1));
j(Equal, &good);
breakpoint();
bind(&good);
}
#endif
}
void
MacroAssemblerX64::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
asMasm().freeStack(stackAdjust);
if (dynamicAlignment_)
pop(rsp);
MOZ_ASSERT(inCall_);
inCall_ = false;
}
void
MacroAssemblerX64::callWithABI(void* fun, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(ImmPtr(fun));
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerX64::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(imm);
callWithABIPost(stackAdjust, result);
}
static bool
IsIntArgReg(Register reg)
{
for (uint32_t i = 0; i < NumIntArgRegs; i++) {
if (IntArgRegs[i] == reg)
return true;
}
return false;
}
void
MacroAssemblerX64::callWithABI(Address fun, MoveOp::Type result)
{
if (IsIntArgReg(fun.base)) {
// Callee register may be clobbered for an argument. Move the callee to
// r10, a volatile, non-argument register.
moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10), MoveOp::GENERAL);
fun.base = r10;
}
MOZ_ASSERT(!IsIntArgReg(fun.base));
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(fun);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerX64::callWithABI(Register fun, MoveOp::Type result)
{
if (IsIntArgReg(fun)) {
// Callee register may be clobbered for an argument. Move the callee to
// r10, a volatile, non-argument register.
moveResolver_.addMove(MoveOperand(fun), MoveOperand(r10), MoveOp::GENERAL);
fun = r10;
}
MOZ_ASSERT(!IsIntArgReg(fun));
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(fun);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerX64::handleFailureWithHandlerTail(void* handler)
{
@ -397,9 +191,9 @@ MacroAssemblerX64::handleFailureWithHandlerTail(void* handler)
movq(rsp, rax);
// Call the handler.
setupUnalignedABICall(1, rcx);
passABIArg(rax);
callWithABI(handler);
asMasm().setupUnalignedABICall(1, rcx);
asMasm().passABIArg(rax);
asMasm().callWithABI(handler);
Label entryFrame;
Label catch_;
@ -608,4 +402,115 @@ MacroAssembler::reserveStack(uint32_t amount)
framePushed_ += amount;
}
// ===============================================================
// ABI function calls.
void
MacroAssembler::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall();
dynamicAlignment_ = true;
movq(rsp, scratch);
andq(Imm32(~(ABIStackAlignment - 1)), rsp);
push(scratch);
}
void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
MOZ_ASSERT(inCall_);
uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
if (dynamicAlignment_) {
// sizeof(intptr_t) accounts for the saved stack pointer pushed by
// setupUnalignedABICall.
stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
ABIStackAlignment);
} else {
static_assert(sizeof(AsmJSFrame) % ABIStackAlignment == 0,
"AsmJSFrame should be part of the stack alignment.");
stackForCall += ComputeByteAlignment(stackForCall + framePushed(),
ABIStackAlignment);
}
*stackAdjust = stackForCall;
reserveStack(stackForCall);
// Position all arguments.
{
enoughMemory_ &= moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(*this);
emitter.emit(moveResolver_);
emitter.finish();
}
assertStackAlignment(ABIStackAlignment);
}
void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
freeStack(stackAdjust);
if (dynamicAlignment_)
pop(rsp);
#ifdef DEBUG
MOZ_ASSERT(inCall_);
inCall_ = false;
#endif
}
static bool
IsIntArgReg(Register reg)
{
for (uint32_t i = 0; i < NumIntArgRegs; i++) {
if (IntArgRegs[i] == reg)
return true;
}
return false;
}
void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
if (IsIntArgReg(fun)) {
// Callee register may be clobbered for an argument. Move the callee to
// r10, a volatile, non-argument register.
moveResolver_.addMove(MoveOperand(fun), MoveOperand(r10), MoveOp::GENERAL);
fun = r10;
}
MOZ_ASSERT(!IsIntArgReg(fun));
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(fun);
callWithABIPost(stackAdjust, result);
}
void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
Address safeFun = fun;
if (IsIntArgReg(safeFun.base)) {
// Callee register may be clobbered for an argument. Move the callee to
// r10, a volatile, non-argument register.
moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10), MoveOp::GENERAL);
safeFun.base = r10;
}
MOZ_ASSERT(!IsIntArgReg(safeFun.base));
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(safeFun);
callWithABIPost(stackAdjust, result);
}
//}}} check_macroassembler_style
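
For illustration only (not part of this commit): a hedged sketch of the situation the r10 fallback in callWithABINoProfiler guards against on System V x86-64, where the integer arguments go in rdi, rsi, rdx, rcx, r8, r9, so a callee pointer living in one of those registers could be clobbered by the argument moves. The emitting function is hypothetical.

// Hypothetical: the callee address happens to sit in rdi, which is also the
// first integer argument register, while the single argument sits in rsi.
static void
EmitCallThroughArgReg(MacroAssembler& masm, Register scratch)
{
    MOZ_ASSERT(scratch != rdi && scratch != rsi);
    masm.setupUnalignedABICall(1, scratch);
    masm.passABIArg(rsi);   // Resolved as a move rsi -> rdi ...
    masm.callWithABI(rdi);  // ... so the callee is rerouted through r10 first,
                            // and the emitted call targets r10, not rdi.
}

The MoveResolver sequences the two queued moves so that rdi is copied to r10 before it is overwritten by the rsi -> rdi argument move.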

View file

@ -40,15 +40,6 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
const MacroAssembler& asMasm() const;
private:
// Number of bytes the stack is adjusted inside a call to C. Calls to C may
// not be nested.
bool inCall_;
uint32_t args_;
uint32_t passedIntArgs_;
uint32_t passedFloatArgs_;
uint32_t stackForCall_;
bool dynamicAlignment_;
// These use SystemAllocPolicy since asm.js releases memory after each
// function is compiled, and these need to live until after all functions
// are compiled.
@ -83,11 +74,6 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
SimdMap simdMap_;
void setupABICall(uint32_t arg);
protected:
MoveResolver moveResolver_;
public:
using MacroAssemblerX86Shared::callWithExitFrame;
using MacroAssemblerX86Shared::branch32;
@ -96,7 +82,6 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
using MacroAssemblerX86Shared::store32;
MacroAssemblerX64()
: inCall_(false)
{
}
@ -1364,40 +1349,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
bind(&done);
}
// Setup a call to C/C++ code, given the number of general arguments it
// takes. Note that this only supports cdecl.
//
// In order for alignment to work correctly, the MacroAssembler must have a
// consistent view of the stack displacement. It is okay to call "push"
// manually, however, if the stack alignment were to change, the macro
// assembler should be notified before starting a call.
void setupAlignedABICall(uint32_t args);
// Sets up an ABI call for when the alignment is not known. This may need a
// scratch register.
void setupUnalignedABICall(uint32_t args, Register scratch);
// Arguments must be assigned to a C/C++ call in order. They are moved
// in parallel immediately before performing the call. This process may
// temporarily use more stack, in which case esp-relative addresses will be
// automatically adjusted. It is extremely important that esp-relative
// addresses are computed *after* setupABICall(). Furthermore, no
// operations should be emitted while setting arguments.
void passABIArg(const MoveOperand& from, MoveOp::Type type);
void passABIArg(Register reg);
void passABIArg(FloatRegister reg, MoveOp::Type type);
private:
void callWithABIPre(uint32_t* stackAdjust);
void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
public:
// Emits a call to a C/C++ function, resolving all argument moves.
void callWithABI(void* fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(Address fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
void handleFailureWithHandlerTail(void* handler);
void makeFrameDescriptor(Register frameSizeReg, FrameType type) {

View file

@ -20,6 +20,7 @@
#include "jsscriptinlines.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
using namespace js;

View file

@ -204,161 +204,6 @@ MacroAssemblerX86::finish()
}
}
void
MacroAssemblerX86::setupABICall(uint32_t args)
{
MOZ_ASSERT(!inCall_);
inCall_ = true;
args_ = args;
passedArgs_ = 0;
stackForCall_ = 0;
}
void
MacroAssemblerX86::setupAlignedABICall(uint32_t args)
{
setupABICall(args);
dynamicAlignment_ = false;
}
void
MacroAssemblerX86::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall(args);
dynamicAlignment_ = true;
movl(esp, scratch);
andl(Imm32(~(ABIStackAlignment - 1)), esp);
push(scratch);
}
void
MacroAssemblerX86::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
++passedArgs_;
MoveOperand to = MoveOperand(StackPointer, stackForCall_);
switch (type) {
case MoveOp::FLOAT32: stackForCall_ += sizeof(float); break;
case MoveOp::DOUBLE: stackForCall_ += sizeof(double); break;
case MoveOp::INT32: stackForCall_ += sizeof(int32_t); break;
case MoveOp::GENERAL: stackForCall_ += sizeof(intptr_t); break;
default: MOZ_CRASH("Unexpected argument type");
}
enoughMemory_ &= moveResolver_.addMove(from, to, type);
}
void
MacroAssemblerX86::passABIArg(Register reg)
{
passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}
void
MacroAssemblerX86::passABIArg(FloatRegister reg, MoveOp::Type type)
{
passABIArg(MoveOperand(reg), type);
}
void
MacroAssemblerX86::callWithABIPre(uint32_t* stackAdjust)
{
MOZ_ASSERT(inCall_);
MOZ_ASSERT(args_ == passedArgs_);
if (dynamicAlignment_) {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
ABIStackAlignment);
} else {
*stackAdjust = stackForCall_
+ ComputeByteAlignment(stackForCall_ + asMasm().framePushed(),
ABIStackAlignment);
}
asMasm().reserveStack(*stackAdjust);
// Position all arguments.
{
enoughMemory_ &= moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(asMasm());
emitter.emit(moveResolver_);
emitter.finish();
}
#ifdef DEBUG
{
// Check call alignment.
Label good;
test32(esp, Imm32(ABIStackAlignment - 1));
j(Equal, &good);
breakpoint();
bind(&good);
}
#endif
}
void
MacroAssemblerX86::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
asMasm().freeStack(stackAdjust);
if (result == MoveOp::DOUBLE) {
asMasm().reserveStack(sizeof(double));
fstp(Operand(esp, 0));
loadDouble(Operand(esp, 0), ReturnDoubleReg);
asMasm().freeStack(sizeof(double));
} else if (result == MoveOp::FLOAT32) {
asMasm().reserveStack(sizeof(float));
fstp32(Operand(esp, 0));
loadFloat32(Operand(esp, 0), ReturnFloat32Reg);
asMasm().freeStack(sizeof(float));
}
if (dynamicAlignment_)
pop(esp);
MOZ_ASSERT(inCall_);
inCall_ = false;
}
void
MacroAssemblerX86::callWithABI(void* fun, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(ImmPtr(fun));
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerX86::callWithABI(AsmJSImmPtr fun, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(fun);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerX86::callWithABI(const Address& fun, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(fun);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerX86::callWithABI(Register fun, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
asMasm().call(fun);
callWithABIPost(stackAdjust, result);
}
void
MacroAssemblerX86::handleFailureWithHandlerTail(void* handler)
{
@ -367,9 +212,9 @@ MacroAssemblerX86::handleFailureWithHandlerTail(void* handler)
movl(esp, eax);
// Call the handler.
setupUnalignedABICall(1, ecx);
passABIArg(eax);
callWithABI(handler);
asMasm().setupUnalignedABICall(1, ecx);
asMasm().passABIArg(eax);
asMasm().callWithABI(handler);
Label entryFrame;
Label catch_;
@ -589,4 +434,94 @@ MacroAssembler::reserveStack(uint32_t amount)
framePushed_ += amount;
}
// ===============================================================
// ABI function calls.
void
MacroAssembler::setupUnalignedABICall(uint32_t args, Register scratch)
{
setupABICall();
dynamicAlignment_ = true;
movl(esp, scratch);
andl(Imm32(~(ABIStackAlignment - 1)), esp);
push(scratch);
}
void
MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
{
MOZ_ASSERT(inCall_);
uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
if (dynamicAlignment_) {
// sizeof(intptr_t) accounts for the saved stack pointer pushed by
// setupUnalignedABICall.
stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
ABIStackAlignment);
} else {
uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
ABIStackAlignment);
}
*stackAdjust = stackForCall;
reserveStack(stackForCall);
// Position all arguments.
{
enoughMemory_ &= moveResolver_.resolve();
if (!enoughMemory_)
return;
MoveEmitter emitter(*this);
emitter.emit(moveResolver_);
emitter.finish();
}
assertStackAlignment(ABIStackAlignment);
}
void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
freeStack(stackAdjust);
if (result == MoveOp::DOUBLE) {
reserveStack(sizeof(double));
fstp(Operand(esp, 0));
loadDouble(Operand(esp, 0), ReturnDoubleReg);
freeStack(sizeof(double));
} else if (result == MoveOp::FLOAT32) {
reserveStack(sizeof(float));
fstp32(Operand(esp, 0));
loadFloat32(Operand(esp, 0), ReturnFloat32Reg);
freeStack(sizeof(float));
}
if (dynamicAlignment_)
pop(esp);
#ifdef DEBUG
MOZ_ASSERT(inCall_);
inCall_ = false;
#endif
}
void
MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(fun);
callWithABIPost(stackAdjust, result);
}
void
MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
{
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
call(fun);
callWithABIPost(stackAdjust, result);
}
//}}} check_macroassembler_style
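
For illustration only (not part of this commit): on x86, cdecl callees return floating-point results in the x87 register st(0), while Ion expects them in an SSE register, which is why callWithABIPost above spills through the stack with fstp/fstp32 and reloads into ReturnDoubleReg/ReturnFloat32Reg. Below is a hedged sketch of a call site that relies on this; the emitting function and the callee pointer are hypothetical.

// Hypothetical: |fun| points to a C function of type double (*)(void).
static void
EmitDoubleReturningCall(MacroAssembler& masm, Register scratch, void* fun)
{
    masm.setupUnalignedABICall(0, scratch);
    masm.callWithABI(fun, MoveOp::DOUBLE);
    // Passing MoveOp::DOUBLE as the result type is what makes callWithABIPost
    // move the value from st(0) into ReturnDoubleReg on this target.
}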

View file

@ -24,14 +24,6 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
const MacroAssembler& asMasm() const;
private:
// Number of bytes the stack is adjusted inside a call to C. Calls to C may
// not be nested.
bool inCall_;
uint32_t args_;
uint32_t passedArgs_;
uint32_t stackForCall_;
bool dynamicAlignment_;
struct Double {
double value;
AbsoluteLabel uses;
@ -99,7 +91,6 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
using MacroAssemblerX86Shared::call;
MacroAssemblerX86()
: inCall_(false)
{
}
@ -1141,40 +1132,7 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
bind(&done);
}
// Setup a call to C/C++ code, given the number of general arguments it
// takes. Note that this only supports cdecl.
//
// In order for alignment to work correctly, the MacroAssembler must have a
// consistent view of the stack displacement. It is okay to call "push"
// manually, however, if the stack alignment were to change, the macro
// assembler should be notified before starting a call.
void setupAlignedABICall(uint32_t args);
// Sets up an ABI call for when the alignment is not known. This may need a
// scratch register.
void setupUnalignedABICall(uint32_t args, Register scratch);
// Arguments must be assigned to a C/C++ call in order. They are moved
// in parallel immediately before performing the call. This process may
// temporarily use more stack, in which case esp-relative addresses will be
// automatically adjusted. It is extremely important that esp-relative
// addresses are computed *after* setupABICall(). Furthermore, no
// operations should be emitted while setting arguments.
void passABIArg(const MoveOperand& from, MoveOp::Type type);
void passABIArg(Register reg);
void passABIArg(FloatRegister reg, MoveOp::Type type);
private:
void callWithABIPre(uint32_t* stackAdjust);
void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
public:
// Emits a call to a C/C++ function, resolving all argument moves.
void callWithABI(void* fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(AsmJSImmPtr fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(const Address& fun, MoveOp::Type result = MoveOp::GENERAL);
void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
// Used from within an Exit frame to handle a pending exception.
void handleFailureWithHandlerTail(void* handler);

View file

@ -20,6 +20,8 @@
#include "jsscriptinlines.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;

View file

@ -13,6 +13,7 @@
#include "jsobjinlines.h"
#include "gc/Nursery-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "vm/Shape-inl.h"
using mozilla::ArrayLength;