Bug 507089 - TM/nanojit: introduce get/set methods for CallInfo::_argtypes. r=edwsmith.

--HG--
extra : convert_revision : 8075a19e11565e6de5f33ed829fe435e47e116ca
Nicholas Nethercote 2010-02-15 11:01:04 +11:00
Parent 17eccdbfcf
Commit 18c949dde4
16 changed files with 270 additions and 282 deletions


@ -141,12 +141,14 @@ enum ReturnType {
#define FN(name, args) \
{#name, CI(name, args)}
const int I32 = nanojit::ARGSIZE_LO;
const ArgType I32 = nanojit::ARGTYPE_I;
#ifdef NANOJIT_64BIT
const int I64 = nanojit::ARGSIZE_Q;
const ArgType I64 = nanojit::ARGTYPE_Q;
#endif
const int F64 = nanojit::ARGSIZE_F;
const int PTR = nanojit::ARGSIZE_P;
const ArgType F64 = nanojit::ARGTYPE_F;
const ArgType PTR = nanojit::ARGTYPE_P;
const ArgType WRD = nanojit::ARGTYPE_P;
const ArgType VOID = nanojit::ARGTYPE_V;
enum LirTokenType {
NAME, NUMBER, PUNCT, NEWLINE
@ -329,24 +331,6 @@ private:
void endFragment();
};
// Meaning: arg 'm' of 'n' has size 'sz'.
static int argMask(int sz, int m, int n)
{
// Order examples, from MSB to LSB:
// - 3 args: 000 | 000 | 000 | 000 | 000 | arg1| arg2| arg3| ret
// - 8 args: arg1| arg2| arg3| arg4| arg5| arg6| arg7| arg8| ret
// If the mask encoding reversed the arg order the 'n' parameter wouldn't
// be necessary, as argN would always be in the same place in the
// bitfield.
return sz << ((1 + n - m) * ARGSIZE_SHIFT);
}
// Return value has size 'sz'.
static int retMask(int sz)
{
return sz;
}
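To make the retiring encoding concrete, here is a minimal standalone sketch, not the real nanojit definitions: local copies of the two helpers above, with ARGSIZE_SHIFT assumed to be 3 (its value in the enum this patch removes), computing the mask for puts, which takes one pointer and returns an int32_t.

#include <cstdio>

static const int ARGSIZE_SHIFT = 3;   // 3 bits per field (assumed from the old enum)
static const int I32 = 2;             // ARGSIZE_I
static const int PTR = 2;             // ARGSIZE_P aliases ARGSIZE_I on 32-bit targets

static int argMask(int sz, int m, int n) { return sz << ((1 + n - m) * ARGSIZE_SHIFT); }
static int retMask(int sz) { return sz; }

int main() {
    // puts: arg 1 of 1 is a pointer, return value is int32_t.
    int sig = argMask(PTR, 1, 1) | retMask(I32);
    printf("0x%x\n", sig);            // 0x12: 010 (arg1) in bits 3..5, 010 (ret) in bits 0..2
    return 0;
}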
// 'sin' is overloaded on some platforms, so taking its address
// doesn't quite work. Provide a do-nothing function here
// that's not overloaded.
@ -356,10 +340,10 @@ double sinFn(double d) {
#define sin sinFn
Function functions[] = {
FN(puts, argMask(PTR, 1, 1) | retMask(I32)),
FN(sin, argMask(F64, 1, 1) | retMask(F64)),
FN(malloc, argMask(PTR, 1, 1) | retMask(PTR)),
FN(free, argMask(PTR, 1, 1) | retMask(I32))
FN(puts, CallInfo::typeSig1(I32, PTR)),
FN(sin, CallInfo::typeSig1(F64, F64)),
FN(malloc, CallInfo::typeSig1(PTR, WRD)),
FN(free, CallInfo::typeSig1(VOID, PTR)),
};
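For comparison, the new entries pack the same bits: by the typeSig1 definition added later in this patch, CallInfo::typeSig1(I32, PTR) expands to PTR << 3 | I32, the identical layout, without the 1-based argument index and argument-count bookkeeping that argMask required.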
template<typename out, typename in> out
@ -676,32 +660,30 @@ FragmentAssembler::assemble_call(const string &op)
} else {
// User-defined function: infer CallInfo details (ABI, arg types, ret
// type) from the call site.
int ty;
ci->_abi = _abi;
ci->_argtypes = 0;
size_t argc = mTokens.size();
ArgType argTypes[MAXARGS];
for (size_t i = 0; i < argc; ++i) {
NanoAssert(i < MAXARGS); // should give a useful error msg if this fails
args[i] = ref(mTokens[mTokens.size() - (i+1)]);
if (args[i]->isF64()) ty = ARGSIZE_F;
if (args[i]->isF64()) argTypes[i] = F64;
#ifdef NANOJIT_64BIT
else if (args[i]->isI64()) ty = ARGSIZE_Q;
else if (args[i]->isI64()) argTypes[i] = I64;
#endif
else ty = ARGSIZE_I;
else argTypes[i] = I32;
// Nb: i+1 because argMask() uses 1-based arg counting.
ci->_argtypes |= argMask(ty, i+1, argc);
}
// Select return type from opcode.
ty = 0;
if (mOpcode == LIR_icall) ty = ARGSIZE_LO;
else if (mOpcode == LIR_fcall) ty = ARGSIZE_F;
ArgType retType = ARGTYPE_V;
if (mOpcode == LIR_icall) retType = I32;
else if (mOpcode == LIR_fcall) retType = F64;
#ifdef NANOJIT_64BIT
else if (mOpcode == LIR_qcall) ty = ARGSIZE_Q;
else if (mOpcode == LIR_qcall) retType = I64;
#endif
else nyi("callh");
ci->_argtypes |= retMask(ty);
ci->_typesig = CallInfo::typeSigN(retType, argc, argTypes);
}
return mLir->insCall(ci, args);
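The replacement is bit-for-bit compatible with the old loop, which can be checked against the two definitions in this patch: the old code placed argument i at argMask(ty, i+1, argc) = ty << ((1 + argc - (i+1)) * 3) = ty << ((argc - i) * 3), and typeSigN() places argTypes[i] at exactly _typesig_fieldszb * (argc - i) = (argc - i) * 3 bits, so the inferred signature value is unchanged.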
@ -1216,58 +1198,22 @@ static double f_F_F8(double a, double b, double c, double d,
}
#ifdef NANOJIT_64BIT
static void f_N_IQF(int32_t, uint64_t, double)
static void f_V_IQF(int32_t, uint64_t, double)
{
return; // no need to do anything
}
#endif
const CallInfo ci_I_I1 = CI(f_I_I1, argMask(I32, 1, 1) |
retMask(I32));
const CallInfo ci_I_I6 = CI(f_I_I6, argMask(I32, 1, 6) |
argMask(I32, 2, 6) |
argMask(I32, 3, 6) |
argMask(I32, 4, 6) |
argMask(I32, 5, 6) |
argMask(I32, 6, 6) |
retMask(I32));
const CallInfo ci_I_I1 = CI(f_I_I1, CallInfo::typeSig1(I32, I32));
const CallInfo ci_I_I6 = CI(f_I_I6, CallInfo::typeSig6(I32, I32, I32, I32, I32, I32, I32));
#ifdef NANOJIT_64BIT
const CallInfo ci_Q_Q2 = CI(f_Q_Q2, argMask(I64, 1, 2) |
argMask(I64, 2, 2) |
retMask(I64));
const CallInfo ci_Q_Q7 = CI(f_Q_Q7, argMask(I64, 1, 7) |
argMask(I64, 2, 7) |
argMask(I64, 3, 7) |
argMask(I64, 4, 7) |
argMask(I64, 5, 7) |
argMask(I64, 6, 7) |
argMask(I64, 7, 7) |
retMask(I64));
const CallInfo ci_Q_Q2 = CI(f_Q_Q2, CallInfo::typeSig2(I64, I64, I64));
const CallInfo ci_Q_Q7 = CI(f_Q_Q7, CallInfo::typeSig7(I64, I64, I64, I64, I64, I64, I64, I64));
#endif
const CallInfo ci_F_F3 = CI(f_F_F3, argMask(F64, 1, 3) |
argMask(F64, 2, 3) |
argMask(F64, 3, 3) |
retMask(F64));
const CallInfo ci_F_F8 = CI(f_F_F8, argMask(F64, 1, 8) |
argMask(F64, 2, 8) |
argMask(F64, 3, 8) |
argMask(F64, 4, 8) |
argMask(F64, 5, 8) |
argMask(F64, 6, 8) |
argMask(F64, 7, 8) |
argMask(F64, 8, 8) |
retMask(F64));
const CallInfo ci_F_F3 = CI(f_F_F3, CallInfo::typeSig3(F64, F64, F64, F64));
const CallInfo ci_F_F8 = CI(f_F_F8, CallInfo::typeSig8(F64, F64, F64, F64, F64, F64, F64, F64, F64));
#ifdef NANOJIT_64BIT
const CallInfo ci_N_IQF = CI(f_N_IQF, argMask(I32, 1, 3) |
argMask(I64, 2, 3) |
argMask(F64, 3, 3) |
retMask(ARGSIZE_NONE));
const CallInfo ci_V_IQF = CI(f_V_IQF, CallInfo::typeSig3(VOID, I32, I64, F64));
#endif
// Generate a random block containing nIns instructions, plus a few more
@ -1911,7 +1857,7 @@ FragmentAssembler::assembleRandomFragment(int nIns)
if (!Is.empty() && !Qs.empty() && !Fs.empty()) {
// Nb: args[] holds the args in reverse order... sigh.
LIns* args[3] = { rndPick(Fs), rndPick(Qs), rndPick(Is) };
ins = mLir->insCall(&ci_N_IQF, args);
ins = mLir->insCall(&ci_V_IQF, args);
n++;
}
break;


@ -2360,35 +2360,6 @@ namespace nanojit
}
#endif // NJ_VERBOSE
uint32_t CallInfo::_count_args(uint32_t mask) const
{
uint32_t argc = 0;
uint32_t argt = _argtypes;
for (uint32_t i = 0; i < MAXARGS; ++i) {
argt >>= ARGSIZE_SHIFT;
if (!argt)
break;
argc += (argt & mask) != 0;
}
return argc;
}
uint32_t CallInfo::get_sizes(ArgSize* sizes) const
{
uint32_t argt = _argtypes;
uint32_t argc = 0;
for (uint32_t i = 0; i < MAXARGS; i++) {
argt >>= ARGSIZE_SHIFT;
ArgSize a = ArgSize(argt & ARGSIZE_MASK_ANY);
if (a != ARGSIZE_NONE) {
sizes[argc++] = a;
} else {
break;
}
}
return argc;
}
void LabelStateMap::add(LIns *label, NIns *addr, RegAlloc &regs) {
LabelState *st = new (alloc) LabelState(addr, regs);
labels.put(label, st);


@ -73,6 +73,46 @@ namespace nanojit
#endif /* NANOJIT_VERBOSE */
uint32_t CallInfo::count_args() const
{
uint32_t argc = 0;
uint32_t argt = _typesig;
argt >>= _typesig_fieldszb; // remove retType
while (argt) {
argc++;
argt >>= _typesig_fieldszb;
}
return argc;
}
uint32_t CallInfo::count_iargs() const
{
uint32_t argc = 0;
uint32_t argt = _typesig;
argt >>= _typesig_fieldszb; // remove retType
while (argt) {
ArgType a = ArgType(argt & _typesig_fieldmask);
if (a == ARGTYPE_I || a == ARGTYPE_U)
argc++;
argt >>= _typesig_fieldszb;
}
return argc;
}
uint32_t CallInfo::getArgTypes(ArgType* argTypes) const
{
uint32_t argc = 0;
uint32_t argt = _typesig;
argt >>= _typesig_fieldszb; // remove retType
while (argt) {
ArgType a = ArgType(argt & _typesig_fieldmask);
argTypes[argc] = a;
argc++;
argt >>= _typesig_fieldszb;
}
return argc;
}
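A minimal round-trip sketch of these walkers, using a local re-implementation of the 3-bit field arithmetic rather than the real CallInfo class, with ARGTYPE values as in the enum later in this patch:

#include <cstdint>
#include <cstdio>

enum ArgType { ARGTYPE_V = 0, ARGTYPE_F = 1, ARGTYPE_I = 2, ARGTYPE_U = 3 };
static const int fieldszb = 3, fieldmask = 7;  // _typesig_fieldszb / _typesig_fieldmask

int main() {
    // typeSig2(F64, F64, F64), i.e. 'double f(double, double)':
    uint32_t sig = ARGTYPE_F << fieldszb*2 | ARGTYPE_F << fieldszb*1 | ARGTYPE_F;

    uint32_t argt = sig >> fieldszb;   // remove retType, as count_args() does
    uint32_t argc = 0;
    while (argt) {                     // ARGTYPE_V == 0 never appears as an arg type,
        argc++;                        // so a zero field terminates the walk
        argt >>= fieldszb;
    }
    printf("argc=%u retType=%u\n", argc, sig & fieldmask);  // argc=2 retType=1
    return 0;
}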
// implementation
#ifdef NJ_VERBOSE
void ReverseLister::finish()
@ -2224,11 +2264,11 @@ namespace nanojit
static int32_t FASTCALL fle(double a, double b) { return a <= b; }
static int32_t FASTCALL fge(double a, double b) { return a >= b; }
#define SIG_F_I (ARGSIZE_F | ARGSIZE_I << ARGSIZE_SHIFT*1)
#define SIG_F_U (ARGSIZE_F | ARGSIZE_U << ARGSIZE_SHIFT*1)
#define SIG_F_F (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1)
#define SIG_F_FF (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2)
#define SIG_B_FF (ARGSIZE_B | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2)
#define SIG_F_I CallInfo::typeSig1(ARGTYPE_F, ARGTYPE_I)
#define SIG_F_U CallInfo::typeSig1(ARGTYPE_F, ARGTYPE_U)
#define SIG_F_F CallInfo::typeSig1(ARGTYPE_F, ARGTYPE_F)
#define SIG_F_FF CallInfo::typeSig2(ARGTYPE_F, ARGTYPE_F, ARGTYPE_F)
#define SIG_B_FF CallInfo::typeSig2(ARGTYPE_B, ARGTYPE_F, ARGTYPE_F)
#define SF_CALLINFO(name, typesig) \
static const CallInfo name##_ci = \
@ -2318,14 +2358,13 @@ namespace nanojit
}
LIns* SoftFloatFilter::insCall(const CallInfo *ci, LInsp args[]) {
uint32_t argt = ci->_argtypes;
for (uint32_t i = 0, argsizes = argt >> ARGSIZE_SHIFT; argsizes != 0; i++, argsizes >>= ARGSIZE_SHIFT)
uint32_t nArgs = ci->count_args();
for (uint32_t i = 0; i < nArgs; i++)
args[i] = split(args[i]);
if ((argt & ARGSIZE_MASK_ANY) == ARGSIZE_F) {
// this function returns a double as two 32bit values, so replace
// call with qjoin(qhi(call), call)
if (ci->returnType() == ARGTYPE_F) {
// This function returns a double as two 32bit values, so replace
// call with qjoin(qhi(call), call).
return split(ci, args);
}
return out->insCall(ci, args);
@ -2820,26 +2859,26 @@ namespace nanojit
LIns* ValidateWriter::insCall(const CallInfo *ci, LIns* args0[])
{
ArgSize sizes[MAXARGS];
uint32_t nArgs = ci->get_sizes(sizes);
ArgType argTypes[MAXARGS];
uint32_t nArgs = ci->getArgTypes(argTypes);
LTy formals[MAXARGS];
LIns* args[MAXARGS]; // in left-to-right order, unlike args0[]
LOpcode op = getCallOpcode(ci);
// This loop iterates over the args from right-to-left (because
// arg() and get_sizes() use right-to-left order), but puts the
// results into formals[] and args[] in left-to-right order so
// that arg numbers in error messages make sense to the user.
// arg() and getArgTypes() use right-to-left order), but puts the
// results into formals[] and args[] in left-to-right order so that
// arg numbers in error messages make sense to the user.
for (uint32_t i = 0; i < nArgs; i++) {
uint32_t i2 = nArgs - i - 1; // converts right-to-left to left-to-right
switch (sizes[i]) {
case ARGSIZE_I:
case ARGSIZE_U: formals[i2] = LTy_I32; break;
switch (argTypes[i]) {
case ARGTYPE_I:
case ARGTYPE_U: formals[i2] = LTy_I32; break;
#ifdef NANOJIT_64BIT
case ARGSIZE_Q: formals[i2] = LTy_I64; break;
case ARGTYPE_Q: formals[i2] = LTy_I64; break;
#endif
case ARGSIZE_F: formals[i2] = LTy_F64; break;
case ARGTYPE_F: formals[i2] = LTy_F64; break;
default: NanoAssert(0); formals[i2] = LTy_Void; break;
}
args[i2] = args0[i];


@ -99,23 +99,20 @@ namespace nanojit
ABI_CDECL
};
enum ArgSize {
ARGSIZE_NONE = 0,
ARGSIZE_F = 1, // double (64bit)
ARGSIZE_I = 2, // int32_t
// All values must fit into three bits. See CallInfo for details.
enum ArgType {
ARGTYPE_V = 0, // void
ARGTYPE_F = 1, // double (64bit)
ARGTYPE_I = 2, // int32_t
ARGTYPE_U = 3, // uint32_t
#ifdef NANOJIT_64BIT
ARGSIZE_Q = 3, // uint64_t
ARGTYPE_Q = 4, // uint64_t
#endif
ARGSIZE_U = 6, // uint32_t
ARGSIZE_MASK_ANY = 7,
ARGSIZE_MASK_INT = 2,
ARGSIZE_SHIFT = 3,
// aliases
ARGSIZE_P = PTR_SIZE(ARGSIZE_I, ARGSIZE_Q), // pointer
ARGSIZE_LO = ARGSIZE_I, // int32_t
ARGSIZE_B = ARGSIZE_I, // bool
ARGSIZE_V = ARGSIZE_NONE // void
ARGTYPE_P = PTR_SIZE(ARGTYPE_I, ARGTYPE_Q), // pointer
ARGTYPE_LO = ARGTYPE_I, // int32_t
ARGTYPE_B = ARGTYPE_I // bool
};
enum IndirectCall {
@ -124,38 +121,77 @@ namespace nanojit
struct CallInfo
{
private:
// In _typesig, each entry is three bits.
static const int _typesig_fieldszb = 3;
static const int _typesig_fieldmask = 7;
public:
uintptr_t _address;
uint32_t _argtypes:27; // 9 3-bit fields indicating arg type, by ARGSIZE above (including ret type): a1 a2 a3 a4 a5 ret
uint8_t _cse:1; // true if no side effects
uint8_t _fold:1; // true if no side effects
uint32_t _typesig:27; // 9 3-bit fields indicating arg type, by ArgType above (including ret type): a1 a2 a3 a4 a5 ret
uint8_t _cse:1; // true if no side effects
uint8_t _fold:1; // true if no side effects
AbiKind _abi:3;
verbose_only ( const char* _name; )
uint32_t _count_args(uint32_t mask) const;
// Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg.
uint32_t get_sizes(ArgSize* sizes) const;
uint32_t count_args() const;
uint32_t count_iargs() const;
// Nb: uses right-to-left order, eg. types[0] is the type of the right-most arg.
uint32_t getArgTypes(ArgType* types) const;
inline ArgSize returnType() const {
return ArgSize(_argtypes & ARGSIZE_MASK_ANY);
inline ArgType returnType() const {
return ArgType(_typesig & _typesig_fieldmask);
}
// Note that this indexes arguments *backwards*, that is to
// get the Nth arg, you have to ask for index (numargs - N).
// See mozilla bug 525815 for fixing this.
inline ArgSize argType(uint32_t arg) const {
return ArgSize((_argtypes >> (ARGSIZE_SHIFT * (arg+1))) & ARGSIZE_MASK_ANY);
// Index args in reverse order, i.e. arg(0) returns the rightmost arg.
// See mozilla bug 525815 for fixing this.
inline ArgType argType(uint32_t arg) const {
return ArgType((_typesig >> (_typesig_fieldszb * (arg+1))) & _typesig_fieldmask);
}
// The following encode 'r func()' through to 'r func(a1, a2, a3, a4, a5, a6, a7, a8)'.
static inline uint32_t typeSig0(ArgType r) {
return r;
}
static inline uint32_t typeSig1(ArgType r, ArgType a1) {
return a1 << _typesig_fieldszb*1 | typeSig0(r);
}
static inline uint32_t typeSig2(ArgType r, ArgType a1, ArgType a2) {
return a1 << _typesig_fieldszb*2 | typeSig1(r, a2);
}
static inline uint32_t typeSig3(ArgType r, ArgType a1, ArgType a2, ArgType a3) {
return a1 << _typesig_fieldszb*3 | typeSig2(r, a2, a3);
}
static inline uint32_t typeSig4(ArgType r, ArgType a1, ArgType a2, ArgType a3,
ArgType a4) {
return a1 << _typesig_fieldszb*4 | typeSig3(r, a2, a3, a4);
}
static inline uint32_t typeSig5(ArgType r, ArgType a1, ArgType a2, ArgType a3,
ArgType a4, ArgType a5) {
return a1 << _typesig_fieldszb*5 | typeSig4(r, a2, a3, a4, a5);
}
static inline uint32_t typeSig6(ArgType r, ArgType a1, ArgType a2, ArgType a3,
ArgType a4, ArgType a5, ArgType a6) {
return a1 << _typesig_fieldszb*6 | typeSig5(r, a2, a3, a4, a5, a6);
}
static inline uint32_t typeSig7(ArgType r, ArgType a1, ArgType a2, ArgType a3,
ArgType a4, ArgType a5, ArgType a6, ArgType a7) {
return a1 << _typesig_fieldszb*7 | typeSig6(r, a2, a3, a4, a5, a6, a7);
}
static inline uint32_t typeSig8(ArgType r, ArgType a1, ArgType a2, ArgType a3, ArgType a4,
ArgType a5, ArgType a6, ArgType a7, ArgType a8) {
return a1 << _typesig_fieldszb*8 | typeSig7(r, a2, a3, a4, a5, a6, a7, a8);
}
// Encode 'r func(a1, ..., aN)'
static inline uint32_t typeSigN(ArgType r, int N, ArgType a[]) {
uint32_t typesig = r;
for (int i = 0; i < N; i++) {
typesig |= a[i] << _typesig_fieldszb*(N-i);
}
return typesig;
}
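A quick sanity check that the fixed-arity builders agree with typeSigN; a standalone sketch with local copies of the helpers, where fieldszb and a plain typedef stand in for _typesig_fieldszb and the real ArgType enum:

#include <cstdint>
#include <cassert>

typedef uint32_t ArgType;              // simplified stand-in for the real enum
static const int fieldszb = 3;

static uint32_t typeSig0(ArgType r) { return r; }
static uint32_t typeSig1(ArgType r, ArgType a1) { return a1 << fieldszb*1 | typeSig0(r); }
static uint32_t typeSig2(ArgType r, ArgType a1, ArgType a2) { return a1 << fieldszb*2 | typeSig1(r, a2); }

static uint32_t typeSigN(ArgType r, int N, ArgType a[]) {
    uint32_t typesig = r;
    for (int i = 0; i < N; i++)
        typesig |= a[i] << fieldszb*(N-i);  // a[0] gets the highest field, like a1 in typeSig2
    return typesig;
}

int main() {
    ArgType a[] = { 1, 2 };            // ARGTYPE_F, ARGTYPE_I
    // Both builders must produce the same packed value.
    assert(typeSig2(2, a[0], a[1]) == typeSigN(2, 2, a));
    return 0;
}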
inline bool isIndirect() const {
return _address < 256;
}
inline uint32_t count_args() const {
return _count_args(ARGSIZE_MASK_ANY);
}
inline uint32_t count_iargs() const {
return _count_args(ARGSIZE_MASK_INT);
}
// fargs = args - iargs
};
/*
@ -197,14 +233,14 @@ namespace nanojit
inline LOpcode getCallOpcode(const CallInfo* ci) {
LOpcode op = LIR_pcall;
switch (ci->returnType()) {
case ARGSIZE_NONE: op = LIR_pcall; break;
case ARGSIZE_I:
case ARGSIZE_U: op = LIR_icall; break;
case ARGSIZE_F: op = LIR_fcall; break;
case ARGTYPE_V: op = LIR_pcall; break;
case ARGTYPE_I:
case ARGTYPE_U: op = LIR_icall; break;
case ARGTYPE_F: op = LIR_fcall; break;
#ifdef NANOJIT_64BIT
case ARGSIZE_Q: op = LIR_qcall; break;
case ARGTYPE_Q: op = LIR_qcall; break;
#endif
default: NanoAssert(0); break;
default: NanoAssert(0); break;
}
return op;
}


@ -597,19 +597,19 @@ Assembler::genEpilogue()
* alignment.
*/
void
Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd)
{
// The stack pointer must always be at least aligned to 4 bytes.
NanoAssert((stkd & 3) == 0);
if (sz == ARGSIZE_F) {
if (ty == ARGTYPE_F) {
// This task is fairly complex and so is delegated to asm_arg_64.
asm_arg_64(arg, r, stkd);
} else {
NanoAssert(sz == ARGSIZE_I || sz == ARGSIZE_U);
NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
// pre-assign registers R0-R3 for arguments (if they fit)
if (r < R4) {
asm_regarg(sz, arg, r);
asm_regarg(ty, arg, r);
r = nextreg(r);
} else {
asm_stkarg(arg, stkd);
@ -620,7 +620,7 @@ Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
// Encode a 64-bit floating-point argument using the appropriate ABI.
// This function operates in the same way as asm_arg, except that it will only
// handle arguments where (ArgSize)sz == ARGSIZE_F.
// handle arguments where (ArgType)ty == ARGTYPE_F.
void
Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
{
@ -665,8 +665,8 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
if (config.arm_vfp) {
FMRRD(ra, rb, fp_reg);
} else {
asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
asm_regarg(ARGSIZE_LO, arg->oprnd2(), rb);
asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
asm_regarg(ARGTYPE_LO, arg->oprnd2(), rb);
}
#ifndef NJ_ARM_EABI
@ -699,7 +699,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
// Without VFP, we can simply use asm_regarg and asm_stkarg to
// encode the two 32-bit words as we don't need to load from a VFP
// register.
asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra);
asm_stkarg(arg->oprnd2(), 0);
stkd += 4;
}
@ -720,10 +720,10 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
}
void
Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
{
NanoAssert(isKnownReg(r));
if (sz & ARGSIZE_MASK_INT)
if (ty == ARGTYPE_I || ty == ARGTYPE_U)
{
// arg goes in specific register
if (p->isconst()) {
@ -752,7 +752,7 @@ Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
}
else
{
NanoAssert(sz == ARGSIZE_F);
NanoAssert(ty == ARGTYPE_F);
// fpu argument in register - should never happen since FPU
// args are converted to two 32-bit ints on ARM
NanoAssert(false);
@ -848,10 +848,10 @@ Assembler::asm_call(LInsp ins)
evictScratchRegs();
const CallInfo* call = ins->callInfo();
ArgSize sizes[MAXARGS];
uint32_t argc = call->get_sizes(sizes);
bool indirect = call->isIndirect();
const CallInfo* ci = ins->callInfo();
ArgType argTypes[MAXARGS];
uint32_t argc = ci->getArgTypes(argTypes);
bool indirect = ci->isIndirect();
// If we aren't using VFP, assert that the LIR operation is an integer
// function call.
@ -862,12 +862,9 @@ Assembler::asm_call(LInsp ins)
// See comments above for more details as to why this is necessary here
// for floating point calls, but not for integer calls.
if (config.arm_vfp && ins->isUsed()) {
// Determine the size (and type) of the instruction result.
ArgSize rsize = (ArgSize)(call->_argtypes & ARGSIZE_MASK_ANY);
// If the result size is a floating-point value, treat the result
// specially, as described previously.
if (rsize == ARGSIZE_F) {
if (ci->returnType() == ARGTYPE_F) {
Register rr = ins->deprecated_getReg();
NanoAssert(ins->opcode() == LIR_fcall);
@ -902,7 +899,7 @@ Assembler::asm_call(LInsp ins)
// interlock in the "long" branch sequence by manually loading the
// target address into LR ourselves before setting up the parameters
// in other registers.
BranchWithLink((NIns*)call->_address);
BranchWithLink((NIns*)ci->_address);
} else {
// Indirect call: we assign the address arg to LR since it's not
// used for regular arguments, and is otherwise scratch since it's
@ -917,7 +914,7 @@ Assembler::asm_call(LInsp ins)
} else {
BLX(LR);
}
asm_regarg(ARGSIZE_LO, ins->arg(--argc), LR);
asm_regarg(ARGTYPE_LO, ins->arg(--argc), LR);
}
// Encode the arguments, starting at R0 and with an empty argument stack.
@ -930,7 +927,7 @@ Assembler::asm_call(LInsp ins)
// in reverse order.
uint32_t i = argc;
while(i--) {
asm_arg(sizes[i], ins->arg(i), r, stkd);
asm_arg(argTypes[i], ins->arg(i), r, stkd);
}
if (stkd > max_out_args) {


@ -220,14 +220,14 @@ verbose_only( extern const char* shiftNames[]; )
void nativePageReset(); \
void nativePageSetup(); \
void asm_quad_nochk(Register, int32_t, int32_t); \
void asm_regarg(ArgSize, LInsp, Register); \
void asm_regarg(ArgType, LInsp, Register); \
void asm_stkarg(LInsp p, int stkd); \
void asm_cmpi(Register, int32_t imm); \
void asm_ldr_chk(Register d, Register b, int32_t off, bool chk); \
void asm_cmp(LIns *cond); \
void asm_fcmp(LIns *cond); \
void asm_ld_imm(Register d, int32_t imm, bool chk = true); \
void asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd); \
void asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd); \
void asm_arg_64(LInsp arg, Register& r, int& stkd); \
void asm_add_imm(Register rd, Register rn, int32_t imm, int stat = 0); \
void asm_sub_imm(Register rd, Register rn, int32_t imm, int stat = 0); \


@ -389,10 +389,11 @@ namespace nanojit
}
}
void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
{
NanoAssert(isKnownReg(r));
if (sz & ARGSIZE_MASK_INT) {
if (ty == ARGTYPE_I || ty == ARGTYPE_U)
{
// arg goes in specific register
if (p->isconst())
asm_li(r, p->imm32());
@ -464,7 +465,7 @@ namespace nanojit
// Encode a 64-bit floating-point argument using the appropriate ABI.
// This function operates in the same way as asm_arg, except that it will only
// handle arguments where (ArgSize)sz == ARGSIZE_F.
// handle arguments where (ArgType)ty == ARGTYPE_F.
void
Assembler::asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd)
{
@ -1515,18 +1516,18 @@ namespace nanojit
* on the stack.
*/
void
Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd)
Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd)
{
// The stack offset must always be at least aligned to 4 bytes.
NanoAssert((stkd & 3) == 0);
if (sz == ARGSIZE_F) {
if (ty == ARGTYPE_F) {
// This task is fairly complex and so is delegated to asm_arg_64.
asm_arg_64(arg, r, fr, stkd);
}
else if (sz & ARGSIZE_MASK_INT) {
} else {
NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U);
if (stkd < 16) {
asm_regarg(sz, arg, r);
asm_regarg(ty, arg, r);
fr = nextreg(fr);
r = nextreg(r);
}
@ -1537,11 +1538,6 @@ namespace nanojit
fr = r;
stkd += 4;
}
else {
NanoAssert(sz == ARGSIZE_Q);
// shouldn't have 64 bit int params
NanoAssert(false);
}
}
void
@ -1570,10 +1566,10 @@ namespace nanojit
evictScratchRegs();
const CallInfo* call = ins->callInfo();
ArgSize sizes[MAXARGS];
uint32_t argc = call->get_sizes(sizes);
bool indirect = call->isIndirect();
const CallInfo* ci = ins->callInfo();
ArgType argTypes[MAXARGS];
uint32_t argc = ci->getArgTypes(argTypes);
bool indirect = ci->isIndirect();
// FIXME: Put one of the argument moves into the BDS slot
@ -1584,11 +1580,11 @@ namespace nanojit
if (!indirect)
// FIXME: If we can tell that we are calling non-PIC
// (ie JIT) code, we could call direct instead of using t9
asm_li(T9, call->_address);
asm_li(T9, ci->_address);
else
// Indirect call: we assign the address arg to t9
// which matches the o32 ABI for calling functions
asm_regarg(ARGSIZE_P, ins->arg(--argc), T9);
asm_regarg(ARGTYPE_P, ins->arg(--argc), T9);
// Encode the arguments, starting at A0 and with an empty argument stack.
Register r = A0, fr = FA0;
@ -1599,7 +1595,7 @@ namespace nanojit
// Note that we loop through the arguments backwards as LIR specifies them
// in reverse order.
while(argc--)
asm_arg(sizes[argc], ins->arg(argc), r, fr, stkd);
asm_arg(argTypes[argc], ins->arg(argc), r, fr, stkd);
if (stkd > max_out_args)
max_out_args = stkd;


@ -179,9 +179,9 @@ namespace nanojit
NIns *asm_branch_near(bool, LIns*, NIns*); \
void asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr); \
void asm_move(Register d, Register s); \
void asm_regarg(ArgSize sz, LInsp p, Register r); \
void asm_regarg(ArgType ty, LInsp p, Register r); \
void asm_stkarg(LInsp arg, int stkd); \
void asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd); \
void asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd); \
void asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd) ;


@ -691,8 +691,8 @@ namespace nanojit
evictScratchRegs();
const CallInfo* call = ins->callInfo();
ArgSize sizes[MAXARGS];
uint32_t argc = call->get_sizes(sizes);
ArgType argTypes[MAXARGS];
uint32_t argc = call->getArgTypes(argTypes);
bool indirect;
if (!(indirect = call->isIndirect())) {
@ -707,7 +707,7 @@ namespace nanojit
underrunProtect(8); // underrunProtect might clobber CTR
BCTRL();
MTCTR(R11);
asm_regarg(ARGSIZE_P, ins->arg(--argc), R11);
asm_regarg(ARGTYPE_P, ins->arg(--argc), R11);
}
int param_size = 0;
@ -716,22 +716,22 @@ namespace nanojit
Register fr = F1;
for(uint32_t i = 0; i < argc; i++) {
uint32_t j = argc - i - 1;
ArgSize sz = sizes[j];
ArgType ty = argTypes[j];
LInsp arg = ins->arg(j);
if (sz & ARGSIZE_MASK_INT) {
if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) {
// GP arg
if (r <= R10) {
asm_regarg(sz, arg, r);
asm_regarg(ty, arg, r);
r = nextreg(r);
param_size += sizeof(void*);
} else {
// put arg on stack
TODO(stack_int32);
}
} else if (sz == ARGSIZE_F) {
} else if (ty == ARGTYPE_F) {
// double
if (fr <= F13) {
asm_regarg(sz, arg, fr);
asm_regarg(ty, arg, fr);
fr = nextreg(fr);
#ifdef NANOJIT_64BIT
r = nextreg(r);
@ -744,23 +744,23 @@ namespace nanojit
TODO(stack_double);
}
} else {
TODO(ARGSIZE_UNK);
TODO(ARGTYPE_UNK);
}
}
if (param_size > max_param_size)
max_param_size = param_size;
}
void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
{
NanoAssert(r != deprecated_UnknownReg);
if (sz & ARGSIZE_MASK_INT)
if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q)
{
#ifdef NANOJIT_64BIT
if (sz == ARGSIZE_I) {
if (ty == ARGTYPE_I) {
// sign extend 32->64
EXTSW(r, r);
} else if (sz == ARGSIZE_U) {
} else if (ty == ARGTYPE_U) {
// zero extend 32->64
CLRLDI(r, r, 32);
}
@ -793,7 +793,7 @@ namespace nanojit
}
}
}
else if (sz == ARGSIZE_F) {
else if (ty == ARGTYPE_F) {
if (p->isUsed()) {
Register rp = p->deprecated_getReg();
if (!isKnownReg(rp) || !IsFpReg(rp)) {
@ -813,7 +813,7 @@ namespace nanojit
}
}
else {
TODO(ARGSIZE_UNK);
TODO(ARGTYPE_UNK);
}
}


@ -287,7 +287,7 @@ namespace nanojit
void nativePageSetup(); \
void br(NIns *addr, int link); \
void br_far(NIns *addr, int link); \
void asm_regarg(ArgSize, LIns*, Register); \
void asm_regarg(ArgType, LIns*, Register); \
void asm_li(Register r, int32_t imm); \
void asm_li32(Register r, int32_t imm); \
void asm_li64(Register r, uint64_t imm); \

Просмотреть файл

@ -166,8 +166,8 @@ namespace nanojit
underrunProtect(8);
NOP();
ArgSize sizes[MAXARGS];
uint32_t argc = call->get_sizes(sizes);
ArgType argTypes[MAXARGS];
uint32_t argc = call->getArgTypes(argTypes);
NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
verbose_only(if (_logc->lcbits & LC_Assembly)
@ -189,8 +189,8 @@ namespace nanojit
for(int i=0; i<argc; i++)
{
uint32_t j = argc-i-1;
ArgSize sz = sizes[j];
if (sz == ARGSIZE_F) {
ArgType ty = argTypes[j];
if (ty == ARGTYPE_F) {
Register r = findRegFor(ins->arg(j), FpRegs);
GPRIndex += 2;
offset += 8;


@ -875,8 +875,8 @@ namespace nanojit
evictScratchRegs();
const CallInfo *call = ins->callInfo();
ArgSize sizes[MAXARGS];
int argc = call->get_sizes(sizes);
ArgType argTypes[MAXARGS];
int argc = call->getArgTypes(argTypes);
bool indirect = call->isIndirect();
if (!indirect) {
@ -895,7 +895,7 @@ namespace nanojit
// Indirect call: we assign the address arg to RAX since it's not
// used for regular arguments, and is otherwise scratch since it's
// clobberred by the call.
asm_regarg(ARGSIZE_P, ins->arg(--argc), RAX);
asm_regarg(ARGTYPE_P, ins->arg(--argc), RAX);
CALLRAX();
}
@ -908,28 +908,28 @@ namespace nanojit
int arg_index = 0;
for (int i = 0; i < argc; i++) {
int j = argc - i - 1;
ArgSize sz = sizes[j];
ArgType ty = argTypes[j];
LIns* arg = ins->arg(j);
if ((sz & ARGSIZE_MASK_INT) && arg_index < NumArgRegs) {
if ((ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) && arg_index < NumArgRegs) {
// gp arg
asm_regarg(sz, arg, argRegs[arg_index]);
asm_regarg(ty, arg, argRegs[arg_index]);
arg_index++;
}
#ifdef _WIN64
else if (sz == ARGSIZE_F && arg_index < NumArgRegs) {
else if (ty == ARGTYPE_F && arg_index < NumArgRegs) {
// double goes in XMM reg # based on overall arg_index
asm_regarg(sz, arg, Register(XMM0+arg_index));
asm_regarg(ty, arg, Register(XMM0+arg_index));
arg_index++;
}
#else
else if (sz == ARGSIZE_F && fr < XMM8) {
else if (ty == ARGTYPE_F && fr < XMM8) {
// double goes in next available XMM register
asm_regarg(sz, arg, fr);
asm_regarg(ty, arg, fr);
fr = nextreg(fr);
}
#endif
else {
asm_stkarg(sz, arg, stk_used);
asm_stkarg(ty, arg, stk_used);
stk_used += sizeof(void*);
}
}
@ -938,8 +938,8 @@ namespace nanojit
max_stk_used = stk_used;
}
void Assembler::asm_regarg(ArgSize sz, LIns *p, Register r) {
if (sz == ARGSIZE_I) {
void Assembler::asm_regarg(ArgType ty, LIns *p, Register r) {
if (ty == ARGTYPE_I) {
NanoAssert(p->isI32());
if (p->isconst()) {
asm_quad(r, int64_t(p->imm32()));
@ -947,7 +947,7 @@ namespace nanojit
}
// sign extend int32 to int64
MOVSXDR(r, r);
} else if (sz == ARGSIZE_U) {
} else if (ty == ARGTYPE_U) {
NanoAssert(p->isI32());
if (p->isconst()) {
asm_quad(r, uint64_t(uint32_t(p->imm32())));
@ -955,6 +955,8 @@ namespace nanojit
}
// zero extend with 32bit mov, auto-zeros upper 32bits
MOVLR(r, r);
} else {
// Do nothing.
}
/* there is no point in folding an immediate here, because
* the argument register must be a scratch register and we're
@ -966,19 +968,22 @@ namespace nanojit
findSpecificRegFor(p, r);
}
void Assembler::asm_stkarg(ArgSize sz, LIns *p, int stk_off) {
void Assembler::asm_stkarg(ArgType ty, LIns *p, int stk_off) {
NanoAssert(isS8(stk_off));
if (sz & ARGSIZE_MASK_INT) {
if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) {
Register r = findRegFor(p, GpRegs);
MOVQSPR(stk_off, r); // movq [rsp+d8], r
if (sz == ARGSIZE_I) {
if (ty == ARGTYPE_I) {
// extend int32 to int64
NanoAssert(p->isI32());
MOVSXDR(r, r);
} else if (sz == ARGSIZE_U) {
} else if (ty == ARGTYPE_U) {
// extend uint32 to uint64
NanoAssert(p->isI32());
MOVLR(r, r);
} else {
NanoAssert(ty == ARGTYPE_Q);
// Do nothing.
}
} else {
TODO(asm_stkarg_non_int);


@ -393,8 +393,8 @@ namespace nanojit
bool isTargetWithinS8(NIns* target);\
bool isTargetWithinS32(NIns* target);\
void asm_quad(Register r, uint64_t v);\
void asm_regarg(ArgSize, LIns*, Register);\
void asm_stkarg(ArgSize, LIns*, int);\
void asm_regarg(ArgType, LIns*, Register);\
void asm_stkarg(ArgType, LIns*, int);\
void asm_shift(LIns*);\
void asm_shift_imm(LIns*);\
void asm_arith_imm(LIns*);\

Просмотреть файл

@ -274,13 +274,13 @@ namespace nanojit
// Pre-assign registers to the first N 4B args based on the calling convention.
uint32_t n = 0;
ArgSize sizes[MAXARGS];
uint32_t argc = call->get_sizes(sizes);
ArgType argTypes[MAXARGS];
uint32_t argc = call->getArgTypes(argTypes);
int32_t stkd = 0;
if (indirect) {
argc--;
asm_arg(ARGSIZE_P, ins->arg(argc), EAX, stkd);
asm_arg(ARGTYPE_P, ins->arg(argc), EAX, stkd);
if (!config.fixed_esp)
stkd = 0;
}
@ -288,12 +288,12 @@ namespace nanojit
for(uint32_t i=0; i < argc; i++)
{
uint32_t j = argc-i-1;
ArgSize sz = sizes[j];
ArgType ty = argTypes[j];
Register r = UnspecifiedReg;
if (n < max_regs && sz != ARGSIZE_F) {
if (n < max_regs && ty != ARGTYPE_F) {
r = argRegs[n++]; // tell asm_arg what reg to use
}
asm_arg(sz, ins->arg(j), r, stkd);
asm_arg(ty, ins->arg(j), r, stkd);
if (!config.fixed_esp)
stkd = 0;
}
@ -1393,13 +1393,12 @@ namespace nanojit
}
}
void Assembler::asm_arg(ArgSize sz, LInsp ins, Register r, int32_t& stkd)
void Assembler::asm_arg(ArgType ty, LInsp ins, Register r, int32_t& stkd)
{
// If 'r' is known, then that's the register we have to put 'ins'
// into.
if (sz == ARGSIZE_I || sz == ARGSIZE_U)
{
if (ty == ARGTYPE_I || ty == ARGTYPE_U) {
if (r != UnspecifiedReg) {
if (ins->isconst()) {
// Rematerialize the constant.
@ -1428,10 +1427,9 @@ namespace nanojit
else
asm_pusharg(ins);
}
}
else
{
NanoAssert(sz == ARGSIZE_F);
} else {
NanoAssert(ty == ARGTYPE_F);
asm_farg(ins, stkd);
}
}


@ -184,7 +184,7 @@ namespace nanojit
void asm_int(Register r, int32_t val, bool canClobberCCs);\
void asm_stkarg(LInsp p, int32_t& stkd);\
void asm_farg(LInsp, int32_t& stkd);\
void asm_arg(ArgSize sz, LInsp p, Register r, int32_t& stkd);\
void asm_arg(ArgType ty, LInsp p, Register r, int32_t& stkd);\
void asm_pusharg(LInsp);\
void asm_fcmp(LIns *cond);\
NIns* asm_fbranch(bool, LIns*, NIns*);\
@ -968,23 +968,23 @@ namespace nanojit
#define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output("emms"); } while (0)
// standard direct call
#define CALL(c) do { \
#define CALL(ci) do { \
count_call();\
underrunProtect(5); \
int offset = (c->_address) - ((int)_nIns); \
int offset = (ci->_address) - ((int)_nIns); \
IMM32( (uint32_t)offset ); \
*(--_nIns) = 0xE8; \
verbose_only(asm_output("call %s",(c->_name));) \
debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\
verbose_only(asm_output("call %s",(ci->_name));) \
debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
} while (0)
// indirect call thru register
#define CALLr(c,r) do { \
#define CALLr(ci,r) do { \
count_calli();\
underrunProtect(2);\
ALU(0xff, 2, (r));\
verbose_only(asm_output("call %s",gpn(r));) \
debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\
debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\
} while (0)
}


@ -121,7 +121,7 @@ extern void* _tprof_before_id;
#ifndef DOPROF
#ifndef VMCFG_SYMBIAN
#define _vprof(v,...)
#define _nvprof(e,v,...)
#define _nvprof(e,v)
#define _hprof(h,n,...)
#define _nhprof(e,v,n,...)
#define _ntprof(e)
@ -139,13 +139,13 @@ extern void* _tprof_before_id;
;\
}
#define _nvprof(e,v,...) \
#define _nvprof(e,v) \
{ \
static void* id = 0; \
(id != 0) ? \
_profileEntryValue (id, (int64_t) (v)) \
: \
profileValue (&id, (char*) (e), -1, (int64_t) (v), ##__VA_ARGS__, NULL) \
profileValue (&id, (char*) (e), -1, (int64_t) (v), NULL) \
; \
}