Bug 542133 - Add a real NJConfig struct to nanojit (r=edwsmith,r=nnethercote)

--HG--
extra : convert_revision : 353c06e32329377cc9ed5f66eeb6bcaf72ff3aea
This commit is contained in:
Steven Johnson 2010-02-15 17:56:41 -08:00
Родитель 42c2c1f152
Коммит da9d3b28c6
16 изменённых файлов: 415 добавлений и 202 удалений

Просмотреть файл

@ -1942,10 +1942,9 @@ FragmentAssembler::assembleRandomFragment(int nIns)
}
Lirasm::Lirasm(bool verbose) :
mAssm(mCodeAlloc, mAlloc, mAlloc, &mCore, &mLogc)
mAssm(mCodeAlloc, mAlloc, mAlloc, &mCore, &mLogc, nanojit::AvmCore::config)
{
mVerbose = verbose;
nanojit::AvmCore::config.tree_opt = true;
mLogc.lcbits = 0;
mLirbuf = new (mAlloc) LirBuffer(mAlloc);
@ -2214,8 +2213,8 @@ processCmdLine(int argc, char **argv, CmdLineOptions& opts)
// Handle the architecture-specific options.
#if defined NANOJIT_IA32
avmplus::AvmCore::config.use_cmov = avmplus::AvmCore::config.sse2 = i386_sse;
avmplus::AvmCore::config.fixed_esp = true;
avmplus::AvmCore::config.i386_use_cmov = avmplus::AvmCore::config.i386_sse2 = i386_sse;
avmplus::AvmCore::config.i386_fixed_esp = true;
#elif defined NANOJIT_ARM
// Note that we don't check for sensible configurations here!
avmplus::AvmCore::config.arm_arch = arm_arch;

Просмотреть файл

@ -57,7 +57,7 @@ namespace nanojit
*
* - merging paths ( build a graph? ), possibly use external rep to drive codegen
*/
Assembler::Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc)
Assembler::Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config)
: codeList(NULL)
, alloc(alloc)
, _codeAlloc(codeAlloc)
@ -77,7 +77,7 @@ namespace nanojit
#ifdef VTUNE
, cgen(NULL)
#endif
, config(core->config)
, _config(config)
{
VMPI_memset(&_stats, 0, sizeof(_stats));
nInit(core);

Просмотреть файл

@ -293,7 +293,7 @@ namespace nanojit
avmplus::CodegenLIR *cgen;
#endif
Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc);
Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config);
void compile(Fragment *frag, Allocator& alloc, bool optimize
verbose_only(, LabelMap*));
@ -487,7 +487,7 @@ namespace nanojit
debug_only( --_fpuStkDepth; NanoAssert(_fpuStkDepth<=0); )
}
#endif
avmplus::Config &config;
const Config& _config;
};
inline int32_t arDisp(LIns* ins)

Просмотреть файл

@ -953,7 +953,7 @@ namespace nanojit
LOpcode op = getCallOpcode(ci);
#if NJ_SOFTFLOAT_SUPPORTED
// SoftFloat: convert LIR_fcall to LIR_icall.
if (!_config.arm_vfp && op == LIR_fcall)
if (_config.soft_float && op == LIR_fcall)
op = LIR_icall;
#endif

Просмотреть файл

@ -1558,10 +1558,10 @@ namespace nanojit
class LirBufWriter : public LirWriter
{
LirBuffer* _buf; // underlying buffer housing the instructions
const avmplus::Config& _config;
const Config& _config;
public:
LirBufWriter(LirBuffer* buf, const avmplus::Config& config)
LirBufWriter(LirBuffer* buf, const Config& config)
: LirWriter(0), _buf(buf), _config(config) {
}

Просмотреть файл

@ -104,10 +104,10 @@ Assembler::CountLeadingZeroes(uint32_t data)
// as that aren't supported, but assert that we aren't running on one
// anyway.
// If ARMv4 support is required in the future for some reason, we can do a
// run-time check on config.arch and fall back to the C routine, but for
// run-time check on _config.arm_arch and fall back to the C routine, but for
// now we can avoid the cost of the check as we don't intend to support
// ARMv4 anyway.
NanoAssert(config.arm_arch >= 5);
NanoAssert(_config.arm_arch >= 5);
#if defined(__ARMCC__)
// ARMCC can do this with an intrinsic.
@ -535,7 +535,7 @@ Assembler::nFragExit(LInsp guard)
}
#ifdef NJ_VERBOSE
if (config.show_stats) {
if (_config.arm_show_stats) {
// load R1 with Fragment *fromFrag, target fragment
// will make use of this when calling fragenter().
int fromfrag = int((Fragment*)_thisfrag);
@ -559,7 +559,7 @@ Assembler::genEpilogue()
{
// On ARMv5+, loading directly to PC correctly handles interworking.
// Note that we don't support anything older than ARMv5.
NanoAssert(config.arm_arch >= 5);
NanoAssert(_config.arm_arch >= 5);
RegisterMask savingMask = rmask(FP) | rmask(PC);
@ -628,11 +628,11 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
NanoAssert((stkd & 3) == 0);
// The only use for this function when we are using soft floating-point
// is for LIR_qjoin.
NanoAssert(config.arm_vfp || arg->isop(LIR_qjoin));
NanoAssert(_config.arm_vfp || arg->isop(LIR_qjoin));
Register fp_reg = deprecated_UnknownReg;
if (config.arm_vfp) {
if (_config.arm_vfp) {
fp_reg = findRegFor(arg, FpRegs);
NanoAssert(isKnownReg(fp_reg));
}
@ -662,7 +662,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
// Put the argument in ra and rb. If the argument is in a VFP register,
// use FMRRD to move it to ra and rb. Otherwise, let asm_regarg deal
// with the argument as if it were two 32-bit arguments.
if (config.arm_vfp) {
if (_config.arm_vfp) {
FMRRD(ra, rb, fp_reg);
} else {
asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
@ -685,7 +685,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
// must be the first time that the stack is used, so stkd must be at 0.
NanoAssert(stkd == 0);
if (config.arm_vfp) {
if (_config.arm_vfp) {
// TODO: We could optimize the this to store directly from
// the VFP register to memory using "FMRRD ra, fp_reg[31:0]" and
// "STR fp_reg[63:32], [SP, #stkd]".
@ -768,7 +768,7 @@ Assembler::asm_stkarg(LInsp arg, int stkd)
if (arg->isUsed() && (rr = arg->deprecated_getReg(), isKnownReg(rr))) {
// The argument resides somewhere in registers, so we simply need to
// push it onto the stack.
if (!config.arm_vfp || !isF64) {
if (!_config.arm_vfp || !isF64) {
NanoAssert(IsGpReg(rr));
STR(rr, SP, stkd);
@ -779,7 +779,7 @@ Assembler::asm_stkarg(LInsp arg, int stkd)
// arguments to asm_stkarg so we can ignore that case here and
// assert that we will never get 64-bit arguments unless VFP is
// available.
NanoAssert(config.arm_vfp);
NanoAssert(_config.arm_vfp);
NanoAssert(IsFpReg(rr));
#ifdef NJ_ARM_EABI
@ -817,7 +817,7 @@ Assembler::asm_stkarg(LInsp arg, int stkd)
void
Assembler::asm_call(LInsp ins)
{
if (config.arm_vfp && ins->isop(LIR_fcall)) {
if (_config.arm_vfp && ins->isop(LIR_fcall)) {
/* Because ARM actually returns the result in (R0,R1), and not in a
* floating point register, the code to move the result into a correct
* register is below. We do nothing here.
@ -855,13 +855,13 @@ Assembler::asm_call(LInsp ins)
// If we aren't using VFP, assert that the LIR operation is an integer
// function call.
NanoAssert(config.arm_vfp || ins->isop(LIR_icall));
NanoAssert(_config.arm_vfp || ins->isop(LIR_icall));
// If we're using VFP, and the return type is a double, it'll come back in
// R0/R1. We need to either place it in the result fp reg, or store it.
// See comments above for more details as to why this is necessary here
// for floating point calls, but not for integer calls.
if (config.arm_vfp && ins->isUsed()) {
if (_config.arm_vfp && ins->isUsed()) {
// Determine the size (and type) of the instruction result.
ArgSize rsize = (ArgSize)(call->_argtypes & ARGSIZE_MASK_ANY);
@ -964,7 +964,7 @@ Assembler::nRegisterResetAll(RegAlloc& a)
rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) |
rmask(R5) | rmask(R6) | rmask(R7) | rmask(R8) | rmask(R9) |
rmask(R10) | rmask(LR);
if (config.arm_vfp)
if (_config.arm_vfp)
a.free |= FpRegs;
debug_only(a.managed = a.free);
@ -1249,7 +1249,7 @@ Assembler::asm_restore(LInsp i, Register r)
// ensure that memory is allocated for the constant and load it from
// memory.
int d = findMemFor(i);
if (config.arm_vfp && IsFpReg(r)) {
if (_config.arm_vfp && IsFpReg(r)) {
if (isS8(d >> 2)) {
FLDD(r, FP, d);
} else {
@ -1280,7 +1280,7 @@ Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
(void) pop;
(void) quad;
if (d) {
if (config.arm_vfp && IsFpReg(rr)) {
if (_config.arm_vfp && IsFpReg(rr)) {
if (isS8(d >> 2)) {
FSTD(rr, FP, d);
} else {
@ -1327,7 +1327,7 @@ Assembler::asm_load64(LInsp ins)
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldfc:
if (config.arm_vfp && isKnownReg(rr)) {
if (_config.arm_vfp && isKnownReg(rr)) {
// VFP is enabled and the result will go into a register.
NanoAssert(IsFpReg(rr));
@ -1354,7 +1354,7 @@ Assembler::asm_load64(LInsp ins)
case LIR_ld32f:
case LIR_ldc32f:
if (config.arm_vfp) {
if (_config.arm_vfp) {
if (isKnownReg(rr)) {
NanoAssert(IsFpReg(rr));
FCVTDS(rr, S14);
@ -1398,7 +1398,7 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
switch (op) {
case LIR_stfi:
if (config.arm_vfp) {
if (_config.arm_vfp) {
Register rb = findRegFor(base, GpRegs);
if (value->isconstq()) {
@ -1447,7 +1447,7 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
return;
case LIR_st32f:
if (config.arm_vfp) {
if (_config.arm_vfp) {
Register rb = findRegFor(base, GpRegs);
if (value->isconstq()) {
@ -1535,7 +1535,7 @@ Assembler::asm_quad(LInsp ins)
deprecated_freeRsrcOf(ins, false);
if (config.arm_vfp && isKnownReg(rr))
if (_config.arm_vfp && isKnownReg(rr))
{
asm_spill(rr, d, false, true);
@ -1559,7 +1559,7 @@ Assembler::asm_quad(LInsp ins)
void
Assembler::asm_nongp_copy(Register r, Register s)
{
if (config.arm_vfp && IsFpReg(r) && IsFpReg(s)) {
if (_config.arm_vfp && IsFpReg(r) && IsFpReg(s)) {
// fp->fp
FCPYD(r, s);
} else {
@ -1809,7 +1809,7 @@ Assembler::BranchWithLink(NIns* addr)
// We need to emit an ARMv5+ instruction, so assert that we have a
// suitable processor. Note that we don't support ARMv4(T), but
// this serves as a useful sanity check.
NanoAssert(config.arm_arch >= 5);
NanoAssert(_config.arm_arch >= 5);
// The (pre-shifted) value of the "H" bit in the BLX encoding.
uint32_t H = (offs & 0x2) << 23;
@ -1836,7 +1836,7 @@ Assembler::BLX(Register addr, bool chk /* = true */)
// We need to emit an ARMv5+ instruction, so assert that we have a suitable
// processor. Note that we don't support ARMv4(T), but this serves as a
// useful sanity check.
NanoAssert(config.arm_arch >= 5);
NanoAssert(_config.arm_arch >= 5);
NanoAssert(IsGpReg(addr));
// There is a bug in the WinCE device emulator which stops "BLX LR" from
@ -1861,7 +1861,7 @@ Assembler::BLX(Register addr, bool chk /* = true */)
void
Assembler::asm_ldr_chk(Register d, Register b, int32_t off, bool chk)
{
if (config.arm_vfp && IsFpReg(d)) {
if (_config.arm_vfp && IsFpReg(d)) {
FLDD_chk(d,b,off,chk);
return;
}
@ -1940,7 +1940,7 @@ Assembler::asm_ld_imm(Register d, int32_t imm, bool chk /* = true */)
// (Note that we use Thumb-2 if arm_arch is ARMv7 or later; the only earlier
// ARM core that provided Thumb-2 is ARMv6T2/ARM1156, which is a real-time
// core that nanojit is unlikely to ever target.)
if (config.arm_arch >= 7 && (d != PC)) {
if (_config.arm_arch >= 7 && (d != PC)) {
// ARMv6T2 and above have MOVW and MOVT.
uint32_t high_h = (uint32_t)imm >> 16;
uint32_t low_h = imm & 0xffff;
@ -2183,7 +2183,7 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
LOpcode condop = cond->opcode();
NanoAssert(cond->isCond());
NanoAssert(config.arm_vfp || ((condop < LIR_feq) || (condop > LIR_fge)));
NanoAssert(_config.arm_vfp || ((condop < LIR_feq) || (condop > LIR_fge)));
// The old "never" condition code has special meaning on newer ARM cores,
// so use "always" as a sensible default code.
@ -2236,7 +2236,7 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
NanoAssert((cc != AL) && (cc != NV));
// Ensure that we don't hit floating-point LIR codes if VFP is disabled.
NanoAssert(config.arm_vfp || !fp_cond);
NanoAssert(_config.arm_vfp || !fp_cond);
// Emit a suitable branch instruction.
B_cond(cc, targ);
@ -2245,7 +2245,7 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
// asm_[f]cmp will move _nIns so we must do this now.
NIns *at = _nIns;
if (config.arm_vfp && fp_cond)
if (_config.arm_vfp && fp_cond)
asm_fcmp(cond);
else
asm_cmp(cond);
@ -2457,7 +2457,7 @@ Assembler::asm_arith(LInsp ins)
// common for (rr == ra) and is thus likely to be the most
// efficient method.
if ((config.arm_arch > 5) || (rr != rb)) {
if ((_config.arm_arch > 5) || (rr != rb)) {
// IP is used to temporarily store the high word of the result from
// SMULL, so we make use of this to perform an overflow check, as
// ARM's MUL instruction can't set the overflow flag by itself.
@ -2469,7 +2469,7 @@ Assembler::asm_arith(LInsp ins)
ALUr_shi(AL, cmp, 1, SBZ, IP, rr, ASR_imm, 31);
SMULL(rr, IP, rb, ra);
} else {
// config.arm_arch is ARMv5 (or below) and rr == rb, so we must
// _config.arm_arch is ARMv5 (or below) and rr == rb, so we must
// find a different way to encode the instruction.
// If possible, swap the arguments to avoid the restriction.
@ -2735,7 +2735,7 @@ Assembler::asm_ret(LIns *ins)
}
else {
NanoAssert(ins->isop(LIR_fret));
if (config.arm_vfp) {
if (_config.arm_vfp) {
Register reg = findRegFor(value, FpRegs);
FMRRD(R0, R1, reg);
} else {

Просмотреть файл

@ -462,8 +462,8 @@ enum {
// [_d_hi,_d] = _l * _r
#define SMULL(_d, _d_hi, _l, _r) do { \
underrunProtect(4); \
NanoAssert((config.arm_arch >= 6) || ((_d ) != (_l))); \
NanoAssert((config.arm_arch >= 6) || ((_d_hi) != (_l))); \
NanoAssert((_config.arm_arch >= 6) || ((_d ) != (_l))); \
NanoAssert((_config.arm_arch >= 6) || ((_d_hi) != (_l))); \
NanoAssert(IsGpReg(_d) && IsGpReg(_d_hi) && IsGpReg(_l) && IsGpReg(_r)); \
NanoAssert(((_d) != PC) && ((_d_hi) != PC) && ((_l) != PC) && ((_r) != PC)); \
*(--_nIns) = (NIns)( COND_AL | 0xc00090 | (_d_hi)<<16 | (_d)<<12 | (_r)<<8 | (_l) ); \
@ -473,7 +473,7 @@ enum {
// _d = _l * _r
#define MUL(_d, _l, _r) do { \
underrunProtect(4); \
NanoAssert((config.arm_arch >= 6) || ((_d) != (_l))); \
NanoAssert((_config.arm_arch >= 6) || ((_d) != (_l))); \
NanoAssert(IsGpReg(_d) && IsGpReg(_l) && IsGpReg(_r)); \
NanoAssert(((_d) != PC) && ((_l) != PC) && ((_r) != PC)); \
*(--_nIns) = (NIns)( COND_AL | (_d)<<16 | (_r)<<8 | 0x90 | (_l) ); \
@ -829,7 +829,7 @@ enum {
#define FMDRR(_Dm,_Rd,_Rn) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dm) && IsGpReg(_Rd) && IsGpReg(_Rn)); \
*(--_nIns) = (NIns)( COND_AL | (0xC4<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmdrr %s,%s,%s", gpn(_Dm), gpn(_Rd), gpn(_Rn)); \
@ -837,7 +837,7 @@ enum {
#define FMRRD(_Rd,_Rn,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsGpReg(_Rd) && IsGpReg(_Rn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xC5<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmrrd %s,%s,%s", gpn(_Rd), gpn(_Rn), gpn(_Dm)); \
@ -845,7 +845,7 @@ enum {
#define FMRDH(_Rd,_Dn) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn)); \
*(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn)); \
@ -853,7 +853,7 @@ enum {
#define FMRDL(_Rd,_Dn) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn)); \
*(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn)); \
@ -861,7 +861,7 @@ enum {
#define FSTD_allowD7(_Dd,_Rn,_offs,_allowD7) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
NanoAssert((IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7)) && !IsFpReg(_Rn)); \
int negflag = 1<<23; \
@ -879,7 +879,7 @@ enum {
#define FLDD_chk(_Dd,_Rn,_offs,_chk) do { \
if(_chk) underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
NanoAssert(IsFpReg(_Dd) && !IsFpReg(_Rn)); \
int negflag = 1<<23; \
@ -895,7 +895,7 @@ enum {
#define FUITOD(_Dd,_Sm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2D<<6) | (0<<5) | (0x7) ); \
asm_output("fuitod %s,%s", gpn(_Dd), gpn(_Sm)); \
@ -903,7 +903,7 @@ enum {
#define FNEGD(_Dd,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB1<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
asm_output("fnegd %s,%s", gpn(_Dd), gpn(_Dm)); \
@ -911,7 +911,7 @@ enum {
#define FADDD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
asm_output("faddd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \
@ -919,7 +919,7 @@ enum {
#define FSUBD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
asm_output("fsubd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \
@ -927,7 +927,7 @@ enum {
#define FMULD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE2<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \
@ -935,7 +935,7 @@ enum {
#define FDIVD(_Dd,_Dn,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xE8<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm)); \
@ -943,14 +943,14 @@ enum {
#define FMSTAT() do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
*(--_nIns) = (NIns)( COND_AL | 0x0EF1FA10); \
asm_output("fmstat"); \
} while (0)
#define FCMPD(_Dd,_Dm,_E) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm)); \
NanoAssert(((_E)==0) || ((_E)==1)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB4<<16) | (FpRegNum(_Dd)<<12) | (0xB<<8) | ((_E)<<7) | (0x4<<4) | (FpRegNum(_Dm)) ); \
@ -959,7 +959,7 @@ enum {
#define FCPYD(_Dd,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB0<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
asm_output("fcpyd %s,%s", gpn(_Dd), gpn(_Dm)); \
@ -967,7 +967,7 @@ enum {
#define FMRS(_Rd,_Sn) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sn) == S14) && IsGpReg(_Rd)); \
*(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn)); \
@ -983,7 +983,7 @@ enum {
#define FSITOD(_Dd,_Sm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2F<<6) | (0<<5) | (0x7) ); \
asm_output("fsitod %s,%s", gpn(_Dd), gpn(_Sm)); \
@ -991,7 +991,7 @@ enum {
#define FMSR(_Sn,_Rd) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sn) == S14) && IsGpReg(_Rd)); \
*(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd)); \
@ -999,7 +999,7 @@ enum {
#define FMRS(_Rd,_Sn) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sn) == S14) && IsGpReg(_Rd)); \
*(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn)); \
@ -1007,7 +1007,7 @@ enum {
#define FMSR(_Sn,_Rd) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sn) == S14) && IsGpReg(_Rd)); \
*(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd)); \
@ -1015,7 +1015,7 @@ enum {
#define FCVTSD(_Sd,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sd) == S14) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (0x7<<12) | (0xBC<<4) | (FpRegNum(_Dm)) ); \
asm_output("[0x%08x] fcvtsd s14,%s", *_nIns, gpn(_Dm)); \
@ -1023,7 +1023,7 @@ enum {
#define FCVTDS_allowD7(_Dd,_Sm,_allowD7) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sm) == S14) && (IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7))); \
*(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (FpRegNum(_Dd)<<12) | (0xAC<<4) | (0x7) ); \
asm_output("[0x%08x] fcvtds %s,s14", *_nIns, gpn(_Dd)); \
@ -1034,7 +1034,7 @@ enum {
#define FLDS(_Sd,_Rn,_offs) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn)); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
int addflag = 1<<23; \
@ -1049,7 +1049,7 @@ enum {
#define FSTS(_Sd,_Rn,_offs) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn)); \
NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
int addflag = 1<<23; \
@ -1064,7 +1064,7 @@ enum {
#define FTOSID(_Sd,_Dm) do { \
underrunProtect(4); \
NanoAssert(config.arm_vfp); \
NanoAssert(_config.arm_vfp); \
NanoAssert(((_Sd) == S14) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEBD<<16) | (0x7<<12) | (0xB4<<4) | FpRegNum(_Dm) ); \
asm_output("ftosid s14, %s", gpn(_Dm)); \

Просмотреть файл

@ -1690,14 +1690,13 @@ namespace nanojit
Assembler::nFragExit(LIns *guard)
{
SideExit *exit = guard->record()->exit;
bool trees = config.tree_opt;
Fragment *frag = exit->target;
bool destKnown = (frag && frag->fragEntry);
// Generate jump to epilogue and initialize lr.
// If the guard already exists, use a simple jump.
if (destKnown && !trees) {
if (destKnown) {
// j _fragEntry
// move $v0,$zero
MOVE(V0, ZERO);

Просмотреть файл

@ -70,45 +70,8 @@ namespace nanojit
0 /* ABI_CDECL */
};
static bool CheckForSSE2()
void Assembler::nInit(AvmCore*)
{
int features = 0;
#if defined _MSC_VER
__asm
{
pushad
mov eax, 1
cpuid
mov features, edx
popad
}
#elif defined __GNUC__
asm("xchg %%esi, %%ebx\n" /* we can't clobber ebx on gcc (PIC register) */
"mov $0x01, %%eax\n"
"cpuid\n"
"mov %%edx, %0\n"
"xchg %%esi, %%ebx\n"
: "=m" (features)
: /* We have no inputs */
: "%eax", "%esi", "%ecx", "%edx"
);
#elif defined __SUNPRO_C || defined __SUNPRO_CC
asm("push %%ebx\n"
"mov $0x01, %%eax\n"
"cpuid\n"
"pop %%ebx\n"
: "=d" (features)
: /* We have no inputs */
: "%eax", "%ecx"
);
#endif
return (features & (1<<26)) != 0;
}
void Assembler::nInit(AvmCore* core)
{
(void) core;
config.sse2 = config.sse2 && CheckForSSE2();
}
void Assembler::nBeginAssembly() {
@ -145,7 +108,6 @@ namespace nanojit
void Assembler::nFragExit(LInsp guard)
{
SideExit *exit = guard->record()->exit;
bool trees = config.tree_opt;
Fragment *frag = exit->target;
GuardRecord *lr = 0;
bool destKnown = (frag && frag->fragEntry);
@ -163,7 +125,7 @@ namespace nanojit
LEAmi4(r, si->table, r);
} else {
// If the guard already exists, use a simple jump.
if (destKnown && !trees) {
if (destKnown) {
JMP(frag->fragEntry);
lr = 0;
} else { // Target doesn't exist. Jump to an epilogue for now. This can be patched later.
@ -237,7 +199,7 @@ namespace nanojit
#endif
if (pushsize) {
if (config.fixed_esp) {
if (_config.i386_fixed_esp) {
// In case of fastcall, stdcall and thiscall the callee cleans up the stack,
// and since we reserve max_stk_args words in the prolog to call functions
// and don't adjust the stack pointer individually for each call we have
@ -281,7 +243,7 @@ namespace nanojit
if (indirect) {
argc--;
asm_arg(ARGSIZE_P, ins->arg(argc), EAX, stkd);
if (!config.fixed_esp)
if (!_config.i386_fixed_esp)
stkd = 0;
}
@ -294,11 +256,11 @@ namespace nanojit
r = argRegs[n++]; // tell asm_arg what reg to use
}
asm_arg(sz, ins->arg(j), r, stkd);
if (!config.fixed_esp)
if (!_config.i386_fixed_esp)
stkd = 0;
}
if (config.fixed_esp) {
if (_config.i386_fixed_esp) {
if (pushsize > max_stk_args)
max_stk_args = pushsize;
} else if (extra > 0) {
@ -342,7 +304,7 @@ namespace nanojit
// add scratch registers to our free list for the allocator
a.clear();
a.free = SavedRegs | ScratchRegs;
if (!config.sse2)
if (!_config.i386_sse2)
a.free &= ~XmmRegs;
debug_only( a.managed = a.free; )
}
@ -611,7 +573,7 @@ namespace nanojit
if (op == LIR_st32f) {
bool pop = !value->isInReg();
Register rv = ( pop
? findRegFor(value, config.sse2 ? XmmRegs : FpRegs)
? findRegFor(value, _config.i386_sse2 ? XmmRegs : FpRegs)
: value->getReg() );
if (rmask(rv) & XmmRegs) {
@ -641,7 +603,7 @@ namespace nanojit
// side exit, copying a non-double.
// c) Maybe it's a double just being stored. Oh well.
if (config.sse2) {
if (_config.i386_sse2) {
Register rv = findRegFor(value, XmmRegs);
SSE_STQ(dr, rb, rv);
} else {
@ -652,7 +614,7 @@ namespace nanojit
} else {
bool pop = !value->isInReg();
Register rv = ( pop
? findRegFor(value, config.sse2 ? XmmRegs : FpRegs)
? findRegFor(value, _config.i386_sse2 ? XmmRegs : FpRegs)
: value->getReg() );
if (rmask(rv) & XmmRegs) {
@ -670,7 +632,7 @@ namespace nanojit
// Value is either a 64-bit struct or maybe a float that isn't live in
// an FPU reg. Either way, avoid allocating an FPU reg just to load
// and store it.
if (config.sse2) {
if (_config.i386_sse2) {
Register t = registerAllocTmp(XmmRegs);
SSE_STQ(dd, rd, t);
SSE_LDQ(t, ds, rs);
@ -823,7 +785,7 @@ namespace nanojit
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
if (config.sse2) {
if (_config.i386_sse2) {
// LIR_flt and LIR_fgt are handled by the same case because
// asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a). Likewise
// for LIR_fle/LIR_fge.
@ -1352,7 +1314,7 @@ namespace nanojit
{
LIns *lhs = ins->oprnd1();
if (config.sse2) {
if (_config.i386_sse2) {
Register rr = prepareResultReg(ins, XmmRegs);
// If 'lhs' isn't in a register, it can be clobbered by 'ins'.
@ -1423,7 +1385,7 @@ namespace nanojit
}
}
else {
if (config.fixed_esp)
if (_config.i386_fixed_esp)
asm_stkarg(ins, stkd);
else
asm_pusharg(ins);
@ -1502,7 +1464,7 @@ namespace nanojit
*/
evictIfActive(FST0);
}
if (!config.fixed_esp)
if (!_config.i386_fixed_esp)
SUBi(ESP, 8);
stkd += sizeof(double);
@ -1511,7 +1473,7 @@ namespace nanojit
void Assembler::asm_fop(LInsp ins)
{
LOpcode op = ins->opcode();
if (config.sse2)
if (_config.i386_sse2)
{
LIns *lhs = ins->oprnd1();
LIns *rhs = ins->oprnd2();
@ -1686,7 +1648,7 @@ namespace nanojit
{
LIns *lhs = ins->oprnd1();
if (config.sse2) {
if (_config.i386_sse2) {
Register rr = prepareResultReg(ins, GpRegs);
Register ra = findRegFor(lhs, XmmRegs);
SSE_CVTSD2SI(rr, ra);
@ -1720,7 +1682,7 @@ namespace nanojit
NIns* at;
LOpcode opcode = cond->opcode();
if (config.sse2) {
if (_config.i386_sse2) {
// LIR_flt and LIR_fgt are handled by the same case because
// asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a). Likewise
// for LIR_fle/LIR_fge.
@ -1769,7 +1731,7 @@ namespace nanojit
LIns* rhs = cond->oprnd2();
NanoAssert(lhs->isF64() && rhs->isF64());
if (config.sse2) {
if (_config.i386_sse2) {
// First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
if (condop == LIR_flt) {
condop = LIR_fgt;

Просмотреть файл

@ -49,7 +49,7 @@ blx_lr_broken() {
using namespace avmplus;
Config AvmCore::config;
nanojit::Config AvmCore::config;
void
avmplus::AvmLog(char const *msg, ...) {

Просмотреть файл

@ -37,6 +37,7 @@
#define avm_h___
#include "VMPI.h"
#include "njconfig.h"
#if !defined(AVMPLUS_LITTLE_ENDIAN) && !defined(AVMPLUS_BIG_ENDIAN)
#ifdef IS_BIG_ENDIAN
@ -181,60 +182,6 @@ namespace avmplus {
extern void AvmLog(char const *msg, ...);
class Config
{
public:
Config() {
memset(this, 0, sizeof(Config));
#ifdef DEBUG
verbose = false;
verbose_addrs = 1;
verbose_exits = 1;
verbose_live = 1;
show_stats = 1;
#endif
}
uint32_t tree_opt:1;
uint32_t quiet_opt:1;
uint32_t verbose:1;
uint32_t verbose_addrs:1;
uint32_t verbose_live:1;
uint32_t verbose_exits:1;
uint32_t show_stats:1;
#if defined (AVMPLUS_IA32)
// Whether or not we can use SSE2 instructions and conditional moves.
bool sse2;
bool use_cmov;
// Whether to use a virtual stack pointer
bool fixed_esp;
#endif
#if defined (AVMPLUS_ARM)
// Whether or not to generate VFP instructions.
# if defined (NJ_FORCE_SOFTFLOAT)
static const bool arm_vfp = false;
# else
bool arm_vfp;
# endif
// The ARM architecture version.
# if defined (NJ_FORCE_ARM_ARCH_VERSION)
static const unsigned int arm_arch = NJ_FORCE_ARM_ARCH_VERSION;
# else
unsigned int arm_arch;
# endif
#endif
#if defined (NJ_FORCE_SOFTFLOAT)
static const bool soft_float = true;
#else
bool soft_float;
#endif
};
static const int kstrconst_emptyString = 0;
class AvmInterpreter
@ -276,13 +223,13 @@ namespace avmplus {
AvmInterpreter interp;
AvmConsole console;
static Config config;
static nanojit::Config config;
#ifdef AVMPLUS_IA32
static inline bool
use_sse2()
{
return config.sse2;
return config.i386_sse2;
}
#endif
@ -290,24 +237,11 @@ namespace avmplus {
use_cmov()
{
#ifdef AVMPLUS_IA32
return config.use_cmov;
return config.i386_use_cmov;
#else
return true;
#endif
}
static inline bool
quiet_opt()
{
return config.quiet_opt;
}
static inline bool
verbose()
{
return config.verbose;
}
};
/**

Просмотреть файл

@ -72,6 +72,7 @@ avmplus_CXXSRCS := $(avmplus_CXXSRCS) \
$(curdir)/Containers.cpp \
$(curdir)/Fragmento.cpp \
$(curdir)/LIR.cpp \
$(curdir)/njconfig.cpp \
$(curdir)/RegAlloc.cpp \
$(curdir)/$(nanojit_cpu_cxxsrc) \
$(NULL)

Просмотреть файл

@ -265,7 +265,6 @@ namespace nanojit {
// An OR of LC_Bits values, indicating what should be output
uint32_t lcbits;
};
}
// -------------------------------------------------------------------
@ -273,6 +272,7 @@ namespace nanojit {
// -------------------------------------------------------------------
#include "njconfig.h"
#include "Allocator.h"
#include "Containers.h"
#include "Native.h"

113
js/src/nanojit/njconfig.cpp Normal file
Просмотреть файл

@ -0,0 +1,113 @@
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is [Open Source Virtual Machine].
*
* The Initial Developer of the Original Code is
* Adobe System Incorporated.
* Portions created by the Initial Developer are Copyright (C) 2004-2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Adobe AS3 Team
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "nanojit.h"
#ifdef FEATURE_NANOJIT
namespace nanojit
{
#ifdef NANOJIT_IA32
// Runtime probe for SSE2 support on IA-32.
// Executes CPUID with EAX=1 and tests EDX bit 26, the architecturally
// defined SSE2 feature bit. Three inline-asm dialects are provided
// (MSVC, GCC, Sun Studio); on any other compiler `features` stays 0
// and the function conservatively reports no SSE2.
static bool CheckForSSE2()
{
int features = 0;
#if defined _MSC_VER
// MSVC inline asm: pushad/popad preserve all GPRs, since CPUID
// clobbers EAX/EBX/ECX/EDX.
__asm
{
pushad
mov eax, 1
cpuid
mov features, edx
popad
}
#elif defined __GNUC__
asm("xchg %%esi, %%ebx\n" /* we can't clobber ebx on gcc (PIC register) */
"mov $0x01, %%eax\n"
"cpuid\n"
"mov %%edx, %0\n"
"xchg %%esi, %%ebx\n" /* restore ebx from esi */
: "=m" (features)
: /* We have no inputs */
: "%eax", "%esi", "%ecx", "%edx"
);
#elif defined __SUNPRO_C || defined __SUNPRO_CC
// Sun Studio: save/restore EBX manually around CPUID; result is
// taken directly from EDX via the "=d" constraint.
asm("push %%ebx\n"
"mov $0x01, %%eax\n"
"cpuid\n"
"pop %%ebx\n"
: "=d" (features)
: /* We have no inputs */
: "%eax", "%ecx"
);
#endif
// Bit 26 of EDX from CPUID(1) is the SSE2 capability flag.
return (features & (1<<26)) != 0;
}
#endif
// Fills in the safest default value for every field, derived from
// compile-time flags and (on IA-32) runtime CPU detection. Embedders
// may override individual fields afterwards at their own risk.
Config::Config()
{
// Zero everything first so that any flag not explicitly set below
// defaults to "off".
// NOTE(review): assumes Config remains a trivially-copyable struct
// with no virtuals or non-trivial members -- memset would otherwise
// be undefined behavior; confirm if Config ever grows such members.
VMPI_memset(this, 0, sizeof(*this));
// CSE is a pure optimization, safe on every architecture: on by default.
cseopt = true;
#ifdef NANOJIT_IA32
// SSE2 is detected at runtime via CPUID; cmov is assumed available
// by default (embedder may clear i386_use_cmov if targeting CPUs
// without it); a fixed ESP is not required by default.
i386_sse2 = CheckForSSE2();
i386_use_cmov = true;
i386_fixed_esp = false;
#endif
#if defined(NANOJIT_ARM)
// NJ_COMPILER_ARM_ARCH comes from njcpudetect.h; nanojit only
// supports ARMv5..v7, so fail the build outside that range.
NanoStaticAssert(NJ_COMPILER_ARM_ARCH >= 5 && NJ_COMPILER_ARM_ARCH <= 7);
arm_arch = NJ_COMPILER_ARM_ARCH;
// Assume VFP hardware is present only when compiling for ARMv7+.
arm_vfp = (arm_arch >= 7);
// ARM statistics collection is enabled only in debug builds.
#if defined(DEBUG) || defined(_DEBUG)
arm_show_stats = true;
#else
arm_show_stats = false;
#endif
// Without VFP, all floating point must go through softfloat.
soft_float = !arm_vfp;
#endif // NANOJIT_ARM
}
}
#endif /* FEATURE_NANOJIT */

101
js/src/nanojit/njconfig.h Normal file
Просмотреть файл

@ -0,0 +1,101 @@
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is [Open Source Virtual Machine].
*
* The Initial Developer of the Original Code is
* Adobe System Incorporated.
* Portions created by the Initial Developer are Copyright (C) 2004-2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Adobe AS3 Team
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef __njconfig_h__
#define __njconfig_h__
#include "avmplus.h"
// Do not include nanojit.h here; this file should be usable without it.
#ifdef FEATURE_NANOJIT
namespace nanojit
{
/***
* A struct used to configure the assumptions that Assembler can make when
* generating code. The ctor will fill in all fields with the most reasonable
* values it can derive from compiler flags and/or runtime detection, but
* the embedder is free to override any or all of them as it sees fit.
* Using the ctor-provided default setup is guaranteed to provide a safe
* runtime environment (though perhaps suboptimal in some cases), so an embedder
* should replace these values with great care.
*
* Note that although many fields are used on only specific architecture(s),
* this struct is deliberately declared without ifdef's for them, so (say) ARM-specific
* fields are declared everywhere. This reduces build dependencies (so that this
file does not require nanojit.h to be included beforehand) and also reduces
* clutter in this file; the extra storage space required is trivial since most
* fields are single bits.
*/
struct Config
{
public:
// Fills in reasonable default values for all fields (see njconfig.cpp):
// safe for the detected/compiled-for CPU, though possibly suboptimal.
Config();
// ARM architecture level to assume when generating instructions
// (currently 5 <= arm_arch <= 7). Meaningful on ARM only.
uint8_t arm_arch;
// If true, use CSE (common-subexpression elimination).
uint32_t cseopt:1;
// Can we use SSE2 instructions? (x86-only)
uint32_t i386_sse2:1;
// Can we use cmov instructions? (x86-only)
uint32_t i386_use_cmov:1;
// Should we use a virtual stack pointer? (x86-only)
uint32_t i386_fixed_esp:1;
// Whether or not to generate VFP (hardware floating-point) instructions. (ARM only)
uint32_t arm_vfp:1;
// If true, collect ARM-specific statistics; defaults to on in DEBUG
// builds only. NOTE(review): exact reporting behavior not visible
// here -- confirm against the ARM backend before relying on it.
uint32_t arm_show_stats:1;
// If true, use softfloat for all floating point operations,
// whether or not an FPU is present. (ARM only for now, but might also
// include MIPS in the future.)
uint32_t soft_float:1;
};
}
#endif // FEATURE_NANOJIT
#endif // __njconfig_h__

Просмотреть файл

@ -0,0 +1,104 @@
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is [Open Source Virtual Machine].
*
* The Initial Developer of the Original Code is
* Adobe System Incorporated.
* Portions created by the Initial Developer are Copyright (C) 2004-2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Adobe AS3 Team
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef __njcpudetect__
#define __njcpudetect__
/***
* Note: this file should not include *any* other files, nor should it wrap
* itself in ifdef FEATURE_NANOJIT, nor should it do anything other than
* define preprocessor symbols.
*/
/***
* NJ_COMPILER_ARM_ARCH attempts to specify the minimum ARM architecture
* that the C++ compiler has specified. Note that although Config::arm_arch
* is initialized to this value by default, there is no requirement that they
* be in sync.
*
* Note, this is done via #define so that downstream preprocessor usage can
* examine it, but please don't attempt to redefine it.
*
* Note, this is deliberately not encased in "ifdef NANOJIT_ARM", as this file
* may be included before that is defined. On non-ARM platforms we will hit the
* "Unable to determine" case.
*/
// GCC and RealView usually define __ARM_ARCH__
#if defined(__ARM_ARCH__)
#define NJ_COMPILER_ARM_ARCH __ARM_ARCH__
// ok, try well-known GCC flags ( see http://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html )
#elif defined(__ARM_ARCH_7__) || \
defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_7R__)
#define NJ_COMPILER_ARM_ARCH 7
#elif defined(__ARM_ARCH_6__) || \
defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6T2__) || \
defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6M__)
#define NJ_COMPILER_ARM_ARCH 6
#elif defined(__ARM_ARCH_5__) || \
defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5E__) || \
defined(__ARM_ARCH_5TE__)
// BUGFIX: no trailing semicolon here. A semicolon would be pasted into
// every expansion site, turning expressions such as
// "NJ_COMPILER_ARM_ARCH >= 5" (used by NanoStaticAssert in njconfig.cpp)
// into a syntax error on ARMv5 builds.
#define NJ_COMPILER_ARM_ARCH 5
// Visual C has its own mojo
#elif defined(_MSC_VER) && defined(_M_ARM)
#define NJ_COMPILER_ARM_ARCH _M_ARM
#else
// non-numeric value: any arithmetic use will fail to compile, which is
// the intended diagnostic on unsupported/undetectable targets.
#define NJ_COMPILER_ARM_ARCH "Unable to determine valid NJ_COMPILER_ARM_ARCH (nanojit only supports ARMv5 or later)"
#endif
#endif // __njcpudetect__