Bug 525379 - nanojit: make the _nIns/_nExitIns swap hack more consistent. r=edwsmith.

--HG--
extra : convert_revision : bad9394918255f4afcbff259153dc9d8c9afad25
Nicholas Nethercote 2009-11-24 14:56:33 +11:00
Parent: e692555818
Commit: 2102f230e5
12 changed files with 90 additions and 63 deletions
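In outline, the patch deletes each backend's private swapptrs() macro and gives every backend a swapCodeChunks() method built on a shared SWAP helper. A condensed, non-compilable sketch assembled from the hunks below (the ARM version additionally swaps _nSlot/_nExitSlot):

    // Old per-backend macro (Nativei386.h, NativeX64.h, NativeSparc.h; similar on ARM/PPC):
    //   #define swapptrs()  { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }

    // New shared helper declared in Assembler.h:
    #define SWAP(t, a, b)   do { t tmp = a; a = b; b = tmp; } while (0)

    // New per-backend method, e.g. the i386/X64/SPARC/PPC form:
    void Assembler::swapCodeChunks() {
        SWAP(NIns*, _nIns,     _nExitIns);    // current instruction pointers
        SWAP(NIns*, codeStart, exitStart);    // active chunk bounds
        SWAP(NIns*, codeEnd,   exitEnd);
        verbose_only( SWAP(size_t, codeBytes, exitBytes); )   // per-kind byte counters
    }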

View file

@@ -216,8 +216,8 @@ namespace nanojit
     void Assembler::pageValidate()
     {
         if (error()) return;
-        // _nIns needs to be at least on one of these pages
-        NanoAssertMsg(_inExit ? containsPtr(exitStart, exitEnd, _nIns) : containsPtr(codeStart, codeEnd, _nIns),
+        // This may be a normal code chunk or an exit code chunk.
+        NanoAssertMsg(containsPtr(codeStart, codeEnd, _nIns),
                       "Native instruction pointer overstep paging bounds; check overrideProtect for last instruction");
     }
 #endif
@@ -621,7 +621,7 @@ namespace nanojit
         // otherwise we free it entirely.  intersectRegisterState will restore.
         releaseRegisters();
-        swapptrs();
+        swapCodeChunks();
         _inExit = true;

 #ifdef NANOJIT_IA32
@@ -644,10 +644,10 @@ namespace nanojit
         NIns* jmpTarget = _nIns;     // target in exit path for our mainline conditional jump

         // swap back pointers, effectively storing the last location used in the exit path
-        swapptrs();
+        swapCodeChunks();
         _inExit = false;

-        //verbose_only( verbose_outputf("         LIR_xt/xf swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
+        //verbose_only( verbose_outputf("         LIR_xt/xf swapCodeChunks, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
         verbose_only( verbose_outputf("%010lx:", (unsigned long)jmpTarget);)
         verbose_only( verbose_outputf("----------------------------------- ## BEGIN exit block (LIR_xt|LIR_xf)") );
@@ -783,6 +783,7 @@ namespace nanojit
             }
         )

+        NanoAssert(!_inExit);
         // save used parts of current block on fragment's code list, free the rest
 #ifdef NANOJIT_ARM
         // [codeStart, _nSlot) ... gap ... [_nIns, codeEnd)

View file

@@ -206,8 +206,6 @@ namespace nanojit
         // Log controller object.  Contains what-stuff-should-we-print
         // bits, and a sink function for debug printing
         LogControl* _logc;
-        size_t      codeBytes;
-        size_t      exitBytes;
 #endif // NJ_VERBOSE

 #ifdef VTUNE
@@ -295,10 +293,30 @@ namespace nanojit
         NInsMap     _patches;
         LabelStateMap _labels;

-        NIns        *codeStart, *codeEnd;   // current block we're adding code to
-        NIns        *exitStart, *exitEnd;   // current block for exit stubs
-        NIns*       _nIns;                  // current native instruction
-        NIns*       _nExitIns;              // current instruction in exit fragment page
+        // We generate code into two places:  normal code chunks, and exit
+        // code chunks (for exit stubs).  We use a hack to avoid having to
+        // parameterise the code that does the generating -- we let that
+        // code assume that it's always generating into a normal code
+        // chunk (most of the time it is), and when we instead need to
+        // generate into an exit code chunk, we set _inExit to true and
+        // temporarily swap all the code/exit variables below (using
+        // swapCodeChunks()).  Afterwards we swap them all back and set
+        // _inExit to false again.
+        bool        _inExit, vpad2[3];
+        NIns        *codeStart, *codeEnd;   // current normal code chunk
+        NIns        *exitStart, *exitEnd;   // current exit code chunk
+        NIns*       _nIns;                  // current instruction in current normal code chunk
+        NIns*       _nExitIns;              // current instruction in current exit code chunk
+#ifdef NJ_VERBOSE
+    public:
+        size_t      codeBytes;              // bytes allocated in normal code chunks
+        size_t      exitBytes;              // bytes allocated in exit code chunks
+#endif
+    private:
+
+#define SWAP(t, a, b)   do { t tmp = a; a = b; b = tmp; } while (0)
+        void        swapCodeChunks();

         NIns*       _epilogue;
         AssmError   _err;           // 0 = means assemble() appears ok, otherwise it failed
 #if PEDANTIC
@@ -308,8 +326,6 @@ namespace nanojit
         AR          _activation;
         RegAlloc    _allocator;

-        bool        _inExit, vpad2[3];
-
         verbose_only( void asm_inc_m32(uint32_t*); )
         void        asm_mmq(Register rd, int dd, Register rs, int ds);
         NIns*       asm_exit(LInsp guard);
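As the new comment block above describes, code that needs to emit into an exit chunk flips the shared variables around the emission and flips them back afterwards. A condensed sketch of the usage pattern from the Assembler.cpp hunks earlier (verbose output and IA32-specific details omitted):

    // Entering exit-stub generation (asm_exit path):
    releaseRegisters();
    swapCodeChunks();        // _nIns/codeStart/codeEnd now describe the exit chunk
    _inExit = true;

    // ... emit the exit stub; underrunProtect() etc. see only codeStart/codeEnd ...

    // Leaving exit-stub generation:
    swapCodeChunks();        // restore the normal-chunk variables
    _inExit = false;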

View file

@@ -1570,6 +1570,7 @@ Assembler::nativePageReset()
 void
 Assembler::nativePageSetup()
 {
+    NanoAssert(!_inExit);
     if (!_nIns)
         codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
     if (!_nExitIns)
@@ -1595,12 +1596,10 @@ Assembler::underrunProtect(int bytes)
 {
     verbose_only(verbose_outputf(" %p:", _nIns);)
     NIns* target = _nIns;
-    if (_inExit)
-        codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
-    else
-        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+    // This may be a normal code chunk or an exit code chunk.
+    codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));

-    _nSlot = _inExit ? exitStart : codeStart;
+    _nSlot = codeStart;

     // _nSlot points to the first empty position in the new code block
     // _nIns points just past the last empty position.
@@ -2619,5 +2618,13 @@ Assembler::asm_jtbl(LIns* ins, NIns** table)
         asm_ld_imm(tmp, (int32_t)table); // tmp = #table
 }

+void Assembler::swapCodeChunks() {
+    SWAP(NIns*, _nIns,     _nExitIns);
+    SWAP(NIns*, _nSlot,    _nExitSlot);     // this one is ARM-specific
+    SWAP(NIns*, codeStart, exitStart);
+    SWAP(NIns*, codeEnd,   exitEnd);
+    verbose_only( SWAP(size_t, codeBytes, exitBytes); )
+}
+
 }
 #endif /* FEATURE_NANOJIT */

View file

@@ -255,14 +255,6 @@ verbose_only( extern const char* shiftNames[]; )
     bool        blx_lr_bug;                 \
     int         max_out_args; /* bytes */

-#define swapptrs() {                                            \
-        NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins;  \
-        int* _nslot = _nSlot;                                   \
-        _nSlot = _nExitSlot;                                    \
-        _nExitSlot = _nslot;                                    \
-    }
-
 #define IMM32(imm)  *(--_nIns) = (NIns)((imm));

 #define OP_IMM  (1<<25)

View file

@@ -1060,8 +1060,8 @@ namespace nanojit
     void Assembler::underrunProtect(int bytes) {
        NanoAssertMsg(bytes<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
        int instr = (bytes + sizeof(NIns) - 1) / sizeof(NIns);
-       NIns *top = _inExit ? this->exitStart : this->codeStart;
        NIns *pc = _nIns;
+       NIns *top = codeStart;  // this may be in a normal code chunk or an exit code chunk

 #if PEDANTIC
        // pedanticTop is based on the last call to underrunProtect; any time we call
@@ -1091,12 +1091,9 @@ namespace nanojit
 #else
        if (pc - instr < top) {
            verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
-           if (_inExit)
-               codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
-           else
-               codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
-
-           // this jump will call underrunProtect again, but since we're on a new
+           // This may be in a normal code chunk or an exit code chunk.
+           codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+           // This jump will call underrunProtect again, but since we're on a new
            // page, nothing will happen.
            br(pc, 0);
        }
@@ -1171,6 +1168,7 @@ namespace nanojit
     }

     void Assembler::nativePageSetup() {
+        NanoAssert(!_inExit);
         if (!_nIns) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             IF_PEDANTIC( pedanticTop = _nIns; )
@@ -1320,6 +1318,13 @@ namespace nanojit
 #endif // 64bit
     }

+    void Assembler::swapCodeChunks() {
+        SWAP(NIns*, _nIns,     _nExitIns);
+        SWAP(NIns*, codeStart, exitStart);
+        SWAP(NIns*, codeEnd,   exitEnd);
+        verbose_only( SWAP(size_t, codeBytes, exitBytes); )
+    }
+
 } // namespace nanojit

 #endif // FEATURE_NANOJIT && NANOJIT_PPC

View file

@@ -290,10 +290,6 @@ namespace nanojit
     int max_param_size; /* bytes */ \
     DECL_PPC64()

-#define swapptrs() do {                                          \
-        NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins;   \
-    } while (0) /* no semi */
-
 const int LARGEST_UNDERRUN_PROT = 9*4;  // largest value passed to underrunProtect

 typedef uint32_t NIns;

View file

@@ -992,6 +992,7 @@ namespace nanojit
     void Assembler::nativePageSetup()
     {
+        NanoAssert(!_inExit);
         if (!_nIns)
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
         if (!_nExitIns)
@@ -1011,11 +1012,9 @@ namespace nanojit
     Assembler::underrunProtect(int n)
     {
         NIns *eip = _nIns;
-        if (eip - n < (_inExit ? exitStart : codeStart)) {
-            if (_inExit)
-                codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
-            else
-                codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+        // This may be in a normal code chunk or an exit code chunk.
+        if (eip - n < codeStart) {
+            codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             JMP_long_nocheck((intptr_t)eip);
         }
     }
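The reason the _inExit branches can simply be dropped, here and in the other backends, is the invariant set up by swapCodeChunks(): while an exit stub is being generated, the normal/exit variables have already been exchanged, so codeStart/codeEnd/_nIns always describe the chunk currently being filled. A schematic restatement of that invariant (not literal patch code):

    // Single allocation path in underrunProtect(), valid for both chunk kinds:
    if (eip - n < codeStart) {                                          // active chunk nearly full
        codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes)); // start a fresh chunk
        JMP_long_nocheck((intptr_t)eip);                                // jump back to code already emitted
    }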
@@ -1037,5 +1036,12 @@ namespace nanojit
         TODO(asm_promote);
     }

+    void Assembler::swapCodeChunks() {
+        SWAP(NIns*, _nIns,     _nExitIns);
+        SWAP(NIns*, codeStart, exitStart);
+        SWAP(NIns*, codeEnd,   exitEnd);
+        verbose_only( SWAP(size_t, codeBytes, exitBytes); )
+    }
+
 #endif /* FEATURE_NANOJIT */
 }

View file

@@ -205,8 +205,6 @@ namespace nanojit
     void asm_fcmp(LIns *cond); \
     NIns* asm_fbranch(bool, LIns*, NIns*);

-#define swapptrs()  { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }
-
 #define IMM32(i)    \
     --_nIns;        \
     *((int32_t*)_nIns) = (int32_t)(i)

View file

@@ -1681,7 +1681,7 @@ namespace nanojit
     void Assembler::underrunProtect(ptrdiff_t bytes) {
         NanoAssertMsg(bytes<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
         NIns *pc = _nIns;
-        NIns *top = _inExit ? this->exitStart : this->codeStart;
+        NIns *top = codeStart;  // this may be in a normal code chunk or an exit code chunk

 #if PEDANTIC
         // pedanticTop is based on the last call to underrunProtect; any time we call
@@ -1696,10 +1696,8 @@ namespace nanojit
         if (pc - bytes - br_size < top) {
             // really do need a page break
             verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
-            if (_inExit)
-                codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
-            else
-                codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+            // This may be in a normal code chunk or an exit code chunk.
+            codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
         }
         // now emit the jump, but make sure we won't need another page break.
         // we're pedantic, but not *that* pedantic.
@@ -1710,11 +1708,9 @@ namespace nanojit
 #else
         if (pc - bytes < top) {
             verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
-            if (_inExit)
-                codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
-            else
-                codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
-
-            // this jump will call underrunProtect again, but since we're on a new
+            // This may be in a normal code chunk or an exit code chunk.
+            codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+            // This jump will call underrunProtect again, but since we're on a new
             // page, nothing will happen.
             JMP(pc);
         }
@@ -1726,6 +1722,7 @@ namespace nanojit
     }

     void Assembler::nativePageSetup() {
+        NanoAssert(!_inExit);
         if (!_nIns) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             IF_PEDANTIC( pedanticTop = _nIns; )
@@ -1766,6 +1763,13 @@ namespace nanojit
         }
     }

+    void Assembler::swapCodeChunks() {
+        SWAP(NIns*, _nIns,     _nExitIns);
+        SWAP(NIns*, codeStart, exitStart);
+        SWAP(NIns*, codeEnd,   exitEnd);
+        verbose_only( SWAP(size_t, codeBytes, exitBytes); )
+    }
+
 } // namespace nanojit

 #endif // FEATURE_NANOJIT && NANOJIT_X64

View file

@@ -554,8 +554,6 @@ namespace nanojit
     void X86_SETNP(Register r);\
     void X86_SETE(Register r);\

-#define swapptrs()  { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }
-
 const int LARGEST_UNDERRUN_PROT = 32;  // largest value passed to underrunProtect

 typedef uint8_t NIns;

View file

@@ -1835,6 +1835,7 @@ namespace nanojit
     void Assembler::nativePageSetup()
     {
+        NanoAssert(!_inExit);
         if (!_nIns)
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
         if (!_nExitIns)
@@ -1846,11 +1847,9 @@ namespace nanojit
     {
         NIns *eip = _nIns;
         NanoAssertMsg(n<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
-        if (eip - n < (_inExit ? exitStart : codeStart)) {
-            if (_inExit)
-                codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
-            else
-                codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
+        // This may be in a normal code chunk or an exit code chunk.
+        if (eip - n < codeStart) {
+            codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             JMP(eip);
         }
     }
@@ -1877,5 +1876,12 @@ namespace nanojit
         TODO(asm_promote);
     }

+    void Assembler::swapCodeChunks() {
+        SWAP(NIns*, _nIns,     _nExitIns);
+        SWAP(NIns*, codeStart, exitStart);
+        SWAP(NIns*, codeEnd,   exitEnd);
+        verbose_only( SWAP(size_t, codeBytes, exitBytes); )
+    }
+
 #endif /* FEATURE_NANOJIT */
 }

View file

@@ -187,8 +187,6 @@ namespace nanojit
     void asm_cmp(LIns *cond); \
     void asm_div_mod(LIns *cond);

-#define swapptrs()  { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }
-
 #define IMM32(i)    \
     _nIns -= 4;     \
     *((int32_t*)_nIns) = (int32_t)(i)