Bug 525379 - nanojit: make the _nIns/_nExitIns swap hack more consistent. r=edwsmith.

--HG--
extra : convert_revision : bad9394918255f4afcbff259153dc9d8c9afad25
This commit is contained in:
Nicholas Nethercote 2009-11-24 14:56:33 +11:00
Родитель e692555818
Коммит 2102f230e5
12 изменённых файлов: 90 добавлений и 63 удалений

Просмотреть файл

@@ -216,8 +216,8 @@ namespace nanojit
void Assembler::pageValidate()
{
if (error()) return;
// _nIns needs to be at least on one of these pages
NanoAssertMsg(_inExit ? containsPtr(exitStart, exitEnd, _nIns) : containsPtr(codeStart, codeEnd, _nIns),
// This may be a normal code chunk or an exit code chunk.
NanoAssertMsg(containsPtr(codeStart, codeEnd, _nIns),
"Native instruction pointer overstep paging bounds; check overrideProtect for last instruction");
}
#endif
@@ -621,7 +621,7 @@ namespace nanojit
// otherwise we free it entirely. intersectRegisterState will restore.
releaseRegisters();
swapptrs();
swapCodeChunks();
_inExit = true;
#ifdef NANOJIT_IA32
@@ -644,10 +644,10 @@ namespace nanojit
NIns* jmpTarget = _nIns; // target in exit path for our mainline conditional jump
// swap back pointers, effectively storing the last location used in the exit path
swapptrs();
swapCodeChunks();
_inExit = false;
//verbose_only( verbose_outputf(" LIR_xt/xf swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
//verbose_only( verbose_outputf(" LIR_xt/xf swapCodeChunks, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
verbose_only( verbose_outputf("%010lx:", (unsigned long)jmpTarget);)
verbose_only( verbose_outputf("----------------------------------- ## BEGIN exit block (LIR_xt|LIR_xf)") );
@@ -783,6 +783,7 @@ namespace nanojit
}
)
NanoAssert(!_inExit);
// save used parts of current block on fragment's code list, free the rest
#ifdef NANOJIT_ARM
// [codeStart, _nSlot) ... gap ... [_nIns, codeEnd)

Просмотреть файл

@@ -206,8 +206,6 @@ namespace nanojit
// Log controller object. Contains what-stuff-should-we-print
// bits, and a sink function for debug printing
LogControl* _logc;
size_t codeBytes;
size_t exitBytes;
#endif // NJ_VERBOSE
#ifdef VTUNE
@@ -295,10 +293,30 @@ namespace nanojit
NInsMap _patches;
LabelStateMap _labels;
NIns *codeStart, *codeEnd; // current block we're adding code to
NIns *exitStart, *exitEnd; // current block for exit stubs
NIns* _nIns; // current native instruction
NIns* _nExitIns; // current instruction in exit fragment page
// We generate code into two places: normal code chunks, and exit
// code chunks (for exit stubs). We use a hack to avoid having to
// parameterise the code that does the generating -- we let that
// code assume that it's always generating into a normal code
// chunk (most of the time it is), and when we instead need to
// generate into an exit code chunk, we set _inExit to true and
// temporarily swap all the code/exit variables below (using
// swapCodeChunks()). Afterwards we swap them all back and set
// _inExit to false again.
bool _inExit, vpad2[3];
NIns *codeStart, *codeEnd; // current normal code chunk
NIns *exitStart, *exitEnd; // current exit code chunk
NIns* _nIns; // current instruction in current normal code chunk
NIns* _nExitIns; // current instruction in current exit code chunk
#ifdef NJ_VERBOSE
public:
size_t codeBytes; // bytes allocated in normal code chunks
size_t exitBytes; // bytes allocated in exit code chunks
#endif
private:
#define SWAP(t, a, b) do { t tmp = a; a = b; b = tmp; } while (0)
void swapCodeChunks();
NIns* _epilogue;
AssmError _err; // 0 = means assemble() appears ok, otherwise it failed
#if PEDANTIC
@@ -308,8 +326,6 @@ namespace nanojit
AR _activation;
RegAlloc _allocator;
bool _inExit, vpad2[3];
verbose_only( void asm_inc_m32(uint32_t*); )
void asm_mmq(Register rd, int dd, Register rs, int ds);
NIns* asm_exit(LInsp guard);

Просмотреть файл

@@ -1570,6 +1570,7 @@ Assembler::nativePageReset()
void
Assembler::nativePageSetup()
{
NanoAssert(!_inExit);
if (!_nIns)
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
if (!_nExitIns)
@@ -1595,12 +1596,10 @@ Assembler::underrunProtect(int bytes)
{
verbose_only(verbose_outputf(" %p:", _nIns);)
NIns* target = _nIns;
if (_inExit)
codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
else
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// This may be in a normal code chunk or an exit code chunk.
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
_nSlot = _inExit ? exitStart : codeStart;
_nSlot = codeStart;
// _nSlot points to the first empty position in the new code block
// _nIns points just past the last empty position.
@@ -2619,5 +2618,13 @@ Assembler::asm_jtbl(LIns* ins, NIns** table)
asm_ld_imm(tmp, (int32_t)table); // tmp = #table
}
void Assembler::swapCodeChunks() {
SWAP(NIns*, _nIns, _nExitIns);
SWAP(NIns*, _nSlot, _nExitSlot); // this one is ARM-specific
SWAP(NIns*, codeStart, exitStart);
SWAP(NIns*, codeEnd, exitEnd);
verbose_only( SWAP(size_t, codeBytes, exitBytes); )
}
}
#endif /* FEATURE_NANOJIT */

Просмотреть файл

@@ -255,14 +255,6 @@ verbose_only( extern const char* shiftNames[]; )
bool blx_lr_bug; \
int max_out_args; /* bytes */
#define swapptrs() { \
NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; \
int* _nslot = _nSlot; \
_nSlot = _nExitSlot; \
_nExitSlot = _nslot; \
}
#define IMM32(imm) *(--_nIns) = (NIns)((imm));
#define OP_IMM (1<<25)

Просмотреть файл

@@ -1060,8 +1060,8 @@ namespace nanojit
void Assembler::underrunProtect(int bytes) {
NanoAssertMsg(bytes<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
int instr = (bytes + sizeof(NIns) - 1) / sizeof(NIns);
NIns *top = _inExit ? this->exitStart : this->codeStart;
NIns *pc = _nIns;
NIns *top = codeStart; // this may be in a normal code chunk or an exit code chunk
#if PEDANTIC
// pedanticTop is based on the last call to underrunProtect; any time we call
@@ -1091,12 +1091,9 @@ namespace nanojit
#else
if (pc - instr < top) {
verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
if (_inExit)
codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
else
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// this jump will call underrunProtect again, but since we're on a new
// This may be in a normal code chunk or an exit code chunk.
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// This jump will call underrunProtect again, but since we're on a new
// page, nothing will happen.
br(pc, 0);
}
@@ -1171,6 +1168,7 @@ namespace nanojit
}
void Assembler::nativePageSetup() {
NanoAssert(!_inExit);
if (!_nIns) {
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
IF_PEDANTIC( pedanticTop = _nIns; )
@@ -1320,6 +1318,13 @@ namespace nanojit
#endif // 64bit
}
void Assembler::swapCodeChunks() {
SWAP(NIns*, _nIns, _nExitIns);
SWAP(NIns*, codeStart, exitStart);
SWAP(NIns*, codeEnd, exitEnd);
verbose_only( SWAP(size_t, codeBytes, exitBytes); )
}
} // namespace nanojit
#endif // FEATURE_NANOJIT && NANOJIT_PPC

Просмотреть файл

@@ -290,10 +290,6 @@ namespace nanojit
int max_param_size; /* bytes */ \
DECL_PPC64()
#define swapptrs() do { \
NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; \
} while (0) /* no semi */
const int LARGEST_UNDERRUN_PROT = 9*4; // largest value passed to underrunProtect
typedef uint32_t NIns;

Просмотреть файл

@@ -992,6 +992,7 @@ namespace nanojit
void Assembler::nativePageSetup()
{
NanoAssert(!_inExit);
if (!_nIns)
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
if (!_nExitIns)
@@ -1011,11 +1012,9 @@ namespace nanojit
Assembler::underrunProtect(int n)
{
NIns *eip = _nIns;
if (eip - n < (_inExit ? exitStart : codeStart)) {
if (_inExit)
codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
else
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// This may be in a normal code chunk or an exit code chunk.
if (eip - n < codeStart) {
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
JMP_long_nocheck((intptr_t)eip);
}
}
@@ -1037,5 +1036,12 @@ namespace nanojit
TODO(asm_promote);
}
void Assembler::swapCodeChunks() {
SWAP(NIns*, _nIns, _nExitIns);
SWAP(NIns*, codeStart, exitStart);
SWAP(NIns*, codeEnd, exitEnd);
verbose_only( SWAP(size_t, codeBytes, exitBytes); )
}
#endif /* FEATURE_NANOJIT */
}

Просмотреть файл

@@ -205,8 +205,6 @@ namespace nanojit
void asm_fcmp(LIns *cond); \
NIns* asm_fbranch(bool, LIns*, NIns*);
#define swapptrs() { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }
#define IMM32(i) \
--_nIns; \
*((int32_t*)_nIns) = (int32_t)(i)

Просмотреть файл

@@ -1681,7 +1681,7 @@ namespace nanojit
void Assembler::underrunProtect(ptrdiff_t bytes) {
NanoAssertMsg(bytes<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
NIns *pc = _nIns;
NIns *top = _inExit ? this->exitStart : this->codeStart;
NIns *top = codeStart; // this may be in a normal code chunk or an exit code chunk
#if PEDANTIC
// pedanticTop is based on the last call to underrunProtect; any time we call
@@ -1696,10 +1696,8 @@ namespace nanojit
if (pc - bytes - br_size < top) {
// really do need a page break
verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
if (_inExit)
codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
else
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// This may be in a normal code chunk or an exit code chunk.
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
}
// now emit the jump, but make sure we won't need another page break.
// we're pedantic, but not *that* pedantic.
@@ -1710,11 +1708,9 @@ namespace nanojit
#else
if (pc - bytes < top) {
verbose_only(if (_logc->lcbits & LC_Assembly) outputf("newpage %p:", pc);)
if (_inExit)
codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
else
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// this jump will call underrunProtect again, but since we're on a new
// This may be in a normal code chunk or an exit code chunk.
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// This jump will call underrunProtect again, but since we're on a new
// page, nothing will happen.
JMP(pc);
}
@@ -1726,6 +1722,7 @@ namespace nanojit
}
void Assembler::nativePageSetup() {
NanoAssert(!_inExit);
if (!_nIns) {
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
IF_PEDANTIC( pedanticTop = _nIns; )
@@ -1766,6 +1763,13 @@ namespace nanojit
}
}
void Assembler::swapCodeChunks() {
SWAP(NIns*, _nIns, _nExitIns);
SWAP(NIns*, codeStart, exitStart);
SWAP(NIns*, codeEnd, exitEnd);
verbose_only( SWAP(size_t, codeBytes, exitBytes); )
}
} // namespace nanojit
#endif // FEATURE_NANOJIT && NANOJIT_X64

Просмотреть файл

@@ -554,8 +554,6 @@ namespace nanojit
void X86_SETNP(Register r);\
void X86_SETE(Register r);\
#define swapptrs() { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }
const int LARGEST_UNDERRUN_PROT = 32; // largest value passed to underrunProtect
typedef uint8_t NIns;

Просмотреть файл

@@ -1835,6 +1835,7 @@ namespace nanojit
void Assembler::nativePageSetup()
{
NanoAssert(!_inExit);
if (!_nIns)
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
if (!_nExitIns)
@@ -1846,11 +1847,9 @@ namespace nanojit
{
NIns *eip = _nIns;
NanoAssertMsg(n<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
if (eip - n < (_inExit ? exitStart : codeStart)) {
if (_inExit)
codeAlloc(exitStart, exitEnd, _nIns verbose_only(, exitBytes));
else
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
// This may be in a normal code chunk or an exit code chunk.
if (eip - n < codeStart) {
codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
JMP(eip);
}
}
@@ -1877,5 +1876,12 @@ namespace nanojit
TODO(asm_promote);
}
void Assembler::swapCodeChunks() {
SWAP(NIns*, _nIns, _nExitIns);
SWAP(NIns*, codeStart, exitStart);
SWAP(NIns*, codeEnd, exitEnd);
verbose_only( SWAP(size_t, codeBytes, exitBytes); )
}
#endif /* FEATURE_NANOJIT */
}

Просмотреть файл

@@ -187,8 +187,6 @@ namespace nanojit
void asm_cmp(LIns *cond); \
void asm_div_mod(LIns *cond);
#define swapptrs() { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }
#define IMM32(i) \
_nIns -= 4; \
*((int32_t*)_nIns) = (int32_t)(i)