Backed out changeset (by hand due to update-nanojit difficulties) 69d5a3454a6e (bug 460993), avmplus.cpp changes were bogus.

This commit is contained in:
Nicholas Nethercote 2010-01-22 08:43:09 +11:00
Parent 49a3366f24
Commit ffbe1ee005
5 changed files with 18 additions and 82 deletions

View file

@ -941,7 +941,7 @@ namespace nanojit
// something went wrong, release all allocated code memory
_codeAlloc.freeAll(codeList);
if (_nExitIns)
_codeAlloc.free(exitStart, exitEnd);
_codeAlloc.free(exitStart, exitEnd);
_codeAlloc.free(codeStart, codeEnd);
codeList = NULL;
return;
@ -957,15 +957,15 @@ namespace nanojit
#ifdef NANOJIT_ARM
// [codeStart, _nSlot) ... gap ... [_nIns, codeEnd)
if (_nExitIns) {
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);
verbose_only( exitBytes -= (_nExitIns - _nExitSlot) * sizeof(NIns); )
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);
verbose_only( exitBytes -= (_nExitIns - _nExitSlot) * sizeof(NIns); )
}
_codeAlloc.addRemainder(codeList, codeStart, codeEnd, _nSlot, _nIns);
verbose_only( codeBytes -= (_nIns - _nSlot) * sizeof(NIns); )
#else
// [codeStart ... gap ... [_nIns, codeEnd))
if (_nExitIns) {
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, exitStart, _nExitIns);
_codeAlloc.addRemainder(codeList, exitStart, exitEnd, exitStart, _nExitIns);
verbose_only( exitBytes -= (_nExitIns - exitStart) * sizeof(NIns); )
}
_codeAlloc.addRemainder(codeList, codeStart, codeEnd, codeStart, _nIns);

View file

@ -70,14 +70,14 @@ namespace nanojit
void CodeAlloc::reset() {
// give all memory back to gcheap. Assumption is that all
// code is done being used by now.
for (CodeList* hb = heapblocks; hb != 0; ) {
for (CodeList* b = heapblocks; b != 0; ) {
_nvprof("free page",1);
CodeList* next = hb->next;
CodeList* fb = firstBlock(hb);
markBlockWrite(fb);
freeCodeChunk(fb, bytesPerAlloc);
CodeList* next = b->next;
void *mem = firstBlock(b);
VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
freeCodeChunk(mem, bytesPerAlloc);
totalAllocated -= bytesPerAlloc;
hb = next;
b = next;
}
NanoAssert(!totalAllocated);
heapblocks = availblocks = 0;
@ -89,10 +89,9 @@ namespace nanojit
return (CodeList*) (end - (uintptr_t)bytesPerAlloc);
}
static int round(size_t x) {
int round(size_t x) {
return (int)((x + 512) >> 10);
}
void CodeAlloc::logStats() {
size_t total = 0;
size_t frag_size = 0;
@ -113,19 +112,9 @@ namespace nanojit
round(total), round(free_size), frag_size);
}
inline void CodeAlloc::markBlockWrite(CodeList* b) {
NanoAssert(b->terminator != NULL);
CodeList* term = b->terminator;
if (term->isExec) {
markCodeChunkWrite(firstBlock(term), bytesPerAlloc);
term->isExec = false;
}
}
void CodeAlloc::alloc(NIns* &start, NIns* &end) {
// Reuse a block if possible.
if (availblocks) {
markBlockWrite(availblocks);
CodeList* b = removeBlock(availblocks);
b->isFree = false;
start = b->start();
@ -139,6 +128,7 @@ namespace nanojit
totalAllocated += bytesPerAlloc;
NanoAssert(mem != NULL); // see allocCodeChunk contract in CodeAlloc.h
_nvprof("alloc page", uintptr_t(mem)>>12);
VMPI_setPageProtection(mem, bytesPerAlloc, true/*executable*/, true/*writable*/);
CodeList* b = addMem(mem, bytesPerAlloc);
b->isFree = false;
start = b->start();
@ -235,7 +225,7 @@ namespace nanojit
void* mem = hb->lower;
*prev = hb->next;
_nvprof("free page",1);
markBlockWrite(firstBlock(hb));
VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
freeCodeChunk(mem, bytesPerAlloc);
totalAllocated -= bytesPerAlloc;
} else {
@ -357,12 +347,9 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
// create a tiny terminator block, add to fragmented list, this way
// all other blocks have a valid block at b->higher
CodeList* terminator = b->higher;
b->terminator = terminator;
terminator->lower = b;
terminator->end = 0; // this is how we identify the terminator
terminator->isFree = false;
terminator->isExec = false;
terminator->terminator = 0;
debug_only(sanity_check();)
// add terminator to heapblocks list so we can track whole blocks
@ -378,7 +365,7 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
CodeList* CodeAlloc::removeBlock(CodeList* &blocks) {
CodeList* b = blocks;
NanoAssert(b != NULL);
NanoAssert(b);
blocks = b->next;
b->next = 0;
return b;
@ -412,7 +399,6 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
// b1 b2
CodeList* b1 = getBlock(start, end);
CodeList* b2 = (CodeList*) (uintptr_t(holeEnd) - offsetof(CodeList, code));
b2->terminator = b1->terminator;
b2->isFree = false;
b2->next = 0;
b2->higher = b1->higher;
@ -435,12 +421,10 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
b2->lower = b1;
b2->higher = b3;
b2->isFree = false; // redundant, since we're about to free, but good hygiene
b2->terminator = b1->terminator;
b3->lower = b2;
b3->end = end;
b3->isFree = false;
b3->higher->lower = b3;
b3->terminator = b1->terminator;
b2->next = 0;
b3->next = 0;
debug_only(sanity_check();)
@ -534,14 +518,5 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
#endif /* CROSS_CHECK_FREE_LIST */
}
#endif
void CodeAlloc::markAllExec() {
for (CodeList* hb = heapblocks; hb != NULL; hb = hb->next) {
if (!hb->isExec) {
hb->isExec = true;
markCodeChunkExec(firstBlock(hb), bytesPerAlloc);
}
}
}
}
#endif // FEATURE_NANOJIT

View file

@ -64,16 +64,8 @@ namespace nanojit
for splitting and coalescing blocks. */
CodeList* lower;
/** pointer to the heapblock terminal that represents the code chunk containing this block */
CodeList* terminator;
/** true if block is free, false otherwise */
bool isFree;
/** (only valid for terminator blocks). Set true just before calling
* markCodeChunkExec() and false just after markCodeChunkWrite() */
bool isExec;
union {
// this union is used in lieu of pointer punning in code
// the end of this block is always the address of the next higher block
@ -150,17 +142,9 @@ namespace nanojit
/** free a block previously allocated by allocCodeMem. nbytes will
* match the previous allocCodeMem, but is provided here as well
* to mirror the mmap()/munmap() api. markCodeChunkWrite() will have
* been called if necessary, so it is not necessary for freeCodeChunk()
* to do it again. */
* to mirror the mmap()/munmap() api. */
void freeCodeChunk(void* addr, size_t nbytes);
/** make this specific extent ready to execute (might remove write) */
void markCodeChunkExec(void* addr, size_t nbytes);
/** make this extent ready to modify (might remove exec) */
void markCodeChunkWrite(void* addr, size_t nbytes);
public:
CodeAlloc();
~CodeAlloc();
@ -214,12 +198,6 @@ namespace nanojit
/** return any completely empty pages */
void sweep();
/** protect all code in this code alloc */
void markAllExec();
/** unprotect the code chunk containing just this one block */
void markBlockWrite(CodeList* b);
};
}

View file

@ -495,7 +495,7 @@ namespace nanojit
return out->ins1(v, i);
}
// This is an ugly workaround for an apparent compiler
// bug; in VC2008, compiling with optimization on
// will produce spurious errors if this code is inlined
@ -544,7 +544,7 @@ namespace nanojit
int32_t r;
switch (v) {
case LIR_qjoin:
case LIR_qjoin:
return insImmf(do_join(c1, c2));
case LIR_eq:
return insImm(c1 == c2);

View file

@ -89,13 +89,11 @@ void*
nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
void * buffer;
posix_memalign(&buffer, 4096, nbytes);
VMPI_setPageProtection(mem, nbytes, true /* exec */, true /* write */);
return buffer;
}
void
nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
VMPI_setPageProtection(mem, nbytes, false /* exec */, true /* write */);
::free(p);
}
@ -157,28 +155,13 @@ nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
void*
nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
void* mem = valloc(nbytes);
VMPI_setPageProtection(mem, nbytes, true /* exec */, true /* write */);
return mem;
return valloc(nbytes);
}
void
nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
VMPI_setPageProtection(mem, nbytes, false /* exec */, true /* write */);
::free(p);
}
#endif // WIN32
// All of the allocCodeChunk/freeCodeChunk implementations above allocate
// code memory as RWX and then free it, so the explicit page protection api's
// below are no-ops.
void
nanojit::CodeAlloc::markCodeChunkWrite(void*, size_t)
{}
void
nanojit::CodeAlloc::markCodeChunkExec(void*, size_t)
{}