Robert Sayre 2009-09-28 19:15:23 -07:00
Parents: 62717a97ab 4ca3da21ee
Commit: 4ae112ebb3
6 changed files with 50 additions and 50 deletions

View file

@@ -849,43 +849,9 @@ TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LI
  */
 static Oracle oracle;
 
-/*
- * This confusing and mysterious expression is used for the Tracker. The
- * tracker's responsibility is to map opaque, 4-byte aligned addresses to LIns
- * pointers. To do this efficiently, we observe that the addresses of jsvals
- * living in the interpreter tend to be aggregated close to each other -
- * usually on the same page (where a tracker page doesn't have to be the same
- * size as the OS page size, but it's typically similar).
- *
- * For every address, we split it into two values: upper bits which represent
- * the "base", and lower bits which represent an offset against the base. We
- * create a list of:
- *   struct TrackerPage {
- *       void* base;
- *       LIns* map;
- *   };
- * The mapping then becomes:
- *   page = page such that Base(address) == page->base,
- *   page->map[Index(address)]
- *
- * The size of the map is allocated as N * sizeof(LIns*), where N is
- * (TRACKER_PAGE_SIZE >> 2). Since the lower two bits are 0, they are always
- * discounted.
- *
- * TRACKER_PAGE_MASK is the "reverse" expression, with a |- 1| to get a mask
- * which separates an address into the Base and Index bits. It is necessary to
- * do all this work rather than use TRACKER_PAGE_SIZE - 1, because on 64-bit
- * platforms the pointer width is twice as large, and only half as many
- * indexes can fit into TrackerPage::map. So the "Base" grows by one bit, and
- * the "Index" shrinks by one bit.
- */
-#define TRACKER_PAGE_MASK (((TRACKER_PAGE_SIZE / sizeof(void*)) << 2) - 1)
-#define TRACKER_PAGE_SIZE 4096
-
 Tracker::Tracker()
 {
-    pagelist = 0;
+    pagelist = NULL;
 }
 
 Tracker::~Tracker()
@@ -893,10 +859,16 @@ Tracker::~Tracker()
     clear();
 }
 
-jsuword
+inline jsuword
 Tracker::getTrackerPageBase(const void* v) const
 {
-    return jsuword(v) & ~jsuword(TRACKER_PAGE_MASK);
+    return jsuword(v) & ~TRACKER_PAGE_MASK;
+}
+
+inline jsuword
+Tracker::getTrackerPageOffset(const void* v) const
+{
+    return (jsuword(v) & TRACKER_PAGE_MASK) >> 2;
 }
 
 struct Tracker::TrackerPage*
@@ -905,19 +877,18 @@ Tracker::findTrackerPage(const void* v) const
     jsuword base = getTrackerPageBase(v);
     struct Tracker::TrackerPage* p = pagelist;
     while (p) {
-        if (p->base == base) {
+        if (p->base == base)
             return p;
-        }
         p = p->next;
     }
-    return 0;
+    return NULL;
 }
 
 struct Tracker::TrackerPage*
-Tracker::addTrackerPage(const void* v) {
+Tracker::addTrackerPage(const void* v)
+{
     jsuword base = getTrackerPageBase(v);
-    struct Tracker::TrackerPage* p = (struct Tracker::TrackerPage*)
-        calloc(1, sizeof(*p) - sizeof(p->map) + (TRACKER_PAGE_SIZE >> 2) * sizeof(LIns*));
+    struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p));
     p->base = base;
     p->next = pagelist;
     pagelist = p;
@@ -946,7 +917,7 @@ Tracker::get(const void* v) const
     struct Tracker::TrackerPage* p = findTrackerPage(v);
     if (!p)
         return NULL;
-    return p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2];
+    return p->map[getTrackerPageOffset(v)];
 }
 
 void
@@ -955,7 +926,7 @@ Tracker::set(const void* v, LIns* i)
     struct Tracker::TrackerPage* p = findTrackerPage(v);
    if (!p)
         p = addTrackerPage(v);
-    p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2] = i;
+    p->map[getTrackerPageOffset(v)] = i;
 }
 
 static inline jsuint
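The comment deleted above is the rationale for the old, pointer-width-dependent TRACKER_PAGE_MASK. Here is a small stand-alone C++ sketch, added for illustration and not part of the patch (uintptr_t stands in for jsuword, and the OLD_/NEW_ macro names are invented), showing what the two mask definitions compute for the same 4-byte aligned address:

// Stand-alone sketch comparing the two mask definitions (illustrative only:
// uintptr_t stands in for jsuword; the OLD_/NEW_ macro names are invented).
#include <cstdio>
#include <cstdint>

#define TRACKER_PAGE_SIZE 4096
// Old form: depends on sizeof(void*), so the mask is 0xfff on 32-bit builds
// but only 0x7ff on 64-bit builds.
#define OLD_TRACKER_PAGE_MASK (((TRACKER_PAGE_SIZE / sizeof(void*)) << 2) - 1)
// New form (TRACKER_PAGE_MASK in jstracer.h below): always the low 12 bits.
#define NEW_TRACKER_PAGE_MASK uintptr_t(TRACKER_PAGE_SIZE - 1)

int main()
{
    uintptr_t addr = 0x12345abc;   // some 4-byte aligned, jsval-like address

    // Base keeps the high bits; the offset is the masked low bits shifted
    // right by two, because 4-byte alignment zeroes the bottom two bits.
    printf("old: mask=%#lx base=%#lx offset=%lu\n",
           (unsigned long) OLD_TRACKER_PAGE_MASK,
           (unsigned long) (addr & ~uintptr_t(OLD_TRACKER_PAGE_MASK)),
           (unsigned long) ((addr & OLD_TRACKER_PAGE_MASK) >> 2));
    printf("new: mask=%#lx base=%#lx offset=%lu\n",
           (unsigned long) NEW_TRACKER_PAGE_MASK,
           (unsigned long) (addr & ~NEW_TRACKER_PAGE_MASK),
           (unsigned long) ((addr & NEW_TRACKER_PAGE_MASK) >> 2));
    return 0;
}

On a 32-bit build both masks are 0xfff; on a 64-bit build the old expression shrinks to 0x7ff, so a tracker page covered only 2 KB of address space, which is the base-grows/index-shrinks behaviour the deleted comment describes. The new jstracer.h definitions below keep the mask at 0xfff and instead let the fixed map[] array grow with sizeof(LIns*).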

View file

@@ -157,18 +157,37 @@ public:
 /*
  * Tracker is used to keep track of values being manipulated by the interpreter
- * during trace recording. Note that tracker pages aren't necessarily the
- * same size as OS pages, they just are a moderate-sized chunk of memory.
+ * during trace recording. It maps opaque, 4-byte aligned addresses to LIns
+ * pointers. To do this efficiently, we observe that the addresses of jsvals
+ * living in the interpreter tend to be aggregated close to each other -
+ * usually on the same page (where a tracker page doesn't have to be the same
+ * size as the OS page size, but it's typically similar). The Tracker
+ * consists of a linked-list of structures representing a memory page, which
+ * are created on-demand as memory locations are used.
+ *
+ * For every address, first we split it into two parts: upper bits which
+ * represent the "base", and lower bits which represent an offset against the
+ * base. For the offset, we then right-shift it by two because the bottom two
+ * bits of a 4-byte aligned address are always zero. The mapping then
+ * becomes:
+ *
+ *   page = page in pagelist such that Base(address) == page->base,
+ *   page->map[Offset(address)]
  */
 class Tracker {
+    #define TRACKER_PAGE_SZB        4096
+    #define TRACKER_PAGE_ENTRIES    (TRACKER_PAGE_SZB >> 2)    // each slot is 4 bytes
+    #define TRACKER_PAGE_MASK       jsuword(TRACKER_PAGE_SZB - 1)
+
     struct TrackerPage {
         struct TrackerPage* next;
         jsuword base;
-        nanojit::LIns* map[1];
+        nanojit::LIns* map[TRACKER_PAGE_ENTRIES];
     };
     struct TrackerPage* pagelist;
 
     jsuword getTrackerPageBase(const void* v) const;
+    jsuword getTrackerPageOffset(const void* v) const;
     struct TrackerPage* findTrackerPage(const void* v) const;
     struct TrackerPage* addTrackerPage(const void* v);
 public:
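To make the new comment and macros concrete, here is a self-contained sketch of the same base/offset scheme. It is an illustration only, not Mozilla code: MiniTracker, uintptr_t and void* stand in for Tracker, jsuword and nanojit::LIns*.

// Simplified sketch of the Tracker mapping described above (illustrative
// names and types: uintptr_t for jsuword, void* for nanojit::LIns*).
#include <cstdint>
#include <cstdlib>

class MiniTracker {
    static const uintptr_t PAGE_SZB = 4096;              // bytes of address space per page
    static const uintptr_t PAGE_ENTRIES = PAGE_SZB >> 2; // one slot per 4-byte aligned word
    static const uintptr_t PAGE_MASK = PAGE_SZB - 1;

    struct Page {
        Page*     next;
        uintptr_t base;
        void*     map[PAGE_ENTRIES];  // fixed size, so one calloc covers the whole page
    };
    Page* pagelist;

    // Upper bits select the page, lower bits (shifted by two) index into map[].
    static uintptr_t pageBase(const void* v)   { return uintptr_t(v) & ~PAGE_MASK; }
    static uintptr_t pageOffset(const void* v) { return (uintptr_t(v) & PAGE_MASK) >> 2; }

    Page* findPage(const void* v) const {
        uintptr_t base = pageBase(v);
        for (Page* p = pagelist; p; p = p->next) {
            if (p->base == base)
                return p;
        }
        return NULL;
    }

    Page* addPage(const void* v) {
        Page* p = (Page*) calloc(1, sizeof(*p));  // zeroed map, like the patched addTrackerPage
        p->base = pageBase(v);
        p->next = pagelist;
        pagelist = p;
        return p;
    }

  public:
    MiniTracker() : pagelist(NULL) {}
    ~MiniTracker() {
        while (pagelist) {
            Page* next = pagelist->next;
            free(pagelist);
            pagelist = next;
        }
    }

    void* get(const void* v) const {
        Page* p = findPage(v);
        return p ? p->map[pageOffset(v)] : NULL;
    }

    void set(const void* v, void* ins) {
        Page* p = findPage(v);
        if (!p)
            p = addPage(v);            // pages are created on demand
        p->map[pageOffset(v)] = ins;
    }
};

Because map[] now has a fixed, TRACKER_PAGE_ENTRIES-sized footprint, addTrackerPage in jstracer.cpp above can allocate a page with a plain calloc(1, sizeof(*p)) instead of computing the map size by hand.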

View file

@@ -59,7 +59,7 @@ namespace nanojit
         Chunk *c = current_chunk;
         while (c) {
             Chunk *prev = c->prev;
-            this->freeChunk(c);
+            freeChunk(c);
             c = prev;
         }
         current_chunk = NULL;

View file

@@ -53,7 +53,7 @@ namespace nanojit
     class Allocator {
     public:
         Allocator();
-        virtual ~Allocator();
+        ~Allocator();
         void reset();
 
         /** alloc memory, never return null. */

View file

@@ -105,6 +105,9 @@ namespace nanojit
         , _labels(alloc)
         , _epilogue(NULL)
         , _err(None)
+#if PEDANTIC
+        , pedanticTop(NULL)
+#endif
         , config(core->config)
     {
         VMPI_memset(&_stats, 0, sizeof(_stats));
@@ -337,10 +340,14 @@ namespace nanojit
     Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
     {
+#if !PEDANTIC
         if (i->isop(LIR_alloc)) {
             d += findMemFor(i);
             return FP;
         }
+#else
+        (void) d;
+#endif
 
         return findRegFor(i, allow);
     }

View file

@@ -253,6 +253,9 @@ namespace nanojit
         NIns* _nExitIns;   // current instruction in exit fragment page
         NIns* _epilogue;
         AssmError _err;    // 0 = means assemble() appears ok, otherwise it failed
+#if PEDANTIC
+        NIns* pedanticTop;
+#endif
 
         AR _activation;
         RegAlloc _allocator;