[JAEGER] Allow custom memory allocator use in spidermonkey (bug 549532, r=dvander).

Mike Moenig 2010-04-12 18:51:25 -07:00
Parent 98a8aede2d
Commit ea63af8267
10 changed files with 88 additions and 78 deletions

View file

@@ -200,7 +200,7 @@ struct JSArenaPool {
         if ((pool)->current == (a)) (pool)->current = &(pool)->first;       \
         *(pnext) = (a)->next;                                                \
         JS_CLEAR_ARENA(a);                                                   \
-        free(a);                                                             \
+        js_free(a);                                                          \
         (a) = NULL;                                                          \
     JS_END_MACRO

View file

@@ -334,6 +334,8 @@ typedef HashMap<jsbytecode*,
                 DefaultHasher<jsbytecode*>,
                 SystemAllocPolicy> RecordAttemptMap;

+class Oracle;
+
 /*
  * Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
  * JS_THREADSAFE) has an associated trace monitor that keeps track of loop
@@ -395,6 +397,7 @@ struct TraceMonitor {
     nanojit::Assembler*     assembler;
     FrameInfoCache*         frameCache;

+    Oracle*                 oracle;
     TraceRecorder*          recorder;

     GlobalState             globalStates[MONITOR_N_GLOBAL_STATES];

View file

@@ -2907,9 +2907,6 @@ PreGCCleanup(JSContext *cx, JSGCInvocationKind gckind)
     }
 #endif

-#ifdef JS_TRACER
-    PurgeJITOracle();
-#endif
     /*
      * Reset the property cache's type id generator so we can compress ids.

View file

@@ -67,7 +67,7 @@
 static void *
 DefaultAllocTable(void *pool, size_t size)
 {
-    return malloc(size);
+    return js_malloc(size);
 }

 static void
@@ -79,7 +79,7 @@ DefaultFreeTable(void *pool, void *item, size_t size)
 static JSHashEntry *
 DefaultAllocEntry(void *pool, const void *key)
 {
-    return (JSHashEntry*) malloc(sizeof(JSHashEntry));
+    return (JSHashEntry*) js_malloc(sizeof(JSHashEntry));
 }

 static void

View file

@@ -836,7 +836,7 @@ js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
 static JSFatLock *
 NewFatlock()
 {
-    JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
+    JSFatLock *fl = (JSFatLock *)js_malloc(sizeof(JSFatLock)); /* for now */
     if (!fl) return NULL;
     fl->susp = 0;
     fl->next = NULL;

View file

@@ -121,8 +121,8 @@ class UpRecursiveSlotMap : public RecursiveSlotMap
 };

 #if defined DEBUG
-static JS_REQUIRES_STACK void
-AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
+JS_REQUIRES_STACK void
+TraceRecorder::assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi)
 {
     JS_ASSERT(anchor->recursive_down);
     JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight);
@@ -130,7 +130,7 @@ AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
     unsigned downPostSlots = fi->callerHeight;
     TraceType* typeMap = fi->get_typemap();

-    CaptureStackTypes(cx, 1, typeMap);
+    captureStackTypes(1, typeMap);
     const TraceType* m1 = anchor->recursive_down->get_typemap();
     for (unsigned i = 0; i < downPostSlots; i++) {
         if (m1[i] == typeMap[i])
@@ -258,7 +258,7 @@ TraceRecorder::upRecursion()
          * recursive functions.
          */
 #if defined DEBUG
-        AssertDownFrameIsConsistent(cx, anchor, fi);
+        assertDownFrameIsConsistent(anchor, fi);
 #endif
         fi = anchor->recursive_down;
     } else if (recursive_pc != fragment->root->ip) {
@@ -266,7 +266,7 @@ TraceRecorder::upRecursion()
          * Case 1: Guess that down-recursion has to started back out, infer types
          * from the down frame.
          */
-        CaptureStackTypes(cx, 1, fi->get_typemap());
+        captureStackTypes(1, fi->get_typemap());
     } else {
         /* Case 2: Guess that up-recursion is backing out, infer types from our Tree. */
         JS_ASSERT(tree->nStackTypes == downPostSlots + 1);
@@ -491,7 +491,7 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
     TraceType* typeMap = exit->stackTypeMap();
     jsbytecode* oldpc = cx->fp->regs->pc;

     cx->fp->regs->pc = exit->pc;
-    CaptureStackTypes(cx, frameDepth, typeMap);
+    captureStackTypes(frameDepth, typeMap);
     cx->fp->regs->pc = oldpc;

     if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
         JS_ASSERT_IF(*cx->fp->regs->pc != JSOP_RETURN, *cx->fp->regs->pc == JSOP_STOP);

View file

@@ -241,9 +241,9 @@ PointerRangeSize(T *begin, T *end)
 class SystemAllocPolicy
 {
   public:
-    void *malloc(size_t bytes) { return ::malloc(bytes); }
-    void *realloc(void *p, size_t bytes) { return ::realloc(p, bytes); }
-    void free(void *p) { ::free(p); }
+    void *malloc(size_t bytes) { return js_malloc(bytes); }
+    void *realloc(void *p, size_t bytes) { return js_realloc(p, bytes); }
+    void free(void *p) { js_free(p); }
     void reportAllocOverflow() const {}
 };
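SystemAllocPolicy is the allocation policy handed to containers elsewhere in this patch (for example the RecordAttemptMap HashMap in the TraceMonitor hunk above), so after this hunk their storage is also obtained through the js_* wrappers and, in turn, through a custom allocator when one is configured. A minimal sketch of the forwarding, assuming the surrounding declarations are in scope; the size and variable names are illustrative, not taken from the patch:

    SystemAllocPolicy ap;
    void *buf = ap.malloc(128);   /* now forwards to js_malloc(128) */
    if (buf)
        ap.free(buf);             /* now forwards to js_free(buf) */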

View file

@@ -107,7 +107,7 @@ nanojit::Allocator::allocChunk(size_t nbytes)
 {
     VMAllocator *vma = (VMAllocator*)this;
     JS_ASSERT(!vma->outOfMemory());
-    void *p = calloc(1, nbytes);
+    void *p = js_calloc(nbytes);
     if (!p) {
         JS_ASSERT(nbytes < sizeof(vma->mReserve));
         vma->mOutOfMemory = true;
@@ -121,7 +121,7 @@ void
 nanojit::Allocator::freeChunk(void *p) {
     VMAllocator *vma = (VMAllocator*)this;
     if (p != &vma->mReserve[0])
-        free(p);
+        js_free(p);
 }

 void
@@ -906,12 +906,6 @@ TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LI
 }
 #endif

-/*
- * The entire VM shares one oracle. Collisions and concurrent updates are
- * tolerated and worst case cause performance regressions.
- */
-static Oracle oracle;
-
 Tracker::Tracker()
 {
     pagelist = NULL;
@@ -951,7 +945,7 @@ struct Tracker::TrackerPage*
 Tracker::addTrackerPage(const void* v)
 {
     jsuword base = getTrackerPageBase(v);
-    struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p));
+    struct TrackerPage* p = (struct TrackerPage*) js_calloc(sizeof(*p));
     p->base = base;
     p->next = pagelist;
     pagelist = p;
@@ -964,7 +958,7 @@ Tracker::clear()
     while (pagelist) {
         TrackerPage* p = pagelist;
         pagelist = pagelist->next;
-        free(p);
+        js_free(p);
     }
 }
@@ -1217,44 +1211,38 @@ Oracle::clearDemotability()
     _pcDontDemote.reset();
 }

-JS_REQUIRES_STACK static JS_INLINE void
-MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
+JS_REQUIRES_STACK void
+TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot)
 {
     if (slot < f->nStackTypes) {
-        oracle.markStackSlotUndemotable(cx, slot);
+        oracle->markStackSlotUndemotable(cx, slot);
         return;
     }

     uint16* gslots = f->globalSlots->data();
-    oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+    oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }

-JS_REQUIRES_STACK static JS_INLINE void
-MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc)
+JS_REQUIRES_STACK void
+TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc)
 {
     if (slot < f->nStackTypes) {
-        oracle.markStackSlotUndemotable(cx, slot, pc);
+        oracle->markStackSlotUndemotable(cx, slot, pc);
         return;
     }

     uint16* gslots = f->globalSlots->data();
-    oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+    oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }

-static JS_REQUIRES_STACK inline bool
-IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
+static JS_REQUIRES_STACK bool
+IsSlotUndemotable(Oracle* oracle, JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
 {
     if (slot < f->nStackTypes)
-        return oracle.isStackSlotUndemotable(cx, slot, ip);
+        return oracle->isStackSlotUndemotable(cx, slot, ip);

     uint16* gslots = f->globalSlots->data();
-    return oracle.isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
-}
-
-static JS_REQUIRES_STACK inline bool
-IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
-{
-    return IsSlotUndemotable(cx, f, slot, cx->fp->regs->pc);
+    return oracle->isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }

 class FrameInfoCache
@@ -1967,7 +1955,7 @@ public:
     visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
         TraceType type = getCoercedType(*vp);
         if (type == TT_INT32 &&
-            oracle.isGlobalSlotUndemotable(mCx, slot))
+            JS_TRACE_MONITOR(mCx).oracle->isGlobalSlotUndemotable(mCx, slot))
             type = TT_DOUBLE;
         JS_ASSERT(type != TT_JSVAL);
         debug_only_printf(LC_TMTracer,
@@ -1981,7 +1969,7 @@ public:
         for (int i = 0; i < count; ++i) {
             TraceType type = getCoercedType(vp[i]);
             if (type == TT_INT32 &&
-                oracle.isStackSlotUndemotable(mCx, length()))
+                JS_TRACE_MONITOR(mCx).oracle->isStackSlotUndemotable(mCx, length()))
                 type = TT_DOUBLE;
             JS_ASSERT(type != TT_JSVAL);
             debug_only_printf(LC_TMTracer,
@@ -2137,6 +2125,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag
                              RecordReason recordReason)
   : cx(cx),
     traceMonitor(&JS_TRACE_MONITOR(cx)),
+    oracle(JS_TRACE_MONITOR(cx).oracle),
    fragment(fragment),
     tree(fragment->root),
     recordReason(recordReason),
@@ -2684,6 +2673,7 @@ TraceMonitor::flush()
     codeAlloc->reset();
     tempAlloc->reset();
     reTempAlloc->reset();
+    oracle->clear();

     Allocator& alloc = *dataAlloc;
@@ -3532,7 +3522,7 @@ TraceRecorder::importGlobalSlot(unsigned slot)
     int index = tree->globalSlots->offsetOf(uint16(slot));
     if (index == -1) {
         type = getCoercedType(*vp);
-        if (type == TT_INT32 && oracle.isGlobalSlotUndemotable(cx, slot))
+        if (type == TT_INT32 && oracle->isGlobalSlotUndemotable(cx, slot))
             type = TT_DOUBLE;
         index = (int)tree->globalSlots->length();
         tree->globalSlots->add(uint16(slot));
@@ -3763,7 +3753,7 @@ public:
              * Aggressively undo speculation so the inner tree will compile
              * if this fails.
              */
-            oracle.markGlobalSlotUndemotable(mCx, slot);
+            mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
         }
         JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
         ++mTypeMap;
@@ -3807,7 +3797,7 @@ public:
              * Aggressively undo speculation so the inner tree will compile
              * if this fails.
              */
-            oracle.markStackSlotUndemotable(mCx, mSlotnum);
+            mRecorder.oracle->markStackSlotUndemotable(mCx, mSlotnum);
         }
         JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
         ++vp;
@@ -4439,7 +4429,7 @@ class SlotMap : public SlotVisitorBase
     {
         for (unsigned i = 0; i < length(); i++) {
             if (get(i).lastCheck == TypeCheck_Undemote)
-                MarkSlotUndemotable(mRecorder.cx, mRecorder.tree, i);
+                mRecorder.markSlotUndemotable(mRecorder.tree, i);
         }
     }
@@ -4764,7 +4754,7 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
         if (typeMap[i] == peerMap[i])
             continue;
         if (typeMap[i] == TT_INT32 && peerMap[i] == TT_DOUBLE &&
-            IsSlotUndemotable(cx, peer, i, peer->ip)) {
+            IsSlotUndemotable(JS_TRACE_MONITOR(cx).oracle, cx, peer, i, peer->ip)) {
             consensus = TypeConsensus_Undemotes;
         } else {
             return TypeConsensus_Bad;
@@ -4773,8 +4763,8 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
     return consensus;
 }

-static JS_REQUIRES_STACK unsigned
-FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment* f,
+JS_REQUIRES_STACK unsigned
+TraceRecorder::findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
                         Queue<unsigned>& undemotes)
 {
     undemotes.setLength(0);
@@ -4787,7 +4777,7 @@ FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment*
         }
     }
     for (unsigned i = 0; i < undemotes.length(); i++)
-        MarkSlotUndemotable(cx, f, undemotes[i]);
+        markSlotUndemotable(f, undemotes[i]);
     return undemotes.length();
 }

@@ -4836,7 +4826,7 @@ TraceRecorder::joinEdgesToEntry(TreeFragment* peer_root)
                 uexit = peer->removeUnstableExit(uexit->exit);
             } else {
                 /* Check for int32->double slots that suggest trashing. */
-                if (FindUndemotesInTypemaps(cx, typeMap, tree, undemotes)) {
+                if (findUndemotesInTypemaps(typeMap, tree, undemotes)) {
                     JS_ASSERT(peer == uexit->fragment->root);
                     if (fragment == peer)
                         trashSelf = true;
@@ -5683,6 +5673,7 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
     TreeFragment* from = exit->root();

     JS_ASSERT(from->code());
+    Oracle* oracle = JS_TRACE_MONITOR(cx).oracle;

     TypeMap typeMap(NULL);
     FullMapFromExit(typeMap, exit);
@@ -5694,14 +5685,14 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
         if (typeMap[i] == TT_DOUBLE) {
             if (exit->exitType == RECURSIVE_UNLINKED_EXIT) {
                 if (i < exit->numStackSlots)
-                    oracle.markStackSlotUndemotable(cx, i, exit->recursive_pc);
+                    oracle->markStackSlotUndemotable(cx, i, exit->recursive_pc);
                 else
-                    oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
+                    oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
             }
             if (i < from->nStackTypes)
-                oracle.markStackSlotUndemotable(cx, i, from->ip);
+                oracle->markStackSlotUndemotable(cx, i, from->ip);
             else if (i >= exit->numStackSlots)
-                oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
+                oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
         }
     }
@@ -6069,7 +6060,7 @@ TraceRecorder::attemptTreeCall(TreeFragment* f, uintN& inlineCallCount)
      }

      case OVERFLOW_EXIT:
-        oracle.markInstructionUndemotable(cx->fp->regs->pc);
+        oracle->markInstructionUndemotable(cx->fp->regs->pc);
        /* FALL THROUGH */
      case RECURSIVE_SLURP_FAIL_EXIT:
      case RECURSIVE_SLURP_MISMATCH_EXIT:
@@ -6177,10 +6168,10 @@ public:
         if (!IsEntryTypeCompatible(vp, mTypeMap)) {
             mOk = false;
         } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
-            oracle.markGlobalSlotUndemotable(mCx, slot);
+            mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
             mOk = false;
         } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
-            oracle.markGlobalSlotUndemotable(mCx, slot);
+            mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
         }
         mTypeMap++;
     }
@@ -6192,10 +6183,10 @@ public:
         if (!IsEntryTypeCompatible(vp, mTypeMap)) {
             mOk = false;
         } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
-            oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
+            mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
             mOk = false;
         } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
-            oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
+            mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
         }
         vp++;
         mTypeMap++;
@@ -7025,7 +7016,7 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason)
         return rv;

       case OVERFLOW_EXIT:
-        oracle.markInstructionUndemotable(cx->fp->regs->pc);
+        tm->oracle->markInstructionUndemotable(cx->fp->regs->pc);
         /* FALL THROUGH */
       case RECURSIVE_SLURP_FAIL_EXIT:
       case RECURSIVE_SLURP_MISMATCH_EXIT:
@@ -7466,6 +7457,8 @@ InitJIT(TraceMonitor *tm)
     /* Set the default size for the code cache to 16MB. */
     tm->maxCodeCacheBytes = 16 M;

+    tm->oracle = new Oracle();
+
     tm->recordAttempts = new RecordAttemptMap;
     if (!tm->recordAttempts->init(PC_HASH_COUNT))
         abort();
@@ -7548,6 +7541,7 @@ FinishJIT(TraceMonitor *tm)
 #endif

     delete tm->recordAttempts;
+    delete tm->oracle;

 #ifdef DEBUG
     // Recover profiling data from expiring Fragments, and display
@@ -7619,12 +7613,6 @@ FinishJIT(TraceMonitor *tm)
         tm->cachedTempTypeMap = NULL;
     }

-void
-PurgeJITOracle()
-{
-    oracle.clear();
-}
-
 JS_REQUIRES_STACK void
 PurgeScriptFragments(JSContext* cx, JSScript* script)
 {
@@ -8082,7 +8070,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)
      * integers and the oracle must not give us a negative hint for the
      * instruction.
      */
-    if (oracle.isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
+    if (oracle->isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
     out:
         if (v == LIR_fmod) {
             LIns* args[] = { s1, s0 };
@@ -10242,7 +10230,7 @@ TraceRecorder::record_JSOP_NEG()
          * a double. Only follow this path if we're not an integer that's 0 and
          * we're not a double that's zero.
          */
-        if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) &&
+        if (!oracle->isInstructionUndemotable(cx->fp->regs->pc) &&
             isPromoteInt(a) &&
             (!JSVAL_IS_INT(v) || JSVAL_TO_INT(v) != 0) &&
             (!JSVAL_IS_DOUBLE(v) || !JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) &&
@@ -15312,7 +15300,7 @@ StopTraceVisNative(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval
 #endif /* MOZ_TRACEVIS */

 JS_REQUIRES_STACK void
-CaptureStackTypes(JSContext* cx, unsigned callDepth, TraceType* typeMap)
+TraceRecorder::captureStackTypes(unsigned callDepth, TraceType* typeMap)
 {
     CaptureTypesVisitor capVisitor(cx, typeMap);
     VisitStackSlots(capVisitor, cx, callDepth);

View file

@@ -78,7 +78,7 @@ public:
             memcpy(tmp, _data, _len * sizeof(T));
             _data = tmp;
         } else {
-            _data = (T*)realloc(_data, _max * sizeof(T));
+            _data = (T*)js_realloc(_data, _max * sizeof(T));
         }
 #if defined(DEBUG)
         memset(&_data[_len], 0xcd, _max - _len);
@@ -95,7 +95,7 @@ public:
     ~Queue() {
         if (!alloc)
-            free(_data);
+            js_free(_data);
     }

     bool contains(T a) {
@@ -926,6 +926,9 @@ class TraceRecorder
     /* Cached value of JS_TRACE_MONITOR(cx). */
     TraceMonitor* const traceMonitor;

+    /* Cached oracle keeps track of hit counts for program counter locations */
+    Oracle* oracle;
+
     /* The Fragment being recorded by this recording session. */
     VMFragment* const fragment;

@@ -1065,6 +1068,17 @@ class TraceRecorder
      */
     JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);

+    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot);
+
+    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc);
+
+    JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
+                                                       Queue<unsigned>& undemotes);
+
+    JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi);
+
+    JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, TraceType* typeMap);
+
     bool isGlobal(jsval* p) const;
     ptrdiff_t nativeGlobalSlot(jsval *p) const;
     ptrdiff_t nativeGlobalOffset(jsval* p) const;
@@ -1380,8 +1394,8 @@ class TraceRecorder
 # include "jsopcode.tbl"
 #undef OPDEF

-    inline void* operator new(size_t size) { return calloc(1, size); }
-    inline void operator delete(void *p) { free(p); }
+    inline void* operator new(size_t size) { return js_calloc(size); }
+    inline void operator delete(void *p) { js_free(p); }

     JS_REQUIRES_STACK
     TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,

View file

@@ -44,6 +44,7 @@
 #ifndef jsutil_h___
 #define jsutil_h___

+#include "jstypes.h"
 #include <stdlib.h>

 JS_BEGIN_EXTERN_C
@@ -182,6 +183,12 @@ extern JS_FRIEND_API(void)
 JS_DumpBacktrace(JSCallsite *trace);
 #endif

+#if defined JS_USE_CUSTOM_ALLOCATOR
+
+#include "jscustomallocator.h"
+
+#else
+
 static JS_INLINE void* js_malloc(size_t bytes) {
     if (bytes < sizeof(void*)) /* for asyncFree */
         bytes = sizeof(void*);
@@ -203,6 +210,7 @@ static JS_INLINE void* js_realloc(void* p, size_t bytes) {
 static JS_INLINE void js_free(void* p) {
     free(p);
 }
+#endif/* JS_USE_CUSTOM_ALLOCATOR */

 JS_END_EXTERN_C
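
A minimal sketch of the jscustomallocator.h an embedding might supply when building with JS_USE_CUSTOM_ALLOCATOR defined. The requirement that it provide js_malloc, js_calloc, js_realloc, and js_free (with js_calloc taking a single size argument) is inferred from the default branch and the call sites in this patch; JS_INLINE is assumed to be available via the jstypes.h include added above. The byte-counting hook and its name are purely illustrative, and whether a replacement must preserve the default js_malloc's minimum-size adjustment for asyncFree is not settled by this patch:

    /* jscustomallocator.h -- hypothetical embedder-supplied allocator hooks. */
    #include <stdlib.h>

    /* Illustrative bookkeeping only; defined once in the embedding. */
    extern size_t embedding_bytes_requested;

    static JS_INLINE void* js_malloc(size_t bytes) {
        embedding_bytes_requested += bytes;
        return malloc(bytes);
    }

    static JS_INLINE void* js_calloc(size_t bytes) {
        embedding_bytes_requested += bytes;
        return calloc(bytes, 1);
    }

    static JS_INLINE void* js_realloc(void* p, size_t bytes) {
        embedding_bytes_requested += bytes;
        return realloc(p, bytes);
    }

    static JS_INLINE void js_free(void* p) {
        free(p);
    }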