From 5833728ceeec39a93f3d107d9b7da89e811cb74e Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 21 Apr 2010 16:30:06 -0700 Subject: [PATCH] Backed out changeset 4a28bd424400 (tinderbox red). --- js/src/jscntxt.h | 3 -- js/src/jsgc.cpp | 3 ++ js/src/jsrecursion.cpp | 12 ++--- js/src/jstracer.cpp | 104 +++++++++++++++++++++++------------------ js/src/jstracer.h | 22 ++------- 5 files changed, 71 insertions(+), 73 deletions(-) diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h index bed609f022da..41b6a2d576ea 100644 --- a/js/src/jscntxt.h +++ b/js/src/jscntxt.h @@ -334,8 +334,6 @@ typedef HashMap, SystemAllocPolicy> RecordAttemptMap; -class Oracle; - /* * Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not * JS_THREADSAFE) has an associated trace monitor that keeps track of loop @@ -397,7 +395,6 @@ struct TraceMonitor { nanojit::Assembler* assembler; FrameInfoCache* frameCache; - Oracle* oracle; TraceRecorder* recorder; GlobalState globalStates[MONITOR_N_GLOBAL_STATES]; diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index b1e80363df49..f96fe20acd65 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -2930,6 +2930,9 @@ PreGCCleanup(JSContext *cx, JSGCInvocationKind gckind) } #endif +#ifdef JS_TRACER + PurgeJITOracle(); +#endif /* * Reset the property cache's type id generator so we can compress ids. diff --git a/js/src/jsrecursion.cpp b/js/src/jsrecursion.cpp index b2bdef740b10..cdcbf98af03f 100644 --- a/js/src/jsrecursion.cpp +++ b/js/src/jsrecursion.cpp @@ -121,8 +121,8 @@ class UpRecursiveSlotMap : public RecursiveSlotMap }; #if defined DEBUG -JS_REQUIRES_STACK void -TraceRecorder::assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi) +static JS_REQUIRES_STACK void +AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi) { JS_ASSERT(anchor->recursive_down); JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight); @@ -130,7 +130,7 @@ TraceRecorder::assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi) unsigned downPostSlots = fi->callerHeight; TraceType* typeMap = fi->get_typemap(); - captureStackTypes(1, typeMap); + CaptureStackTypes(cx, 1, typeMap); const TraceType* m1 = anchor->recursive_down->get_typemap(); for (unsigned i = 0; i < downPostSlots; i++) { if (m1[i] == typeMap[i]) @@ -258,7 +258,7 @@ TraceRecorder::upRecursion() * recursive functions. */ #if defined DEBUG - assertDownFrameIsConsistent(anchor, fi); + AssertDownFrameIsConsistent(cx, anchor, fi); #endif fi = anchor->recursive_down; } else if (recursive_pc != fragment->root->ip) { @@ -266,7 +266,7 @@ TraceRecorder::upRecursion() * Case 1: Guess that down-recursion has to started back out, infer types * from the down frame. */ - captureStackTypes(1, fi->get_typemap()); + CaptureStackTypes(cx, 1, fi->get_typemap()); } else { /* Case 2: Guess that up-recursion is backing out, infer types from our Tree. 
*/ JS_ASSERT(tree->nStackTypes == downPostSlots + 1); @@ -491,7 +491,7 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc) TraceType* typeMap = exit->stackTypeMap(); jsbytecode* oldpc = cx->fp->regs->pc; cx->fp->regs->pc = exit->pc; - captureStackTypes(frameDepth, typeMap); + CaptureStackTypes(cx, frameDepth, typeMap); cx->fp->regs->pc = oldpc; if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) { JS_ASSERT_IF(*cx->fp->regs->pc != JSOP_RETURN, *cx->fp->regs->pc == JSOP_STOP); diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index ee22b8627355..8383b42ea054 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -107,7 +107,7 @@ nanojit::Allocator::allocChunk(size_t nbytes) { VMAllocator *vma = (VMAllocator*)this; JS_ASSERT(!vma->outOfMemory()); - void *p = js_calloc(nbytes); + void *p = calloc(1, nbytes); if (!p) { JS_ASSERT(nbytes < sizeof(vma->mReserve)); vma->mOutOfMemory = true; @@ -121,7 +121,7 @@ void nanojit::Allocator::freeChunk(void *p) { VMAllocator *vma = (VMAllocator*)this; if (p != &vma->mReserve[0]) - js_free(p); + free(p); } void @@ -906,6 +906,12 @@ TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LI } #endif +/* + * The entire VM shares one oracle. Collisions and concurrent updates are + * tolerated and worst case cause performance regressions. + */ +static Oracle oracle; + Tracker::Tracker() { pagelist = NULL; @@ -945,7 +951,7 @@ struct Tracker::TrackerPage* Tracker::addTrackerPage(const void* v) { jsuword base = getTrackerPageBase(v); - struct TrackerPage* p = (struct TrackerPage*) js_calloc(sizeof(*p)); + struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p)); p->base = base; p->next = pagelist; pagelist = p; @@ -958,7 +964,7 @@ Tracker::clear() while (pagelist) { TrackerPage* p = pagelist; pagelist = pagelist->next; - js_free(p); + free(p); } } @@ -1211,38 +1217,44 @@ Oracle::clearDemotability() _pcDontDemote.reset(); } -JS_REQUIRES_STACK void -TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot) +JS_REQUIRES_STACK static JS_INLINE void +MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot) { if (slot < f->nStackTypes) { - oracle->markStackSlotUndemotable(cx, slot); + oracle.markStackSlotUndemotable(cx, slot); return; } uint16* gslots = f->globalSlots->data(); - oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]); + oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]); } -JS_REQUIRES_STACK void -TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc) +JS_REQUIRES_STACK static JS_INLINE void +MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc) { if (slot < f->nStackTypes) { - oracle->markStackSlotUndemotable(cx, slot, pc); + oracle.markStackSlotUndemotable(cx, slot, pc); return; } uint16* gslots = f->globalSlots->data(); - oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]); + oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]); } -static JS_REQUIRES_STACK bool -IsSlotUndemotable(Oracle* oracle, JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip) +static JS_REQUIRES_STACK inline bool +IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip) { if (slot < f->nStackTypes) - return oracle->isStackSlotUndemotable(cx, slot, ip); + return oracle.isStackSlotUndemotable(cx, slot, ip); uint16* gslots = f->globalSlots->data(); - return oracle->isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]); + 
return oracle.isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]); +} + +static JS_REQUIRES_STACK inline bool +IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot) +{ + return IsSlotUndemotable(cx, f, slot, cx->fp->regs->pc); } class FrameInfoCache @@ -1955,7 +1967,7 @@ public: visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) { TraceType type = getCoercedType(*vp); if (type == TT_INT32 && - JS_TRACE_MONITOR(mCx).oracle->isGlobalSlotUndemotable(mCx, slot)) + oracle.isGlobalSlotUndemotable(mCx, slot)) type = TT_DOUBLE; JS_ASSERT(type != TT_JSVAL); debug_only_printf(LC_TMTracer, @@ -1969,7 +1981,7 @@ public: for (int i = 0; i < count; ++i) { TraceType type = getCoercedType(vp[i]); if (type == TT_INT32 && - JS_TRACE_MONITOR(mCx).oracle->isStackSlotUndemotable(mCx, length())) + oracle.isStackSlotUndemotable(mCx, length())) type = TT_DOUBLE; JS_ASSERT(type != TT_JSVAL); debug_only_printf(LC_TMTracer, @@ -2125,7 +2137,6 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag RecordReason recordReason) : cx(cx), traceMonitor(&JS_TRACE_MONITOR(cx)), - oracle(JS_TRACE_MONITOR(cx).oracle), fragment(fragment), tree(fragment->root), recordReason(recordReason), @@ -2673,7 +2684,6 @@ TraceMonitor::flush() codeAlloc->reset(); tempAlloc->reset(); reTempAlloc->reset(); - oracle->clear(); Allocator& alloc = *dataAlloc; @@ -3523,7 +3533,7 @@ TraceRecorder::importGlobalSlot(unsigned slot) int index = tree->globalSlots->offsetOf(uint16(slot)); if (index == -1) { type = getCoercedType(*vp); - if (type == TT_INT32 && oracle->isGlobalSlotUndemotable(cx, slot)) + if (type == TT_INT32 && oracle.isGlobalSlotUndemotable(cx, slot)) type = TT_DOUBLE; index = (int)tree->globalSlots->length(); tree->globalSlots->add(uint16(slot)); @@ -3754,7 +3764,7 @@ public: * Aggressively undo speculation so the inner tree will compile * if this fails. */ - mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot); + oracle.markGlobalSlotUndemotable(mCx, slot); } JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32)); ++mTypeMap; @@ -3798,7 +3808,7 @@ public: * Aggressively undo speculation so the inner tree will compile * if this fails. 
*/ - mRecorder.oracle->markStackSlotUndemotable(mCx, mSlotnum); + oracle.markStackSlotUndemotable(mCx, mSlotnum); } JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32)); ++vp; @@ -4430,7 +4440,7 @@ class SlotMap : public SlotVisitorBase { for (unsigned i = 0; i < length(); i++) { if (get(i).lastCheck == TypeCheck_Undemote) - mRecorder.markSlotUndemotable(mRecorder.tree, i); + MarkSlotUndemotable(mRecorder.cx, mRecorder.tree, i); } } @@ -4755,7 +4765,7 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer) if (typeMap[i] == peerMap[i]) continue; if (typeMap[i] == TT_INT32 && peerMap[i] == TT_DOUBLE && - IsSlotUndemotable(JS_TRACE_MONITOR(cx).oracle, cx, peer, i, peer->ip)) { + IsSlotUndemotable(cx, peer, i, peer->ip)) { consensus = TypeConsensus_Undemotes; } else { return TypeConsensus_Bad; @@ -4764,8 +4774,8 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer) return consensus; } -JS_REQUIRES_STACK unsigned -TraceRecorder::findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f, +static JS_REQUIRES_STACK unsigned +FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment* f, Queue& undemotes) { undemotes.setLength(0); @@ -4778,7 +4788,7 @@ TraceRecorder::findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* } } for (unsigned i = 0; i < undemotes.length(); i++) - markSlotUndemotable(f, undemotes[i]); + MarkSlotUndemotable(cx, f, undemotes[i]); return undemotes.length(); } @@ -4827,7 +4837,7 @@ TraceRecorder::joinEdgesToEntry(TreeFragment* peer_root) uexit = peer->removeUnstableExit(uexit->exit); } else { /* Check for int32->double slots that suggest trashing. */ - if (findUndemotesInTypemaps(typeMap, tree, undemotes)) { + if (FindUndemotesInTypemaps(cx, typeMap, tree, undemotes)) { JS_ASSERT(peer == uexit->fragment->root); if (fragment == peer) trashSelf = true; @@ -5674,7 +5684,6 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp) TreeFragment* from = exit->root(); JS_ASSERT(from->code()); - Oracle* oracle = JS_TRACE_MONITOR(cx).oracle; TypeMap typeMap(NULL); FullMapFromExit(typeMap, exit); @@ -5686,14 +5695,14 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp) if (typeMap[i] == TT_DOUBLE) { if (exit->exitType == RECURSIVE_UNLINKED_EXIT) { if (i < exit->numStackSlots) - oracle->markStackSlotUndemotable(cx, i, exit->recursive_pc); + oracle.markStackSlotUndemotable(cx, i, exit->recursive_pc); else - oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]); + oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]); } if (i < from->nStackTypes) - oracle->markStackSlotUndemotable(cx, i, from->ip); + oracle.markStackSlotUndemotable(cx, i, from->ip); else if (i >= exit->numStackSlots) - oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]); + oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]); } } @@ -6061,7 +6070,7 @@ TraceRecorder::attemptTreeCall(TreeFragment* f, uintN& inlineCallCount) } case OVERFLOW_EXIT: - oracle->markInstructionUndemotable(cx->fp->regs->pc); + oracle.markInstructionUndemotable(cx->fp->regs->pc); /* FALL THROUGH */ case RECURSIVE_SLURP_FAIL_EXIT: case RECURSIVE_SLURP_MISMATCH_EXIT: @@ -6169,10 +6178,10 @@ public: if (!IsEntryTypeCompatible(vp, mTypeMap)) { mOk = false; } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) { - mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot); + oracle.markGlobalSlotUndemotable(mCx, slot); mOk = false; } else if 
(JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) { - mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot); + oracle.markGlobalSlotUndemotable(mCx, slot); } mTypeMap++; } @@ -6184,10 +6193,10 @@ public: if (!IsEntryTypeCompatible(vp, mTypeMap)) { mOk = false; } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) { - mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum); + oracle.markStackSlotUndemotable(mCx, mStackSlotNum); mOk = false; } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) { - mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum); + oracle.markStackSlotUndemotable(mCx, mStackSlotNum); } vp++; mTypeMap++; @@ -7025,7 +7034,7 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason) return rv; case OVERFLOW_EXIT: - tm->oracle->markInstructionUndemotable(cx->fp->regs->pc); + oracle.markInstructionUndemotable(cx->fp->regs->pc); /* FALL THROUGH */ case RECURSIVE_SLURP_FAIL_EXIT: case RECURSIVE_SLURP_MISMATCH_EXIT: @@ -7466,8 +7475,6 @@ InitJIT(TraceMonitor *tm) /* Set the default size for the code cache to 16MB. */ tm->maxCodeCacheBytes = 16 M; - tm->oracle = new Oracle(); - tm->recordAttempts = new RecordAttemptMap; if (!tm->recordAttempts->init(PC_HASH_COUNT)) abort(); @@ -7550,7 +7557,6 @@ FinishJIT(TraceMonitor *tm) #endif delete tm->recordAttempts; - delete tm->oracle; #ifdef DEBUG // Recover profiling data from expiring Fragments, and display @@ -7622,6 +7628,12 @@ FinishJIT(TraceMonitor *tm) tm->cachedTempTypeMap = NULL; } +void +PurgeJITOracle() +{ + oracle.clear(); +} + JS_REQUIRES_STACK void PurgeScriptFragments(JSContext* cx, JSScript* script) { @@ -8079,7 +8091,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1) * integers and the oracle must not give us a negative hint for the * instruction. */ - if (oracle->isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) { + if (oracle.isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) { out: if (v == LIR_fmod) { LIns* args[] = { s1, s0 }; @@ -10276,7 +10288,7 @@ TraceRecorder::record_JSOP_NEG() * a double. Only follow this path if we're not an integer that's 0 and * we're not a double that's zero. */ - if (!oracle->isInstructionUndemotable(cx->fp->regs->pc) && + if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) && isPromoteInt(a) && (!JSVAL_IS_INT(v) || JSVAL_TO_INT(v) != 0) && (!JSVAL_IS_DOUBLE(v) || !JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) && @@ -15361,7 +15373,7 @@ StopTraceVisNative(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval #endif /* MOZ_TRACEVIS */ JS_REQUIRES_STACK void -TraceRecorder::captureStackTypes(unsigned callDepth, TraceType* typeMap) +CaptureStackTypes(JSContext* cx, unsigned callDepth, TraceType* typeMap) { CaptureTypesVisitor capVisitor(cx, typeMap); VisitStackSlots(capVisitor, cx, callDepth); diff --git a/js/src/jstracer.h b/js/src/jstracer.h index e261e43f9e8a..794e58c970ff 100644 --- a/js/src/jstracer.h +++ b/js/src/jstracer.h @@ -78,7 +78,7 @@ public: memcpy(tmp, _data, _len * sizeof(T)); _data = tmp; } else { - _data = (T*)js_realloc(_data, _max * sizeof(T)); + _data = (T*)realloc(_data, _max * sizeof(T)); } #if defined(DEBUG) memset(&_data[_len], 0xcd, _max - _len); @@ -95,7 +95,7 @@ public: ~Queue() { if (!alloc) - js_free(_data); + free(_data); } bool contains(T a) { @@ -926,9 +926,6 @@ class TraceRecorder /* Cached value of JS_TRACE_MONITOR(cx). 
*/ TraceMonitor* const traceMonitor; - /* Cached oracle keeps track of hit counts for program counter locations */ - Oracle* oracle; - /* The Fragment being recorded by this recording session. */ VMFragment* const fragment; @@ -1068,17 +1065,6 @@ class TraceRecorder */ JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit); - JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot); - - JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc); - - JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f, - Queue& undemotes); - - JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi); - - JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, TraceType* typeMap); - bool isGlobal(jsval* p) const; ptrdiff_t nativeGlobalSlot(jsval *p) const; ptrdiff_t nativeGlobalOffset(jsval* p) const; @@ -1398,8 +1384,8 @@ class TraceRecorder # include "jsopcode.tbl" #undef OPDEF - inline void* operator new(size_t size) { return js_calloc(size); } - inline void operator delete(void *p) { js_free(p); } + inline void* operator new(size_t size) { return calloc(1, size); } + inline void operator delete(void *p) { free(p); } JS_REQUIRES_STACK TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
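For readers skimming the hunks above: the changeset being backed out had replaced the single file-scoped Oracle (shared by the whole VM and purged from the GC pre-cleanup path via PurgeJITOracle()) with a per-TraceMonitor Oracle* allocated in InitJIT and freed in FinishJIT; this backout restores the shared static oracle. Below is a minimal, self-contained C++ sketch of the two ownership patterns side by side. It is not SpiderMonkey code: only the names mirror the patch, and the struct bodies and main() are illustrative assumptions.

// Illustrative sketch only -- names mirror the patch, bodies are hypothetical.
#include <cstdio>

struct Oracle {
    void clear() { /* forget all demotability hints */ }
};

// Pattern restored by this backout: one file-scoped oracle shared by the
// whole VM. Per the comment in the patch, collisions and concurrent updates
// are tolerated; the worst case is a performance regression.
static Oracle oracle;

void PurgeJITOracle() {
    oracle.clear();                  // invoked from the GC pre-cleanup path
}

// Pattern being backed out: each trace monitor owns its own oracle,
// created in InitJIT and destroyed in FinishJIT.
struct TraceMonitor {
    Oracle* oracle;
};

void InitJIT(TraceMonitor* tm)   { tm->oracle = new Oracle(); }
void FinishJIT(TraceMonitor* tm) { delete tm->oracle; tm->oracle = nullptr; }

int main() {
    TraceMonitor tm;
    InitJIT(&tm);                    // per-monitor ownership (backed out)
    PurgeJITOracle();                // shared-oracle purge (restored)
    FinishJIT(&tm);
    std::puts("ok");
}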