From 0ca6c5eb09a8f7f5e9dcf0fd9a3a8ace075a6437 Mon Sep 17 00:00:00 2001
From: Graydon Hoare
Date: Tue, 18 Aug 2009 15:32:54 -0700
Subject: [PATCH 1/3] Bug 511237 - Remove primary use of Fragmento from jsregexp, r=dmandelin.

---
 js/src/jscntxt.h             |  8 ++++
 js/src/jsregexp.cpp          | 74 +++++++++++++-----------------
 js/src/jstracer.cpp          |  3 +-
 js/src/jstracer.h            | 32 +++++++++++++++-
 js/src/nanojit/Fragmento.cpp |  2 +-
 js/src/nanojit/Fragmento.h   |  2 +-
 6 files changed, 70 insertions(+), 51 deletions(-)

diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h
index 3009774a7b0b..602b5f398d5f 100644
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -100,6 +100,7 @@ namespace nanojit {
     class Fragment;
     class Fragmento;
     class LirBuffer;
+    extern "C++" { template<typename K, typename V, typename H> class HashMap; }
 }
 class TraceRecorder;
 class VMAllocator;
@@ -114,6 +115,12 @@ typedef Queue<uint16> SlotList;
 #define FRAGMENT_TABLE_SIZE 512
 struct VMFragment;

+#ifdef __cplusplus
+struct REHashKey;
+struct REHashFn;
+typedef nanojit::HashMap<REHashKey, nanojit::Fragment*, REHashFn> REHashMap;
+#endif
+
 #define MONITOR_N_GLOBAL_STATES 4
 struct GlobalState {
     JSObject* globalObj;
@@ -181,6 +188,7 @@ struct JSTraceMonitor {
     CLS(nanojit::Assembler) reAssembler;
     CLS(nanojit::LirBuffer) reLirBuf;
     CLS(nanojit::Fragmento) reFragmento;
+    CLS(REHashMap)          reFragments;

     /* Keep a list of recorders we need to abort on cache flush. */
     CLS(TraceRecorder) abortStack;
diff --git a/js/src/jsregexp.cpp b/js/src/jsregexp.cpp
index 1e6421926e29..eab4513e6de7 100644
--- a/js/src/jsregexp.cpp
+++ b/js/src/jsregexp.cpp
@@ -2006,41 +2006,32 @@ typedef JSTempVector<LIns *> LInsList;
 /* Dummy GC for nanojit placement new. */
 static GC gc;

-static void *
-HashRegExp(uint16 flags, const jschar *s, size_t n)
-{
-    uint32 h;
-
-    for (h = 0; n; s++, n--)
-        h = JS_ROTATE_LEFT32(h, 4) ^ *s;
-    return (void *)(h + flags);
-}
-
-struct RESideExit : public SideExit {
-    size_t re_length;
-    uint16 re_flags;
-    jschar re_chars[1];
-};
-
-/* Return the cached fragment for the given regexp, or NULL. */
+/* Return the cached fragment for the given regexp, or create one. */
 static Fragment*
-LookupNativeRegExp(JSContext* cx, void* hash, uint16 re_flags,
+LookupNativeRegExp(JSContext* cx, uint16 re_flags,
                    const jschar* re_chars, size_t re_length)
 {
-    Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
-    Fragment* fragment = fragmento->getLoop(hash);
-    while (fragment) {
-        if (fragment->lastIns) {
-            RESideExit *exit = (RESideExit*)fragment->lastIns->record()->exit;
-            if (exit->re_flags == re_flags &&
-                exit->re_length == re_length &&
-                !memcmp(exit->re_chars, re_chars, re_length * sizeof(jschar))) {
-                return fragment;
-            }
-        }
-        fragment = fragment->peer;
+    JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
+    VMAllocator &alloc = *tm->reAllocator;
+    REHashMap &table = *tm->reFragments;
+
+    REHashKey k(re_length, re_flags, re_chars);
+    Fragment *frag = table.get(k);
+
+    if (!frag) {
+        frag = new (alloc) Fragment(0);
+        frag->lirbuf = tm->reLirBuf;
+        frag->root = frag;
+        /*
+         * Copy the re_chars portion of the hash key into the Allocator, so
+         * its lifecycle is disconnected from the lifecycle of the
+         * underlying regexp.
+         */
+        k.re_chars = (const jschar*) new (alloc) jschar[re_length];
+        memcpy((void*) k.re_chars, re_chars, re_length * sizeof(jschar));
+        table.put(k, frag);
     }
-    return NULL;
+    return frag;
 }

 static JSBool
@@ -3065,16 +3056,13 @@ class RegExpNativeCompiler {
     GuardRecord* insertGuard(const jschar* re_chars, size_t re_length)
     {
         LIns* skip = lirBufWriter->insSkip(sizeof(GuardRecord) +
-                                           sizeof(RESideExit) +
+                                           sizeof(SideExit) +
                                            (re_length-1) * sizeof(jschar));
         GuardRecord* guard = (GuardRecord *) skip->payload();
         memset(guard, 0, sizeof(*guard));
-        RESideExit* exit = (RESideExit*)(guard+1);
+        SideExit* exit = (SideExit*)(guard+1);
         guard->exit = exit;
         guard->exit->target = fragment;
-        exit->re_flags = re->flags;
-        exit->re_length = re_length;
-        memcpy(exit->re_chars, re_chars, re_length * sizeof(jschar));
         fragment->lastIns = lir->insGuard(LIR_loop, NULL, skip);
         return guard;
     }
@@ -3163,9 +3151,9 @@ class RegExpNativeCompiler {
     fail:
         if (alloc.outOfMemory() || oom ||
             js_OverfullFragmento(tm, fragmento)) {
-            fragmento->clearFrags();
             tm->reCodeAlloc->sweep();
             alloc.reset();
+            tm->reFragments = new (alloc) REHashMap(alloc);
 #ifdef DEBUG
             fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);
             lirbuf->names = new (alloc) LirNameMap(alloc, fragmento->labels);
@@ -3216,19 +3204,11 @@ typedef void *(FASTCALL *NativeRegExp)(REGlobalData*, const jschar *);
 static NativeRegExp
 GetNativeRegExp(JSContext* cx, JSRegExp* re)
 {
-    Fragment *fragment;
     const jschar *re_chars;
     size_t re_length;
-    Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
-
     re->source->getCharsAndLength(re_chars, re_length);
-    void* hash = HashRegExp(re->flags, re_chars, re_length);
-    fragment = LookupNativeRegExp(cx, hash, re->flags, re_chars, re_length);
-    if (!fragment) {
-        fragment = fragmento->getAnchor(hash);
-        fragment->lirbuf = JS_TRACE_MONITOR(cx).reLirBuf;
-        fragment->root = fragment;
-    }
+    Fragment *fragment = LookupNativeRegExp(cx, re->flags, re_chars, re_length);
+    JS_ASSERT(fragment);
     if (!fragment->code()) {
         if (!CompileRegExpToNative(cx, re, fragment))
             return NULL;
diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp
index 5d83a7783226..c8d124512145 100644
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -865,7 +865,7 @@ getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalSh
 static Fragment*
 getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
 {
-    VMFragment *f = new (&gc) VMFragment(ip, globalObj, globalShape, argc);
+    VMFragment *f = new VMFragment(ip, globalObj, globalShape, argc);
     JS_ASSERT(f);

     Fragment *p = getVMFragment(tm, ip, globalObj, globalShape, argc);
@@ -6587,6 +6587,7 @@ js_InitJIT(JSTraceMonitor *tm)
                                               &js_LogController);

     if (!tm->reFragmento) {
+        tm->reFragments = new (reAlloc) REHashMap(reAlloc);
         Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->reCodeAlloc);
         verbose_only(fragmento->labels = new (reAlloc) LabelMap(reAlloc, &js_LogController);)
         tm->reFragmento = fragmento;
diff --git a/js/src/jstracer.h b/js/src/jstracer.h
index fd88e8712491..2ecda891b8f3 100644
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -411,7 +411,7 @@ struct VMSideExit : public nanojit::SideExit
     }
 };

-struct VMAllocator : public nanojit::Allocator
+class VMAllocator : public nanojit::Allocator
 {

 public:
@@ -438,6 +438,36 @@ public:
     uintptr_t mReserve[0x10000];
 };

+
+struct REHashKey {
+    size_t re_length;
+    uint16 re_flags;
+    const jschar* re_chars;
+
+    REHashKey(size_t re_length, uint16 re_flags, const jschar *re_chars)
+        : re_length(re_length)
+        , re_flags(re_flags)
+        , re_chars(re_chars)
+    {}
+
+    bool operator==(const REHashKey& other) const
+    {
+        return ((this->re_length == other.re_length) &&
+                (this->re_flags == other.re_flags) &&
+                !memcmp(this->re_chars, other.re_chars,
+                        this->re_length * sizeof(jschar)));
+    }
+};
+
+struct REHashFn {
+    static size_t hash(const REHashKey& k) {
+        return
+            k.re_length +
+            k.re_flags +
+            nanojit::murmurhash(k.re_chars, k.re_length * sizeof(jschar));
+    }
+};
+
 struct FrameInfo {
     JSObject* callee;   // callee function object
     JSObject* block;    // caller block chain head
diff --git a/js/src/nanojit/Fragmento.cpp b/js/src/nanojit/Fragmento.cpp
index 3c3a4674bd23..09317b4ef18d 100644
--- a/js/src/nanojit/Fragmento.cpp
+++ b/js/src/nanojit/Fragmento.cpp
@@ -250,7 +250,7 @@ namespace nanojit
     Fragment *Fragmento::newFrag(const void* ip)
     {
         GC *gc = _core->gc;
-        Fragment *f = NJ_NEW(gc, Fragment)(ip);
+        Fragment *f = new Fragment(ip);
         f->blacklistLevel = 5;
         return f;
     }
diff --git a/js/src/nanojit/Fragmento.h b/js/src/nanojit/Fragmento.h
index 5f6ac89c6a63..cb1f72d337cb 100644
--- a/js/src/nanojit/Fragmento.h
+++ b/js/src/nanojit/Fragmento.h
@@ -128,7 +128,7 @@ namespace nanojit
      * It may turn out that that this arrangement causes too much traffic
      * between d and i-caches and that we need to carve up the structure differently.
      */
-    class Fragment : public avmplus::GCFinalizedObject
+    class Fragment
     {
         public:
             Fragment(const void*);

From b50a6907fa23b320490b77a5b927512362ca979a Mon Sep 17 00:00:00 2001
From: Andreas Gal
Date: Thu, 20 Aug 2009 16:53:10 -0700
Subject: [PATCH 2/3] Fast path for writing a double into an array (511737, r=danderson).

---
 js/src/jsarray.cpp  | 19 +++++++++++++++++++
 js/src/jsbuiltins.h |  1 +
 js/src/jstracer.cpp | 12 +++++++++---
 3 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp
index fe4e088486d0..6c1043816fac 100644
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -955,6 +955,25 @@ js_Array_dense_setelem_int(JSContext* cx, JSObject* obj, jsint i, int32 j)
     return dense_grow(cx, obj, i, v);
 }
 JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_int, CONTEXT, OBJECT, INT32, INT32, 0, 0)
+
+JSBool FASTCALL
+js_Array_dense_setelem_double(JSContext* cx, JSObject* obj, jsint i, jsdouble d)
+{
+    JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
+
+    jsval v;
+    jsint j;
+
+    if (JS_LIKELY(JSDOUBLE_IS_INT(d, j) && INT_FITS_IN_JSVAL(j))) {
+        v = INT_TO_JSVAL(j);
+    } else {
+        if (!js_NewDoubleInRootedValue(cx, d, &v))
+            return JS_FALSE;
+    }
+
+    return dense_grow(cx, obj, i, v);
+}
+JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_double, CONTEXT, OBJECT, INT32, DOUBLE, 0, 0)
 #endif

 static JSBool
diff --git a/js/src/jsbuiltins.h b/js/src/jsbuiltins.h
index ee9b153fb661..52267137569c 100644
--- a/js/src/jsbuiltins.h
+++ b/js/src/jsbuiltins.h
@@ -453,6 +453,7 @@ JS_DECLARE_CALLINFO(js_NewInstance)
 /* Defined in jsarray.cpp. */
 JS_DECLARE_CALLINFO(js_Array_dense_setelem)
 JS_DECLARE_CALLINFO(js_Array_dense_setelem_int)
+JS_DECLARE_CALLINFO(js_Array_dense_setelem_double)
 JS_DECLARE_CALLINFO(js_NewEmptyArray)
 JS_DECLARE_CALLINFO(js_NewUninitializedArray)
 JS_DECLARE_CALLINFO(js_ArrayCompPush)
diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp
index c8d124512145..70fa6d91f6f4 100644
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -10423,9 +10423,15 @@ TraceRecorder::record_JSOP_SETELEM()
         // builtin for every storage type. Special case for integers though,
         // since they are so common.
         LIns* res_ins;
-        if (isNumber(v) && isPromoteInt(v_ins)) {
-            LIns* args[] = { ::demote(lir, v_ins), idx_ins, obj_ins, cx_ins };
-            res_ins = lir->insCall(&js_Array_dense_setelem_int_ci, args);
+        LIns* args[] = { NULL, idx_ins, obj_ins, cx_ins };
+        if (isNumber(v)) {
+            if (isPromoteInt(v_ins)) {
+                args[0] = ::demote(lir, v_ins);
+                res_ins = lir->insCall(&js_Array_dense_setelem_int_ci, args);
+            } else {
+                args[0] = v_ins;
+                res_ins = lir->insCall(&js_Array_dense_setelem_double_ci, args);
+            }
         } else {
             LIns* args[] = { box_jsval(v, v_ins), idx_ins, obj_ins, cx_ins };
             res_ins = lir->insCall(&js_Array_dense_setelem_ci, args);

From 4405f6d470d8e231e9b3be48895e1151a300eb98 Mon Sep 17 00:00:00 2001
From: Graydon Hoare
Date: Thu, 20 Aug 2009 17:22:47 -0700
Subject: [PATCH 3/3] Bug 511309 - Delete class Fragmento and all uses of it, r=dvander.

--HG--
extra : rebase_source : 47047589288ac0758e0068307b5a8f393db97606
---
 js/src/jscntxt.h             |  13 +-
 js/src/jsinterp.cpp          |   2 +-
 js/src/jsregexp.cpp          |  20 ++-
 js/src/jstracer.cpp          | 281 +++++++++++++----------------------
 js/src/jstracer.h            |  64 +++++---
 js/src/nanojit/Assembler.h   |   2 +-
 js/src/nanojit/Fragmento.cpp | 213 --------------------------
 js/src/nanojit/Fragmento.h   |  77 ----------
 js/src/nanojit/LIR.cpp       |   4 -
 js/src/nanojit/LIR.h         |   2 -
 10 files changed, 168 insertions(+), 510 deletions(-)

diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h
index 602b5f398d5f..3784431fa967 100644
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -98,8 +98,10 @@ namespace nanojit {
     class Assembler;
     class CodeAlloc;
     class Fragment;
-    class Fragmento;
     class LirBuffer;
+#ifdef DEBUG
+    class LabelMap;
+#endif
     extern "C++" { template<typename K, typename V, typename H> class HashMap; }
 }
 class TraceRecorder;
@@ -148,10 +150,13 @@ struct JSTraceMonitor {
     JSContext               *tracecx;

     CLS(nanojit::LirBuffer) lirbuf;
-    CLS(nanojit::Fragmento) fragmento;
     CLS(VMAllocator)        allocator;  // A chunk allocator for LIR.
     CLS(nanojit::CodeAlloc) codeAlloc;  // A general allocator for native code.
     CLS(nanojit::Assembler) assembler;
+#ifdef DEBUG
+    CLS(nanojit::LabelMap)  labels;
+#endif
+
     CLS(TraceRecorder)      recorder;
     jsval                   *reservedDoublePool;
     jsval                   *reservedDoublePoolPtr;
@@ -187,8 +192,10 @@ struct JSTraceMonitor {
     CLS(nanojit::CodeAlloc) reCodeAlloc;
     CLS(nanojit::Assembler) reAssembler;
     CLS(nanojit::LirBuffer) reLirBuf;
-    CLS(nanojit::Fragmento) reFragmento;
     CLS(REHashMap)          reFragments;
+#ifdef DEBUG
+    CLS(nanojit::LabelMap)  reLabels;
+#endif

     /* Keep a list of recorders we need to abort on cache flush. */
     CLS(TraceRecorder) abortStack;
diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp
index 9342f373a54e..0545abc0ee61 100644
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -2780,7 +2780,7 @@ js_Interpret(JSContext *cx)
            the recorder to be destroyed when we return. */
         if (tr) {
             if (tr->wasDeepAborted())
-                tr->removeFragmentoReferences();
+                tr->removeFragmentReferences();
             else
                 tr->pushAbortStack();
         }
diff --git a/js/src/jsregexp.cpp b/js/src/jsregexp.cpp
index eab4513e6de7..1aca19936c4e 100644
--- a/js/src/jsregexp.cpp
+++ b/js/src/jsregexp.cpp
@@ -2005,6 +2005,8 @@ typedef JSTempVector<LIns *> LInsList;

 /* Dummy GC for nanojit placement new. */
 static GC gc;
+static avmplus::AvmCore s_core = avmplus::AvmCore();
+static avmplus::AvmCore* core = &s_core;

 /* Return the cached fragment for the given regexp, or create one. */
 static Fragment*
@@ -3080,7 +3082,6 @@ class RegExpNativeCompiler {
         size_t re_length;
         JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
         Assembler *assm = tm->reAssembler;
-        Fragmento* fragmento = tm->reFragmento;
         VMAllocator& alloc = *tm->reAllocator;

         re->source->getCharsAndLength(re_chars, re_length);
@@ -3136,7 +3137,7 @@ class RegExpNativeCompiler {
         if (alloc.outOfMemory())
             goto fail;
-        ::compile(assm, fragment, alloc verbose_only(, fragmento->labels));
+        ::compile(assm, fragment, alloc verbose_only(, tm->reLabels));
         if (assm->error() != nanojit::None) {
             oom = assm->error() == nanojit::OutOMem;
             goto fail;
@@ -3150,20 +3151,25 @@ class RegExpNativeCompiler {
         return JS_TRUE;
     fail:
         if (alloc.outOfMemory() || oom ||
-            js_OverfullFragmento(tm, fragmento)) {
+            js_OverfullJITCache(tm, true)) {
+            delete lirBufWriter;
             tm->reCodeAlloc->sweep();
             alloc.reset();
             tm->reFragments = new (alloc) REHashMap(alloc);
+            tm->reLirBuf = new (alloc) LirBuffer(alloc);
 #ifdef DEBUG
-            fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);
-            lirbuf->names = new (alloc) LirNameMap(alloc, fragmento->labels);
+            tm->reLabels = new (alloc) LabelMap(alloc, &js_LogController);
+            tm->reLirBuf->names = new (alloc) LirNameMap(alloc, tm->reLabels);
+            tm->reAssembler = new (alloc) Assembler(*tm->reCodeAlloc, alloc, core,
+                                                    &js_LogController);
+#else
+            tm->reAssembler = new (alloc) Assembler(*tm->reCodeAlloc, alloc, core, NULL);
 #endif
-            lirbuf->clear();
         } else {
             if (!guard) insertGuard(re_chars, re_length);
             re->flags |= JSREG_NOCOMPILE;
+            delete lirBufWriter;
         }
-        delete lirBufWriter;
 #ifdef NJ_VERBOSE
         debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
                              delete lir; )
diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp
index 70fa6d91f6f4..2ca5e35012e4 100644
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -865,7 +865,7 @@ getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalSh
 static Fragment*
 getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
 {
-    VMFragment *f = new VMFragment(ip, globalObj, globalShape, argc);
+    VMFragment *f = new (*tm->allocator) VMFragment(ip, globalObj, globalShape, argc);
     JS_ASSERT(f);

     Fragment *p = getVMFragment(tm, ip, globalObj, globalShape, argc);
@@ -1683,6 +1683,8 @@ JS_REQUIRES_STACK
 TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
         TreeInfo* ti, unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
         VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc)
+    : whichTreesToTrash(JS_TRACE_MONITOR(cx).allocator),
+      cfgMerges(JS_TRACE_MONITOR(cx).allocator)
 {
     JS_ASSERT(!_fragment->vmprivate && ti && cx->fp->regs->pc == (jsbytecode*)_fragment->ip);

@@ -1803,17 +1805,6 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag
     }
 }

-TreeInfo::~TreeInfo()
-{
-    UnstableExit* temp;
-
-    while (unstableExits) {
-        temp = unstableExits->next;
-        delete unstableExits;
-        unstableExits = temp;
-    }
-}
-
 TraceRecorder::~TraceRecorder()
 {
     JS_ASSERT(nextRecorderToAbort == NULL);
@@ -1827,18 +1818,12 @@ TraceRecorder::~TraceRecorder()
     }
 #endif
     if (fragment) {
-        if (wasRootFragment && !fragment->root->code()) {
-            JS_ASSERT(!fragment->root->vmprivate);
-            delete treeInfo;
-        }
-
         if (trashSelf)
             TrashTree(cx, fragment->root);

         for (unsigned int i = 0; i < whichTreesToTrash.length(); i++)
             TrashTree(cx, whichTreesToTrash[i]);
-    } else if (wasRootFragment) {
-        delete treeInfo;
     }
 #ifdef DEBUG
     debug_only_stmt( delete verbose_filter; )
@@ -1852,7 +1837,7 @@ TraceRecorder::~TraceRecorder()
 }

 void
-TraceRecorder::removeFragmentoReferences()
+TraceRecorder::removeFragmentReferences()
 {
     fragment = NULL;
 }
@@ -2112,35 +2097,29 @@ oom:
 void
 JSTraceMonitor::flush()
 {
-    if (fragmento) {
-        fragmento->clearFrags();
-        for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-            VMFragment* f = vmfragments[i];
-            while (f) {
-                VMFragment* next = f->next;
-                fragmento->clearFragment(f);
-                f = next;
-            }
-            vmfragments[i] = NULL;
-        }
-        for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
-            globalStates[i].globalShape = -1;
-            globalStates[i].globalSlots->clear();
-        }
-    }
+    memset(&vmfragments[0], 0,
+           FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));

     allocator->reset();
     codeAlloc->sweep();

-#ifdef DEBUG
-    JS_ASSERT(fragmento);
-    JS_ASSERT(fragmento->labels);
     Allocator& alloc = *allocator;
-    fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);
-    lirbuf->names = new (alloc) LirNameMap(alloc, fragmento->labels);
+
+    for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
+        globalStates[i].globalShape = -1;
+        globalStates[i].globalSlots = new (alloc) SlotList(allocator);
+    }
+
+    assembler = new (alloc) Assembler(*codeAlloc, alloc, core,
+                                      &js_LogController);
+    lirbuf = new (alloc) LirBuffer(alloc);
+
+#ifdef DEBUG
+    JS_ASSERT(labels);
+    labels = new (alloc) LabelMap(alloc, &js_LogController);
+    lirbuf->names = new (alloc) LirNameMap(alloc, labels);
 #endif

-    lirbuf->clear();
     needFlush = JS_FALSE;
 }
@@ -3529,12 +3508,12 @@ ResetJIT(JSContext* cx)
     js_AbortRecording(cx, "flush cache");
     TraceRecorder* tr;
     while ((tr = tm->abortStack) != NULL) {
-        tr->removeFragmentoReferences();
+        tr->removeFragmentReferences();
         tr->deepAbort();
         tr->popAbortStack();
     }
     if (ProhibitFlush(cx)) {
-        debug_only_print0(LC_TMTracer, "Deferring fragmento flush due to deep bail.\n");
+        debug_only_print0(LC_TMTracer, "Deferring JIT flush due to deep bail.\n");
         tm->needFlush = JS_TRUE;
         return;
     }
@@ -3553,7 +3532,6 @@ TraceRecorder::compile(JSTraceMonitor* tm)
         ResetJIT(cx);
         return;
     }
-    verbose_only(Fragmento* fragmento = tm->fragmento;)
     if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
         debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n");
         Blacklist((jsbytecode*) fragment->root->ip);
@@ -3565,7 +3543,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
         return;

     Assembler *assm = tm->assembler;
-    ::compile(assm, fragment, *tm->allocator verbose_only(, fragmento->labels));
+    ::compile(assm, fragment, *tm->allocator verbose_only(, tm->labels));
     if (assm->error() == nanojit::OutOMem)
         return;

@@ -3595,7 +3573,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
         char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16);
         sprintf(label, "%s:%u", filename ? filename : "<stdin>",
                 js_FramePCToLineNumber(cx, cx->fp));
-        fragmento->labels->add(fragment, sizeof(Fragment), 0, label);
+        tm->labels->add(fragment, sizeof(Fragment), 0, label);
         js_free(label);
 #endif
         AUDIT(traceCompleted);
@@ -3631,17 +3609,22 @@ class SlotMap : public SlotVisitorBase
 {
   public:
     struct SlotInfo
     {
+        SlotInfo()
+          : v(0), promoteInt(false), lastCheck(TypeCheck_Bad)
+        {}
         SlotInfo(jsval* v, bool promoteInt)
           : v(v), promoteInt(promoteInt), lastCheck(TypeCheck_Bad)
-        {
-        }
+        {}
         jsval *v;
         bool promoteInt;
         TypeCheckResult lastCheck;
     };

     SlotMap(TraceRecorder& rec, unsigned slotOffset)
-      : mRecorder(rec), mCx(rec.cx), slotOffset(slotOffset)
+      : mRecorder(rec),
+        mCx(rec.cx),
+        slots(NULL),
+        slotOffset(slotOffset)
     {
     }
@@ -3862,8 +3845,6 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& cons
      */
     JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc);

-    Fragmento* fragmento = traceMonitor->fragmento;
-
     if (callDepth != 0) {
         debug_only_print0(LC_TMTracer,
                           "Blacklisted: stack depth mismatch, possible recursion.\n");
@@ -3916,7 +3897,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& cons
             debug_only_print0(LC_TMTracer,
                               "Trace has unstable loop variable with no stable peer, "
                               "compiling anyway.\n");
-            UnstableExit* uexit = new UnstableExit;
+            UnstableExit* uexit = new (*traceMonitor->allocator) UnstableExit;
             uexit->fragment = fragment;
             uexit->exit = exit;
             uexit->next = treeInfo->unstableExits;
@@ -3947,7 +3928,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& cons
         peer = getLoop(traceMonitor, root->ip, root->globalObj, root->globalShape,
                        root->argc);
         JS_ASSERT(peer);
-        joinEdgesToEntry(fragmento, peer);
+        joinEdgesToEntry(peer);

         debug_only_stmt(DumpPeerStability(traceMonitor, peer->ip, peer->globalObj,
                                           peer->globalShape, peer->argc);)
@@ -4026,13 +4007,13 @@ FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, TreeInfo* treeInf
 }

 JS_REQUIRES_STACK void
-TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root)
+TraceRecorder::joinEdgesToEntry(VMFragment* peer_root)
 {
     if (fragment->kind != LoopTrace)
         return;

-    TypeMap typeMap;
-    Queue<unsigned> undemotes;
+    TypeMap typeMap(NULL);
+    Queue<unsigned> undemotes(NULL);

     for (VMFragment* peer = peer_root; peer; peer = (VMFragment*)peer->peer) {
         TreeInfo* ti = peer->getTreeInfo();
@@ -4096,11 +4077,11 @@ TraceRecorder::endLoop(VMSideExit* exit)
     debug_only_printf(LC_TMTreeVis, "TREEVIS ENDLOOP EXIT=%p\n", exit);

     VMFragment* root = (VMFragment*)fragment->root;
-    joinEdgesToEntry(traceMonitor->fragmento, getLoop(traceMonitor,
-                                                      root->ip,
-                                                      root->globalObj,
-                                                      root->globalShape,
-                                                      root->argc));
+    joinEdgesToEntry(getLoop(traceMonitor,
+                             root->ip,
+                             root->globalObj,
+                             root->globalShape,
+                             root->argc));
     debug_only_stmt(DumpPeerStability(traceMonitor, root->ip, root->globalObj,
                                       root->globalShape, root->argc);)
@@ -4228,7 +4209,7 @@ TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit)
      * Bug 502604 - It is illegal to extend from the outer typemap without
      * first extending from the inner. Make a new typemap here.
      */
-    TypeMap fullMap;
+    TypeMap fullMap(NULL);
     fullMap.add(exit->stackTypeMap(), exit->numStackSlots);
     BuildGlobalTypeMapFromInnerTree(fullMap, exit);
     import(ti, inner_sp_ins, exit->numStackSlots, fullMap.length() - exit->numStackSlots,
@@ -4423,12 +4404,6 @@ nanojit::LirNameMap::formatGuard(LIns *i, char *out)
 }
 #endif

-void
-nanojit::Fragment::onDestroy()
-{
-    delete (TreeInfo *)vmprivate;
-}
-
 static JS_REQUIRES_STACK bool
 DeleteRecorder(JSContext* cx)
 {
@@ -4441,7 +4416,7 @@ DeleteRecorder(JSContext* cx)
     /* If we ran out of memory, flush the code cache. */
     Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
     if (assm->error() == OutOMem ||
-        js_OverfullFragmento(tm, tm->fragmento)) {
+        js_OverfullJITCache(tm, false)) {
         ResetJIT(cx);
         return false;
     }
@@ -4556,7 +4531,7 @@ TrashTree(JSContext* cx, Fragment* f)
     debug_only_print0(LC_TMTracer, "Trashing tree info.\n");
     TreeInfo* ti = (TreeInfo*)f->vmprivate;
     f->vmprivate = NULL;
-    f->releaseCode(JS_TRACE_MONITOR(cx).codeAlloc);
+    f->setCode(NULL);
     Fragment** data = ti->dependentTrees.data();
     unsigned length = ti->dependentTrees.length();
     for (unsigned n = 0; n < length; ++n)
@@ -4565,8 +4540,6 @@ TrashTree(JSContext* cx, Fragment* f)
     length = ti->linkedTrees.length();
     for (unsigned n = 0; n < length; ++n)
         TrashTree(cx, data[n]);
-    delete ti;
-    JS_ASSERT(!f->code() && !f->vmprivate);
 }

 static int
@@ -4808,7 +4781,7 @@ RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer,
     f->root = f;
     f->lirbuf = tm->lirbuf;

-    if (tm->allocator->outOfMemory() || js_OverfullFragmento(tm, tm->fragmento)) {
+    if (tm->allocator->outOfMemory() || js_OverfullJITCache(tm, false)) {
         Backoff(cx, (jsbytecode*) f->root->ip);
         ResetJIT(cx);
         debug_only_print0(LC_TMTracer,
@@ -4819,7 +4792,7 @@ RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer,
     JS_ASSERT(!f->code() && !f->vmprivate);

     /* Set up the VM-private treeInfo structure for this fragment. */
-    TreeInfo* ti = new (&gc) TreeInfo(f, globalSlots);
+    TreeInfo* ti = new (*tm->allocator) TreeInfo(tm->allocator, f, globalSlots);

     /* Capture the coerced type of each active slot in the type map. */
     ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */);
@@ -4871,7 +4844,7 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, VMFragment** peerp)

     JS_ASSERT(from->code());

-    TypeMap typeMap;
+    TypeMap typeMap(NULL);
     FullMapFromExit(typeMap, exit);
     JS_ASSERT(typeMap.length() - exit->numStackSlots == from_ti->nGlobalTypes());

@@ -4906,7 +4879,6 @@ TreeInfo::removeUnstableExit(VMSideExit* exit)
     for (UnstableExit* uexit = this->unstableExits; uexit != NULL; uexit = uexit->next) {
         if (uexit->exit == exit) {
             *tail = uexit->next;
-            delete uexit;
             return *tail;
         }
         tail = &uexit->next;
@@ -4990,7 +4962,11 @@ AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, j

     Fragment* c;
     if (!(c = anchor->target)) {
-        c = JS_TRACE_MONITOR(cx).fragmento->createBranch(anchor, cx->fp->regs->pc);
+        Allocator& alloc = *JS_TRACE_MONITOR(cx).allocator;
+        c = new (alloc) Fragment(cx->fp->regs->pc);
+        c->kind = BranchTrace;
+        c->anchor = anchor->from->anchor;
+        c->root = anchor->from->root;
         debug_only_printf(LC_TMTreeVis, "TREEVIS CREATEBRANCH ROOT=%p FRAG=%p PC=%p FILE=\"%s\""
                           " LINE=%d ANCHOR=%p OFFS=%d\n",
                           f, c, cx->fp->regs->pc, cx->fp->script->filename,
@@ -5019,7 +4995,7 @@ AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, j
         unsigned stackSlots;
         unsigned ngslots;
         JSTraceType* typeMap;
-        TypeMap fullMap;
+        TypeMap fullMap(NULL);
         if (exitedFrom == NULL) {
             /*
              * If we are coming straight from a simple side exit, just use that
@@ -6182,8 +6158,7 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
     }

     if (tr->traceMonitor->allocator->outOfMemory() ||
-        js_OverfullFragmento(&JS_TRACE_MONITOR(cx),
-                             JS_TRACE_MONITOR(cx).fragmento)) {
+        js_OverfullJITCache(&JS_TRACE_MONITOR(cx), false)) {
         js_AbortRecording(cx, "no more memory");
         ResetJIT(cx);
         return JSRS_STOP;
@@ -6484,7 +6459,8 @@ void
 js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes)
 {
     JSTraceMonitor* tm = &JS_THREAD_DATA(cx)->traceMonitor;
-    JS_ASSERT(tm->fragmento && tm->reFragmento);
+    JS_ASSERT(tm->codeAlloc && tm->reCodeAlloc &&
+              tm->allocator && tm->reAllocator);
     if (bytes > 1 G)
         bytes = 1 G;
     if (bytes < 128 K)
@@ -6552,23 +6528,21 @@ js_InitJIT(JSTraceMonitor *tm)
     if (!tm->codeAlloc)
         tm->codeAlloc = new CodeAlloc();

-    if (!tm->assembler)
-        tm->assembler = new (&gc) Assembler(*tm->codeAlloc, alloc, core,
-                                            &js_LogController);
+    if (!tm->assembler) {
+        tm->assembler = new (alloc) Assembler(*tm->codeAlloc, alloc, core,
+                                              &js_LogController);
+
-    if (!tm->fragmento) {
         JS_ASSERT(!tm->reservedDoublePool);
-        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->codeAlloc);
-        verbose_only(fragmento->labels = new (alloc) LabelMap(alloc, &js_LogController);)
-        tm->fragmento = fragmento;
-        tm->lirbuf = new LirBuffer(alloc);
+        tm->lirbuf = new (alloc) LirBuffer(alloc);
 #ifdef DEBUG
-        tm->lirbuf->names = new (alloc) LirNameMap(alloc, tm->fragmento->labels);
+        tm->labels = new (alloc) LabelMap(alloc, &js_LogController);
+        tm->lirbuf->names = new (alloc) LirNameMap(alloc, tm->labels);
 #endif
         for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
             tm->globalStates[i].globalShape = -1;
             JS_ASSERT(!tm->globalStates[i].globalSlots);
-            tm->globalStates[i].globalSlots = new (&gc) SlotList();
+            tm->globalStates[i].globalSlots = new (alloc) SlotList(tm->allocator);
         }
         tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
         memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
@@ -6582,18 +6556,15 @@ js_InitJIT(JSTraceMonitor *tm)
     if (!tm->reCodeAlloc)
         tm->reCodeAlloc = new CodeAlloc();

-    if (!tm->reAssembler)
-        tm->reAssembler = new (&gc) Assembler(*tm->reCodeAlloc, reAlloc, core,
-                                              &js_LogController);
+    if (!tm->reAssembler) {
+        tm->reAssembler = new (reAlloc) Assembler(*tm->reCodeAlloc, reAlloc, core,
+                                                  &js_LogController);
+
-    if (!tm->reFragmento) {
         tm->reFragments = new (reAlloc) REHashMap(reAlloc);
-        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->reCodeAlloc);
-        verbose_only(fragmento->labels = new (reAlloc) LabelMap(reAlloc, &js_LogController);)
-        tm->reFragmento = fragmento;
-        tm->reLirBuf = new LirBuffer(reAlloc);
+        tm->reLirBuf = new (reAlloc) LirBuffer(reAlloc);
 #ifdef DEBUG
-        tm->reLirBuf->names = new (reAlloc) LirNameMap(reAlloc, fragmento->labels);
+        tm->reLabels = new (reAlloc) LabelMap(reAlloc, &js_LogController);
+        tm->reLirBuf->names = new (reAlloc) LirNameMap(reAlloc, tm->reLabels);
 #endif
     }
 #if !defined XP_WIN
@@ -6620,44 +6591,24 @@ js_FinishJIT(JSTraceMonitor *tm)
                           jitstats.typeMapMismatchAtEntry, jitstats.globalShapeMismatchAtEntry);
     }
 #endif
-    if (tm->fragmento != NULL) {
+    if (tm->assembler != NULL) {
         JS_ASSERT(tm->reservedDoublePool);
-#ifdef DEBUG
-        tm->lirbuf->names = NULL;
-#endif
-        delete tm->lirbuf;
+        tm->lirbuf = NULL;
+
         if (tm->recordAttempts.ops)
             JS_DHashTableFinish(&tm->recordAttempts);
-        for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-            VMFragment* f = tm->vmfragments[i];
-            while (f) {
-                VMFragment* next = f->next;
-                tm->fragmento->clearFragment(f);
-                f = next;
-            }
-            tm->vmfragments[i] = NULL;
-        }
-        delete tm->fragmento;
-        tm->fragmento = NULL;
-        for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
-            JS_ASSERT(tm->globalStates[i].globalSlots);
-            delete tm->globalStates[i].globalSlots;
-        }
+
+        memset(&tm->vmfragments[0], 0,
+               FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));
+
         delete[] tm->reservedDoublePool;
         tm->reservedDoublePool = tm->reservedDoublePoolPtr = NULL;
     }
-    if (tm->reFragmento != NULL) {
-        delete tm->reLirBuf;
-        delete tm->reFragmento;
+    if (tm->reAssembler != NULL) {
         delete tm->reAllocator;
-        delete tm->reAssembler;
         delete tm->reCodeAlloc;
     }
-    if (tm->assembler)
-        delete tm->assembler;
     if (tm->codeAlloc)
         delete tm->codeAlloc;
     if (tm->allocator)
@@ -6704,50 +6655,6 @@ PurgeScriptRecordingAttempts(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 n
     return JS_DHASH_NEXT;
 }

-/* Call 'action' for each root fragment created for 'script'. */
-template<typename FragmentAction>
-static void
-IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action)
-{
-    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-    for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-        for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
-            VMFragment* frag = *f;
-            if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
-                /* This fragment is associated with the script. */
-                JS_ASSERT(frag->root == frag);
-                VMFragment* next = frag->next;
-                if (action(cx, tm, frag)) {
-                    debug_only_printf(LC_TMTracer,
-                                      "Disconnecting VMFragment %p "
-                                      "with ip %p, in range [%p,%p).\n",
-                                      (void*)frag, frag->ip, script->code,
-                                      script->code + script->length);
-                    *f = next;
-                } else {
-                    f = &((*f)->next);
-                }
-            } else {
-                f = &((*f)->next);
-            }
-        }
-    }
-}
-
-static bool
-TrashTreeAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag)
-{
-    for (Fragment *p = frag; p; p = p->peer)
-        TrashTree(cx, p);
-    return false;
-}
-
-static bool
-ClearFragmentAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag)
-{
-    tm->fragmento->clearFragment(frag);
-    return true;
-}

 JS_REQUIRES_STACK void
 js_PurgeScriptFragments(JSContext* cx, JSScript* script)
@@ -6757,18 +6664,34 @@ js_PurgeScriptFragments(JSContext* cx, JSScript* script)
     debug_only_printf(LC_TMTracer,
                       "Purging fragments for JSScript %p.\n", (void*)script);

-    /*
-     * TrashTree trashes dependent trees recursively, so we must do all the trashing
-     * before clearing in order to avoid calling TrashTree with a deleted fragment.
-     */
-    IterateScriptFragments(cx, script, TrashTreeAction);
-    IterateScriptFragments(cx, script, ClearFragmentAction);
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
+    for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
+        for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
+            VMFragment* frag = *f;
+            if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
+                /* This fragment is associated with the script. */
+                debug_only_printf(LC_TMTracer,
+                                  "Disconnecting VMFragment %p "
+                                  "with ip %p, in range [%p,%p).\n",
+                                  (void*)frag, frag->ip, script->code,
+                                  script->code + script->length);
+
+                JS_ASSERT(frag->root == frag);
+                VMFragment* next = frag->next;
+                for (Fragment *p = frag; p; p = p->peer)
+                    TrashTree(cx, p);
+                *f = next;
+            } else {
+                f = &((*f)->next);
+            }
+        }
+    }
+
     JS_DHashTableEnumerate(&(tm->recordAttempts),
                            PurgeScriptRecordingAttempts, script);
 }

 bool
-js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
+js_OverfullJITCache(JSTraceMonitor* tm, bool reCache)
 {
     /*
      * You might imagine the outOfMemory flag on the allocator is sufficient
@@ -6798,7 +6721,7 @@ js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
      * modeling the two forms of memory exhaustion *separately* for the
      * time being: condition 1 is handled by the outOfMemory flag inside
      * nanojit, and condition 2 is being handled independently *here*. So
-     * we construct our fragmentos to use all available memory they like,
+     * we construct our allocators to use all available memory they like,
      * and only report outOfMemory to us when there is literally no OS memory
      * left. Merely purging our cache when we hit our highwater mark is
     * handled by the (few) callers of this function.
@@ -6807,7 +6730,7 @@ js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
     jsuint maxsz = tm->maxCodeCacheBytes;
     VMAllocator *allocator = tm->allocator;
     CodeAlloc *codeAlloc = tm->codeAlloc;
-    if (fragmento == tm->reFragmento) {
+    if (reCache) {
         /*
          * At the time of making the code cache size configurable, we were using
          * 16 MB for the main code cache and 1 MB for the regular expression code
diff --git a/js/src/jstracer.h b/js/src/jstracer.h
index 2ecda891b8f3..edce0824cd46 100644
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -56,10 +56,11 @@
 #endif

 template <typename T>
-class Queue : public avmplus::GCObject {
+class Queue {
     T* _data;
     unsigned _len;
     unsigned _max;
+    nanojit::Allocator* alloc;

 public:
     void ensure(unsigned size) {
@@ -67,23 +68,34 @@ public:
             _max = 16;
         while (_max < size)
             _max <<= 1;
-        _data = (T*)realloc(_data, _max * sizeof(T));
+        if (alloc) {
+            T* tmp = new (*alloc) T[_max];
+            memcpy(tmp, _data, _len * sizeof(T));
+            _data = tmp;
+        } else {
+            _data = (T*)realloc(_data, _max * sizeof(T));
+        }
 #if defined(DEBUG)
         memset(&_data[_len], 0xcd, _max - _len);
 #endif
     }

-    Queue(unsigned max = 16) {
+    Queue(nanojit::Allocator* alloc, unsigned max = 16)
+        : alloc(alloc)
+    {
         this->_max = max;
         this->_len = 0;
         if (max)
-            this->_data = (T*)malloc(max * sizeof(T));
+            this->_data = (alloc ?
+                           new (*alloc) T[max] :
+                           (T*)malloc(max * sizeof(T)));
         else
             this->_data = NULL;
     }

     ~Queue() {
-        free(_data);
+        if (!alloc)
+            free(_data);
     }

     bool contains(T a) {
@@ -311,6 +323,7 @@ typedef Queue<uint16> SlotList;

 class TypeMap : public Queue<JSTraceType> {
 public:
+    TypeMap(nanojit::Allocator* alloc) : Queue<JSTraceType>(alloc) {}
     JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth);
     JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
                                                      unsigned stackSlots);
@@ -428,6 +441,7 @@ public:
     bool mOutOfMemory;
     size_t mSize;

+
     /*
     * FIXME: Area the LIR spills into if we encounter an OOM mid-way
     * through compilation; we must check mOutOfMemory before we run out
@@ -511,7 +525,7 @@ struct UnstableExit
     UnstableExit* next;
 };

-class TreeInfo MMGC_SUBCLASS_DECL {
+class TreeInfo {
 public:
     nanojit::Fragment* const fragment;
     JSScript* script;
@@ -537,19 +551,25 @@ public:
     uintN treePCOffset;
 #endif

-    TreeInfo(nanojit::Fragment* _fragment,
+    TreeInfo(nanojit::Allocator* alloc,
+             nanojit::Fragment* _fragment,
              SlotList* _globalSlots)
-        : fragment(_fragment),
-          script(NULL),
-          maxNativeStackSlots(0),
-          nativeStackBase(0),
-          maxCallDepth(0),
-          nStackTypes(0),
-          globalSlots(_globalSlots),
-          branchCount(0),
-          unstableExits(NULL)
-    {}
-    ~TreeInfo();
+        : fragment(_fragment),
+          script(NULL),
+          maxNativeStackSlots(0),
+          nativeStackBase(0),
+          maxCallDepth(0),
+          typeMap(alloc),
+          nStackTypes(0),
+          globalSlots(_globalSlots),
+          dependentTrees(alloc),
+          linkedTrees(alloc),
+          branchCount(0),
+          sideExits(alloc),
+          unstableExits(NULL),
+          gcthings(alloc),
+          sprops(alloc)
+    {}

     inline unsigned nGlobalTypes() {
         return typeMap.length() - nStackTypes;
@@ -965,9 +985,7 @@ public:
     JS_REQUIRES_STACK bool closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus &consensus);
     JS_REQUIRES_STACK void endLoop();
     JS_REQUIRES_STACK void endLoop(VMSideExit* exit);
-    JS_REQUIRES_STACK void joinEdgesToEntry(nanojit::Fragmento* fragmento,
-                                            VMFragment* peer_root);
-    void blacklist() { fragment->blacklist(); }
+    JS_REQUIRES_STACK void joinEdgesToEntry(VMFragment* peer_root);
     JS_REQUIRES_STACK void adjustCallerTypes(nanojit::Fragment* f);
     JS_REQUIRES_STACK nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f);
     JS_REQUIRES_STACK void prepareTreeCall(nanojit::Fragment* inner);
@@ -975,7 +993,7 @@ public:
     unsigned getCallDepth() const;
     void pushAbortStack();
     void popAbortStack();
-    void removeFragmentoReferences();
+    void removeFragmentReferences();
     void deepAbort();

     JS_REQUIRES_STACK JSRecordingStatus record_EnterFrame();
@@ -1052,7 +1070,7 @@ extern void
 js_PurgeScriptFragments(JSContext* cx, JSScript* script);

 extern bool
-js_OverfullFragmento(JSTraceMonitor* tm, nanojit::Fragmento *frago);
+js_OverfullJITCache(JSTraceMonitor* tm, bool reCache);

 extern void
 js_PurgeJITOracle();
diff --git a/js/src/nanojit/Assembler.h b/js/src/nanojit/Assembler.h
index 5de394843274..4840887318bc 100644
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -149,7 +149,7 @@ namespace nanojit
      * value. Temporary values can be placed into the AR as method calls
      * are issued. Also LIR_alloc instructions will consume space.
      */
-    class Assembler MMGC_SUBCLASS_DECL
+    class Assembler
     {
         friend class VerboseBlockReader;
         public:
diff --git a/js/src/nanojit/Fragmento.cpp b/js/src/nanojit/Fragmento.cpp
index 09317b4ef18d..bde86e9559f3 100644
--- a/js/src/nanojit/Fragmento.cpp
+++ b/js/src/nanojit/Fragmento.cpp
@@ -48,150 +48,6 @@ namespace nanojit
 {
     using namespace avmplus;

-    static uint32_t calcSaneCacheSize(uint32_t in)
-    {
-        if (in < uint32_t(NJ_LOG2_PAGE_SIZE)) return NJ_LOG2_PAGE_SIZE; // at least 1 page
-        if (in > 32) return 32; // 4GB should be enough for anyone
-        return in;
-    }
-
-    /**
-     * This is the main control center for creating and managing fragments.
-     */
-    Fragmento::Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc* codeAlloc)
-        :
-#ifdef NJ_VERBOSE
-          enterCounts(NULL),
-          mergeCounts(NULL),
-          labels(NULL),
-#endif
-          _core(core),
-          _codeAlloc(codeAlloc),
-          _frags(core->GetGC()),
-          _max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
-          _pagesGrowth(1)
-    {
-#ifdef _DEBUG
-        {
-            // XXX These belong somewhere else, but I can't find the
-            // right location right now.
-            NanoStaticAssert((LIR_lt ^ 3) == LIR_ge);
-            NanoStaticAssert((LIR_le ^ 3) == LIR_gt);
-            NanoStaticAssert((LIR_ult ^ 3) == LIR_uge);
-            NanoStaticAssert((LIR_ule ^ 3) == LIR_ugt);
-            NanoStaticAssert((LIR_flt ^ 3) == LIR_fge);
-            NanoStaticAssert((LIR_fle ^ 3) == LIR_fgt);
-
-            /* Opcodes must be strictly increasing without holes. */
-            uint32_t count = 0;
-#define OPDEF(op, number, operands, repkind) \
-        NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
-#define OPDEF64(op, number, operands, repkind) \
-        OPDEF(op, number, operands, repkind)
-#include "LIRopcode.tbl"
-#undef OPDEF
-#undef OPDEF64
-        }
-#endif
-
-#ifdef MEMORY_INFO
-        _allocList.set_meminfo_name("Fragmento._allocList");
-#endif
-        NanoAssert(_max_pages > _pagesGrowth); // shrink growth if needed
-        verbose_only( enterCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
-        verbose_only( mergeCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
-
-        memset(&_stats, 0, sizeof(_stats));
-    }
-
-    Fragmento::~Fragmento()
-    {
-        clearFrags();
-#if defined(NJ_VERBOSE)
-        NJ_DELETE(enterCounts);
-        NJ_DELETE(mergeCounts);
-#endif
-    }
-
-
-    // Clear the fragment. This *does not* remove the fragment from the
-    // map--the caller must take care of this.
-    void Fragmento::clearFragment(Fragment* f)
-    {
-        Fragment *peer = f->peer;
-        while (peer) {
-            Fragment *next = peer->peer;
-            peer->releaseTreeMem(_codeAlloc);
-            NJ_DELETE(peer);
-            peer = next;
-        }
-        f->releaseTreeMem(_codeAlloc);
-        NJ_DELETE(f);
-    }
-
-    void Fragmento::clearFrags()
-    {
-        while (!_frags.isEmpty()) {
-            clearFragment(_frags.removeLast());
-        }
-
-        verbose_only( enterCounts->clear();)
-        verbose_only( mergeCounts->clear();)
-        verbose_only( _stats.flushes++ );
-        verbose_only( _stats.compiles = 0 );
-        //nj_dprintf("Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
-    }
-
-    AvmCore* Fragmento::core()
-    {
-        return _core;
-    }
-
-    Fragment* Fragmento::getAnchor(const void* ip)
-    {
-        Fragment *f = newFrag(ip);
-        Fragment *p = _frags.get(ip);
-        if (p) {
-            f->first = p;
-            /* append at the end of the peer list */
-            Fragment* next;
-            while ((next = p->peer) != NULL)
-                p = next;
-            p->peer = f;
-        } else {
-            f->first = f;
-            _frags.put(ip, f); /* this is the first fragment */
-        }
-        f->anchor = f;
-        f->root = f;
-        f->kind = LoopTrace;
-        verbose_only( addLabel(f, "T", _frags.size()); )
-        return f;
-    }
-
-    Fragment* Fragmento::getLoop(const void* ip)
-    {
-        return _frags.get(ip);
-    }
-
-#ifdef NJ_VERBOSE
-    void Fragmento::addLabel(Fragment *f, const char *prefix, int id)
-    {
-        char fragname[20];
-        sprintf(fragname,"%s%d", prefix, id);
-        labels->add(f, sizeof(Fragment), 0, fragname);
-    }
-#endif
-
-    Fragment *Fragmento::createBranch(SideExit* exit, const void* ip)
-    {
-        Fragment *f = newBranch(exit->from, ip);
-        f->kind = BranchTrace;
-        f->treeBranches = f->root->treeBranches;
-        f->root->treeBranches = f;
-        return f;
-    }
-
     //
     // Fragment
     //
@@ -226,82 +82,13 @@ namespace nanojit
         guardCount(0),
         xjumpCount(0),
         recordAttempts(0),
-        blacklistLevel(0),
         fragEntry(NULL),
         loopEntry(NULL),
         vmprivate(NULL),
-        codeList(0),
         _code(NULL),
         _hits(0)
     {
     }
-
-    Fragment::~Fragment()
-    {
-        onDestroy();
-    }
-
-    void Fragment::blacklist()
-    {
-        blacklistLevel++;
-        _hits = -(1<<blacklistLevel);
-    }
-
-    Fragment *Fragmento::newFrag(const void* ip)
-    {
-        GC *gc = _core->gc;
-        Fragment *f = new Fragment(ip);
-        f->blacklistLevel = 5;
-        return f;
-    }
-
-    Fragment *Fragmento::newBranch(Fragment *from, const void* ip)
-    {
-        Fragment *f = newFrag(ip);
-        f->anchor = from->anchor;
-        f->root = from->root;
-        f->xjumpCount = from->xjumpCount;
-        /*// prepend
-        f->nextbranch = from->branches;
-        from->branches = f;*/
-        // append
-        if (!from->branches) {
-            from->branches = f;
-        } else {
-            Fragment *p = from->branches;
-            while (p->nextbranch != 0)
-                p = p->nextbranch;
-            p->nextbranch = f;
-        }
-        return f;
-    }
-
-    void Fragment::releaseLirBuffer()
-    {
-        lastIns = 0;
-    }
-
-    void Fragment::releaseCode(CodeAlloc *codeAlloc)
-    {
-        _code = 0;
-        codeAlloc->freeAll(codeList);
-    }
-
-    void Fragment::releaseTreeMem(CodeAlloc *codeAlloc)
-    {
-        releaseLirBuffer();
-        releaseCode(codeAlloc);
-
-        // now do it for all branches
-        Fragment* branch = branches;
-        while(branch)
-        {
-            Fragment* next = branch->nextbranch;
-            branch->releaseTreeMem(codeAlloc); // @todo safer here to recurse in case we support nested trees
-            NJ_DELETE(branch);
-            branch = next;
-        }
-    }
 #endif /* FEATURE_NANOJIT */
 }
diff --git a/js/src/nanojit/Fragmento.h b/js/src/nanojit/Fragmento.h
index cb1f72d337cb..34b96db94f4c 100644
--- a/js/src/nanojit/Fragmento.h
+++ b/js/src/nanojit/Fragmento.h
@@ -46,74 +46,6 @@ namespace nanojit
 {
     struct GuardRecord;
-    class Assembler;
-
-    typedef avmplus::GCSortedMap<const void*, uint32_t, avmplus::LIST_NonGCObjects> BlockSortedMap;
-    class BlockHist: public BlockSortedMap
-    {
-    public:
-        BlockHist(avmplus::GC*gc) : BlockSortedMap(gc)
-        {
-        }
-        uint32_t count(const void *p) {
-            uint32_t c = 1+get(p);
-            put(p, c);
-            return c;
-        }
-    };
-
-    struct fragstats;
-    /*
-     *
-     * This is the main control center for creating and managing fragments.
-     */
-    class Fragmento : public avmplus::GCFinalizedObject
-    {
-        public:
-            Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc *codeAlloc);
-            ~Fragmento();
-
-            AvmCore* core();
-
-            Fragment* getLoop(const void* ip);
-            Fragment* getAnchor(const void* ip);
-            // Remove one fragment. The caller is responsible for making sure
-            // that this does not destroy any resources shared with other
-            // fragments (such as a LirBuffer or this fragment itself as a
-            // jump target).
-            void clearFrags(); // clear all fragments from the cache
-            Fragment* createBranch(SideExit *exit, const void* ip);
-            Fragment* newFrag(const void* ip);
-            Fragment* newBranch(Fragment *from, const void* ip);
-
-            verbose_only ( uint32_t pageCount(); )
-            verbose_only( void addLabel(Fragment* f, const char *prefix, int id); )
-
-            // stats
-            struct
-            {
-                uint32_t pages;     // pages consumed
-                uint32_t flushes, ilsize, abcsize, compiles, totalCompiles;
-            }
-            _stats;
-
-            verbose_only( DWB(BlockHist*) enterCounts; )
-            verbose_only( DWB(BlockHist*) mergeCounts; )
-            verbose_only( LabelMap* labels; )
-
-            #ifdef AVMPLUS_VERBOSE
-            void drawTrees(char *fileName);
-            #endif
-
-            void clearFragment(Fragment *f);
-        private:
-            AvmCore* _core;
-            CodeAlloc* _codeAlloc;
-            FragmentMap _frags; /* map from ip -> Fragment ptr */
-
-            const uint32_t _max_pages;
-            uint32_t _pagesGrowth;
-    };

     enum TraceKind {
         LoopTrace,
@@ -132,19 +64,12 @@ namespace nanojit
     {
         public:
             Fragment(const void*);
-            ~Fragment();

             NIns* code() { return _code; }
             void setCode(NIns* codee) { _code = codee; }
             int32_t& hits() { return _hits; }
-            void blacklist();
-            bool isBlacklisted() { return _hits < 0; }
-            void releaseLirBuffer();
-            void releaseCode(CodeAlloc *alloc);
-            void releaseTreeMem(CodeAlloc *alloc);
             bool isAnchor() { return anchor == this; }
             bool isRoot() { return root == this; }
-            void onDestroy();

             verbose_only( uint32_t _called; )
             verbose_only( uint32_t _native; )
@@ -176,11 +101,9 @@ namespace nanojit
             uint32_t guardCount;
             uint32_t xjumpCount;
             uint32_t recordAttempts;
-            int32_t blacklistLevel;
             NIns* fragEntry;
             NIns* loopEntry;
             void* vmprivate;
-            CodeList* codeList;

         private:
             NIns* _code; // ptr to start of code
diff --git a/js/src/nanojit/LIR.cpp b/js/src/nanojit/LIR.cpp
index 00fd38608ca2..9876eaa51ec2 100644
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -2062,10 +2062,6 @@ namespace nanojit
             frag->fragEntry = 0;
             frag->loopEntry = 0;
         }
-        else
-        {
-            CodeAlloc::moveAll(frag->codeList, assm->codeList);
-        }

         /* BEGIN decorative postamble */
         verbose_only( if (anyVerb) {
diff --git a/js/src/nanojit/LIR.h b/js/src/nanojit/LIR.h
index b4794744ebb3..a3e7a39a45cb 100644
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -781,8 +781,6 @@ namespace nanojit
     LIns* FASTCALL callArgN(LInsp i, uint32_t n);
     extern const uint8_t operandCount[];

-    class Fragmento; // @todo remove this ; needed for minbuild for some reason?!? Should not be compiling this code at all
-
     // make it a GCObject so we can explicitly delete it early
     class LirWriter : public GCObject
     {
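
The thread running through patches 1 and 3 is an allocation-lifecycle change: bookkeeping objects (fragments, hash keys, label maps, LIR buffers) stop being GC-finalized or individually deleted and are instead placement-new'd into a long-lived allocator, so a cache flush becomes a single reset() plus re-creation of the few root objects, as JSTraceMonitor::flush() and the CompileRegExpToNative fail path now do. The following is a self-contained sketch of that pattern, illustrative only: Arena, RegexpKey and lookup_or_create are hypothetical names, and a std::map stands in for nanojit's HashMap. It mirrors the put-if-absent flow of LookupNativeRegExp, including copying the key's character data into the arena so the cache entry's lifetime is decoupled from the regexp object that triggered compilation.

    #include <cstdint>
    #include <cstring>
    #include <map>
    #include <new>
    #include <vector>

    // Hypothetical stand-in for nanojit::Allocator: a bump-pointer arena.
    // Objects are never freed one at a time; reset() reclaims everything.
    class Arena {
        std::vector<char*> chunks;
        char*  cur  = nullptr;
        size_t used = 0, cap = 0;
    public:
        void* alloc(size_t n) {
            n = (n + 7) & ~size_t(7);            // keep 8-byte alignment
            if (cur == nullptr || used + n > cap) {
                cap = n > 4096 ? n : 4096;       // grab a fresh chunk
                cur = new char[cap];
                chunks.push_back(cur);
                used = 0;
            }
            void* p = cur + used;
            used += n;
            return p;
        }
        void reset() {                           // analogue of Allocator::reset()
            for (char* c : chunks) delete[] c;
            chunks.clear();
            cur = nullptr; used = cap = 0;
        }
        ~Arena() { reset(); }
    };

    struct Fragment {
        explicit Fragment(const void* ip) : ip(ip), code(nullptr) {}
        const void* ip;
        void* code;                              // compiled native code, if any
    };

    // Mirrors REHashKey: (length, flags, source chars) identifies a regexp.
    struct RegexpKey {
        size_t len;
        uint16_t flags;
        const char16_t* chars;
        bool operator<(const RegexpKey& o) const {   // ordering for the map stand-in
            if (len != o.len) return len < o.len;
            if (flags != o.flags) return flags < o.flags;
            return std::memcmp(chars, o.chars, len * sizeof(char16_t)) < 0;
        }
    };

    // Put-if-absent, as in LookupNativeRegExp: on a miss, the fragment is
    // placement-new'd into the arena and the key's chars are copied in too,
    // so the cache entry no longer aliases the regexp that created it.
    Fragment* lookup_or_create(std::map<RegexpKey, Fragment*>& table, Arena& arena,
                               uint16_t flags, const char16_t* chars, size_t len)
    {
        RegexpKey k{len, flags, chars};
        std::map<RegexpKey, Fragment*>::iterator it = table.find(k);
        if (it != table.end())
            return it->second;

        Fragment* frag = new (arena.alloc(sizeof(Fragment))) Fragment(nullptr);
        char16_t* copy = static_cast<char16_t*>(arena.alloc(len * sizeof(char16_t)));
        std::memcpy(copy, chars, len * sizeof(char16_t));
        k.chars = copy;                          // key now points at arena-owned chars
        table[k] = frag;
        return frag;
    }

Because the map entries and the key characters live in the same allocator, resetting it invalidates the table as well — which is why the regexp fail path above re-creates tm->reFragments immediately after alloc.reset() rather than trying to clear the old table in place.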