From ea63af826791f390c93f13d0e71c0bc46718d9ea Mon Sep 17 00:00:00 2001
From: Mike Moenig
Date: Mon, 12 Apr 2010 18:51:25 -0700
Subject: [PATCH] [JAEGER] Allow custom memory allocator use in spidermonkey
 (bug 549532, r=dvander).

---
 js/src/jsarena.h       |   2 +-
 js/src/jscntxt.h       |   3 ++
 js/src/jsgc.cpp        |   3 --
 js/src/jshash.cpp      |   4 +-
 js/src/jslock.cpp      |   2 +-
 js/src/jsrecursion.cpp |  12 ++---
 js/src/jstl.h          |   6 +--
 js/src/jstracer.cpp    | 104 ++++++++++++++++++-----------------------
 js/src/jstracer.h      |  22 +++++++--
 js/src/jsutil.h        |   8 ++++
 10 files changed, 88 insertions(+), 78 deletions(-)

diff --git a/js/src/jsarena.h b/js/src/jsarena.h
index 47207f9fece7..ad2abd19ac6d 100644
--- a/js/src/jsarena.h
+++ b/js/src/jsarena.h
@@ -200,7 +200,7 @@ struct JSArenaPool {
         if ((pool)->current == (a)) (pool)->current = &(pool)->first;        \
         *(pnext) = (a)->next;                                                \
         JS_CLEAR_ARENA(a);                                                   \
-        free(a);                                                             \
+        js_free(a);                                                          \
         (a) = NULL;                                                          \
     JS_END_MACRO

diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h
index b4e213716274..df49ff4a7586 100644
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -334,6 +334,8 @@ typedef HashMap<jsbytecode*,
                 SystemAllocPolicy> RecordAttemptMap;
 
+class Oracle;
+
 /*
  * Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
  * JS_THREADSAFE) has an associated trace monitor that keeps track of loop
@@ -395,6 +397,7 @@ struct TraceMonitor {
     nanojit::Assembler*     assembler;
     FrameInfoCache*         frameCache;
 
+    Oracle*                 oracle;
     TraceRecorder*          recorder;
 
     GlobalState             globalStates[MONITOR_N_GLOBAL_STATES];

diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp
index 46b00cfbb9e8..90de227be41c 100644
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -2907,9 +2907,6 @@ PreGCCleanup(JSContext *cx, JSGCInvocationKind gckind)
     }
 #endif
 
-#ifdef JS_TRACER
-    PurgeJITOracle();
-#endif
 
     /*
      * Reset the property cache's type id generator so we can compress ids.
diff --git a/js/src/jshash.cpp b/js/src/jshash.cpp
index e347744e7319..15a8a460ab5c 100644
--- a/js/src/jshash.cpp
+++ b/js/src/jshash.cpp
@@ -67,7 +67,7 @@
 static void *
 DefaultAllocTable(void *pool, size_t size)
 {
-    return malloc(size);
+    return js_malloc(size);
 }
 
 static void
@@ -79,7 +79,7 @@ DefaultFreeTable(void *pool, void *item, size_t size)
 static JSHashEntry *
 DefaultAllocEntry(void *pool, const void *key)
 {
-    return (JSHashEntry*) malloc(sizeof(JSHashEntry));
+    return (JSHashEntry*) js_malloc(sizeof(JSHashEntry));
 }
 
 static void
diff --git a/js/src/jslock.cpp b/js/src/jslock.cpp
index 7f68869d152b..00632d3ae6da 100644
--- a/js/src/jslock.cpp
+++ b/js/src/jslock.cpp
@@ -836,7 +836,7 @@ js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
 static JSFatLock *
 NewFatlock()
 {
-    JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
+    JSFatLock *fl = (JSFatLock *)js_malloc(sizeof(JSFatLock)); /* for now */
     if (!fl) return NULL;
     fl->susp = 0;
     fl->next = NULL;
diff --git a/js/src/jsrecursion.cpp b/js/src/jsrecursion.cpp
index cdcbf98af03f..b2bdef740b10 100644
--- a/js/src/jsrecursion.cpp
+++ b/js/src/jsrecursion.cpp
@@ -121,8 +121,8 @@ class UpRecursiveSlotMap : public RecursiveSlotMap
 };
 
 #if defined DEBUG
-static JS_REQUIRES_STACK void
-AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
+JS_REQUIRES_STACK void
+TraceRecorder::assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi)
 {
     JS_ASSERT(anchor->recursive_down);
     JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight);
@@ -130,7 +130,7 @@ AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
 
     unsigned downPostSlots = fi->callerHeight;
     TraceType* typeMap = fi->get_typemap();
-    CaptureStackTypes(cx, 1, typeMap);
+    captureStackTypes(1, typeMap);
     const TraceType* m1 = anchor->recursive_down->get_typemap();
     for (unsigned i = 0; i < downPostSlots; i++) {
         if (m1[i] == typeMap[i])
@@ -258,7 +258,7 @@ TraceRecorder::upRecursion()
          * recursive functions.
          */
 #if defined DEBUG
-        AssertDownFrameIsConsistent(cx, anchor, fi);
+        assertDownFrameIsConsistent(anchor, fi);
 #endif
         fi = anchor->recursive_down;
     } else if (recursive_pc != fragment->root->ip) {
@@ -266,7 +266,7 @@ TraceRecorder::upRecursion()
         /*
         * Case 1: Guess that down-recursion has to started back out, infer types
         * from the down frame.
         */
-        CaptureStackTypes(cx, 1, fi->get_typemap());
+        captureStackTypes(1, fi->get_typemap());
     } else {
        /* Case 2: Guess that up-recursion is backing out, infer types from our Tree. */
        JS_ASSERT(tree->nStackTypes == downPostSlots + 1);
@@ -491,7 +491,7 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
     TraceType* typeMap = exit->stackTypeMap();
     jsbytecode* oldpc = cx->fp->regs->pc;
     cx->fp->regs->pc = exit->pc;
-    CaptureStackTypes(cx, frameDepth, typeMap);
+    captureStackTypes(frameDepth, typeMap);
     cx->fp->regs->pc = oldpc;
     if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
         JS_ASSERT_IF(*cx->fp->regs->pc != JSOP_RETURN, *cx->fp->regs->pc == JSOP_STOP);
diff --git a/js/src/jstl.h b/js/src/jstl.h
index 3faaacb6a47e..2bb6677b01a4 100644
--- a/js/src/jstl.h
+++ b/js/src/jstl.h
@@ -241,9 +241,9 @@ PointerRangeSize(T *begin, T *end)
 class SystemAllocPolicy
 {
   public:
-    void *malloc(size_t bytes) { return ::malloc(bytes); }
-    void *realloc(void *p, size_t bytes) { return ::realloc(p, bytes); }
-    void free(void *p) { ::free(p); }
+    void *malloc(size_t bytes) { return js_malloc(bytes); }
+    void *realloc(void *p, size_t bytes) { return js_realloc(p, bytes); }
+    void free(void *p) { js_free(p); }
     void reportAllocOverflow() const {}
 };
 
diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp
index 771668b8906e..aeb1f890ab9e 100644
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -107,7 +107,7 @@ nanojit::Allocator::allocChunk(size_t nbytes)
 {
     VMAllocator *vma = (VMAllocator*)this;
     JS_ASSERT(!vma->outOfMemory());
-    void *p = calloc(1, nbytes);
+    void *p = js_calloc(nbytes);
     if (!p) {
         JS_ASSERT(nbytes < sizeof(vma->mReserve));
         vma->mOutOfMemory = true;
@@ -121,7 +121,7 @@ void
 nanojit::Allocator::freeChunk(void *p) {
     VMAllocator *vma = (VMAllocator*)this;
     if (p != &vma->mReserve[0])
-        free(p);
+        js_free(p);
 }
 
 void
@@ -906,12 +906,6 @@ TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LI
 }
 #endif
 
-/*
- * The entire VM shares one oracle. Collisions and concurrent updates are
- * tolerated and worst case cause performance regressions.
- */
-static Oracle oracle;
-
 Tracker::Tracker()
 {
     pagelist = NULL;
@@ -951,7 +945,7 @@ struct Tracker::TrackerPage*
 Tracker::addTrackerPage(const void* v)
 {
     jsuword base = getTrackerPageBase(v);
-    struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p));
+    struct TrackerPage* p = (struct TrackerPage*) js_calloc(sizeof(*p));
     p->base = base;
     p->next = pagelist;
     pagelist = p;
@@ -964,7 +958,7 @@ Tracker::clear()
     while (pagelist) {
         TrackerPage* p = pagelist;
         pagelist = pagelist->next;
-        free(p);
+        js_free(p);
     }
 }
 
@@ -1217,44 +1211,38 @@ Oracle::clearDemotability()
     _pcDontDemote.reset();
 }
 
-JS_REQUIRES_STACK static JS_INLINE void
-MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
+JS_REQUIRES_STACK void
+TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot)
 {
     if (slot < f->nStackTypes) {
-        oracle.markStackSlotUndemotable(cx, slot);
+        oracle->markStackSlotUndemotable(cx, slot);
         return;
     }
 
     uint16* gslots = f->globalSlots->data();
-    oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+    oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }
 
-JS_REQUIRES_STACK static JS_INLINE void
-MarkSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* pc)
+JS_REQUIRES_STACK void
+TraceRecorder::markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc)
 {
     if (slot < f->nStackTypes) {
-        oracle.markStackSlotUndemotable(cx, slot, pc);
+        oracle->markStackSlotUndemotable(cx, slot, pc);
         return;
     }
 
     uint16* gslots = f->globalSlots->data();
-    oracle.markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
+    oracle->markGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }
 
-static JS_REQUIRES_STACK inline bool
-IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
+static JS_REQUIRES_STACK bool
+IsSlotUndemotable(Oracle* oracle, JSContext* cx, LinkableFragment* f, unsigned slot, const void* ip)
 {
     if (slot < f->nStackTypes)
-        return oracle.isStackSlotUndemotable(cx, slot, ip);
+        return oracle->isStackSlotUndemotable(cx, slot, ip);
 
     uint16* gslots = f->globalSlots->data();
-    return oracle.isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
-}
-
-static JS_REQUIRES_STACK inline bool
-IsSlotUndemotable(JSContext* cx, LinkableFragment* f, unsigned slot)
-{
-    return IsSlotUndemotable(cx, f, slot, cx->fp->regs->pc);
+    return oracle->isGlobalSlotUndemotable(cx, gslots[slot - f->nStackTypes]);
 }
 
 class FrameInfoCache
@@ -1967,7 +1955,7 @@ public:
     visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
         TraceType type = getCoercedType(*vp);
         if (type == TT_INT32 &&
-            oracle.isGlobalSlotUndemotable(mCx, slot))
+            JS_TRACE_MONITOR(mCx).oracle->isGlobalSlotUndemotable(mCx, slot))
             type = TT_DOUBLE;
         JS_ASSERT(type != TT_JSVAL);
         debug_only_printf(LC_TMTracer,
@@ -1981,7 +1969,7 @@ public:
         for (int i = 0; i < count; ++i) {
             TraceType type = getCoercedType(vp[i]);
             if (type == TT_INT32 &&
-                oracle.isStackSlotUndemotable(mCx, length()))
+                JS_TRACE_MONITOR(mCx).oracle->isStackSlotUndemotable(mCx, length()))
                 type = TT_DOUBLE;
             JS_ASSERT(type != TT_JSVAL);
             debug_only_printf(LC_TMTracer,
@@ -2137,6 +2125,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag
                              RecordReason recordReason)
   : cx(cx),
     traceMonitor(&JS_TRACE_MONITOR(cx)),
+    oracle(JS_TRACE_MONITOR(cx).oracle),
     fragment(fragment),
     tree(fragment->root),
     recordReason(recordReason),
@@ -2684,6 +2673,7 @@ TraceMonitor::flush()
     codeAlloc->reset();
     tempAlloc->reset();
     reTempAlloc->reset();
+    oracle->clear();
 
     Allocator& alloc = *dataAlloc;
 
@@ -3532,7 +3522,7 @@ TraceRecorder::importGlobalSlot(unsigned slot)
     int index = tree->globalSlots->offsetOf(uint16(slot));
     if (index == -1) {
         type = getCoercedType(*vp);
-        if (type == TT_INT32 && oracle.isGlobalSlotUndemotable(cx, slot))
+        if (type == TT_INT32 && oracle->isGlobalSlotUndemotable(cx, slot))
             type = TT_DOUBLE;
         index = (int)tree->globalSlots->length();
         tree->globalSlots->add(uint16(slot));
@@ -3763,7 +3753,7 @@ public:
              * Aggressively undo speculation so the inner tree will compile
              * if this fails.
              */
-            oracle.markGlobalSlotUndemotable(mCx, slot);
+            mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
         }
         JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
         ++mTypeMap;
@@ -3807,7 +3797,7 @@ public:
              * Aggressively undo speculation so the inner tree will compile
              * if this fails.
              */
-            oracle.markStackSlotUndemotable(mCx, mSlotnum);
+            mRecorder.oracle->markStackSlotUndemotable(mCx, mSlotnum);
         }
         JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
         ++vp;
@@ -4439,7 +4429,7 @@ class SlotMap : public SlotVisitorBase
     {
         for (unsigned i = 0; i < length(); i++) {
             if (get(i).lastCheck == TypeCheck_Undemote)
-                MarkSlotUndemotable(mRecorder.cx, mRecorder.tree, i);
+                mRecorder.markSlotUndemotable(mRecorder.tree, i);
         }
     }
 
@@ -4764,7 +4754,7 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
         if (typeMap[i] == peerMap[i])
             continue;
         if (typeMap[i] == TT_INT32 && peerMap[i] == TT_DOUBLE &&
-            IsSlotUndemotable(cx, peer, i, peer->ip)) {
+            IsSlotUndemotable(JS_TRACE_MONITOR(cx).oracle, cx, peer, i, peer->ip)) {
             consensus = TypeConsensus_Undemotes;
         } else {
             return TypeConsensus_Bad;
@@ -4773,8 +4763,8 @@ TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, TreeFragment* peer)
     return consensus;
 }
 
-static JS_REQUIRES_STACK unsigned
-FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment* f,
+JS_REQUIRES_STACK unsigned
+TraceRecorder::findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
                         Queue<unsigned>& undemotes)
 {
     undemotes.setLength(0);
@@ -4787,7 +4777,7 @@ FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, LinkableFragment*
         }
     }
     for (unsigned i = 0; i < undemotes.length(); i++)
-        MarkSlotUndemotable(cx, f, undemotes[i]);
+        markSlotUndemotable(f, undemotes[i]);
     return undemotes.length();
 }
 
@@ -4836,7 +4826,7 @@ TraceRecorder::joinEdgesToEntry(TreeFragment* peer_root)
                 uexit = peer->removeUnstableExit(uexit->exit);
             } else {
                 /* Check for int32->double slots that suggest trashing. */
-                if (FindUndemotesInTypemaps(cx, typeMap, tree, undemotes)) {
+                if (findUndemotesInTypemaps(typeMap, tree, undemotes)) {
                     JS_ASSERT(peer == uexit->fragment->root);
                     if (fragment == peer)
                         trashSelf = true;
@@ -5683,6 +5673,7 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
     TreeFragment* from = exit->root();
 
     JS_ASSERT(from->code());
+    Oracle* oracle = JS_TRACE_MONITOR(cx).oracle;
 
     TypeMap typeMap(NULL);
     FullMapFromExit(typeMap, exit);
@@ -5694,14 +5685,14 @@ FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, TreeFragment** peerp)
         if (typeMap[i] == TT_DOUBLE) {
             if (exit->exitType == RECURSIVE_UNLINKED_EXIT) {
                 if (i < exit->numStackSlots)
-                    oracle.markStackSlotUndemotable(cx, i, exit->recursive_pc);
+                    oracle->markStackSlotUndemotable(cx, i, exit->recursive_pc);
                 else
-                    oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
+                    oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
             }
             if (i < from->nStackTypes)
-                oracle.markStackSlotUndemotable(cx, i, from->ip);
+                oracle->markStackSlotUndemotable(cx, i, from->ip);
             else if (i >= exit->numStackSlots)
-                oracle.markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
+                oracle->markGlobalSlotUndemotable(cx, gslots[i - exit->numStackSlots]);
         }
     }
 
@@ -6069,7 +6060,7 @@ TraceRecorder::attemptTreeCall(TreeFragment* f, uintN& inlineCallCount)
       }
 
      case OVERFLOW_EXIT:
-        oracle.markInstructionUndemotable(cx->fp->regs->pc);
+        oracle->markInstructionUndemotable(cx->fp->regs->pc);
        /* FALL THROUGH */
      case RECURSIVE_SLURP_FAIL_EXIT:
      case RECURSIVE_SLURP_MISMATCH_EXIT:
@@ -6177,10 +6168,10 @@ public:
         if (!IsEntryTypeCompatible(vp, mTypeMap)) {
             mOk = false;
         } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
-            oracle.markGlobalSlotUndemotable(mCx, slot);
+            mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
             mOk = false;
         } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
-            oracle.markGlobalSlotUndemotable(mCx, slot);
+            mRecorder.oracle->markGlobalSlotUndemotable(mCx, slot);
         }
         mTypeMap++;
     }
@@ -6192,10 +6183,10 @@ public:
         if (!IsEntryTypeCompatible(vp, mTypeMap)) {
             mOk = false;
         } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
-            oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
+            mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
             mOk = false;
         } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
-            oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
+            mRecorder.oracle->markStackSlotUndemotable(mCx, mStackSlotNum);
         }
         vp++;
         mTypeMap++;
@@ -7025,7 +7016,7 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason)
         return rv;
 
      case OVERFLOW_EXIT:
-        oracle.markInstructionUndemotable(cx->fp->regs->pc);
+        tm->oracle->markInstructionUndemotable(cx->fp->regs->pc);
        /* FALL THROUGH */
      case RECURSIVE_SLURP_FAIL_EXIT:
      case RECURSIVE_SLURP_MISMATCH_EXIT:
@@ -7466,6 +7457,8 @@ InitJIT(TraceMonitor *tm)
     /* Set the default size for the code cache to 16MB. */
     tm->maxCodeCacheBytes = 16 M;
 
+    tm->oracle = new Oracle();
+
     tm->recordAttempts = new RecordAttemptMap;
     if (!tm->recordAttempts->init(PC_HASH_COUNT))
         abort();
 
@@ -7548,6 +7541,7 @@ FinishJIT(TraceMonitor *tm)
 #endif
 
     delete tm->recordAttempts;
+    delete tm->oracle;
 
 #ifdef DEBUG
     // Recover profiling data from expiring Fragments, and display
@@ -7619,12 +7613,6 @@ FinishJIT(TraceMonitor *tm)
     tm->cachedTempTypeMap = NULL;
 }
 
-void
-PurgeJITOracle()
-{
-    oracle.clear();
-}
-
 JS_REQUIRES_STACK void
 PurgeScriptFragments(JSContext* cx, JSScript* script)
 {
@@ -8082,7 +8070,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)
      * integers and the oracle must not give us a negative hint for the
      * instruction.
      */
-    if (oracle.isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
+    if (oracle->isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
     out:
         if (v == LIR_fmod) {
             LIns* args[] = { s1, s0 };
@@ -10242,7 +10230,7 @@ TraceRecorder::record_JSOP_NEG()
      * a double. Only follow this path if we're not an integer that's 0 and
      * we're not a double that's zero.
      */
-    if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) &&
+    if (!oracle->isInstructionUndemotable(cx->fp->regs->pc) &&
         isPromoteInt(a) &&
         (!JSVAL_IS_INT(v) || JSVAL_TO_INT(v) != 0) &&
         (!JSVAL_IS_DOUBLE(v) || !JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) &&
@@ -15312,7 +15300,7 @@ StopTraceVisNative(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval
 #endif /* MOZ_TRACEVIS */
 
 JS_REQUIRES_STACK void
-CaptureStackTypes(JSContext* cx, unsigned callDepth, TraceType* typeMap)
+TraceRecorder::captureStackTypes(unsigned callDepth, TraceType* typeMap)
 {
     CaptureTypesVisitor capVisitor(cx, typeMap);
     VisitStackSlots(capVisitor, cx, callDepth);
diff --git a/js/src/jstracer.h b/js/src/jstracer.h
index 63d1561bc3b1..b041d4901e6c 100644
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -78,7 +78,7 @@ public:
             memcpy(tmp, _data, _len * sizeof(T));
             _data = tmp;
         } else {
-            _data = (T*)realloc(_data, _max * sizeof(T));
+            _data = (T*)js_realloc(_data, _max * sizeof(T));
         }
 #if defined(DEBUG)
         memset(&_data[_len], 0xcd, _max - _len);
@@ -95,7 +95,7 @@ public:
 
     ~Queue() {
         if (!alloc)
-            free(_data);
+            js_free(_data);
     }
 
     bool contains(T a) {
@@ -926,6 +926,9 @@ class TraceRecorder
     /* Cached value of JS_TRACE_MONITOR(cx). */
     TraceMonitor* const traceMonitor;
 
+    /* Cached oracle keeps track of hit counts for program counter locations */
+    Oracle* oracle;
+
     /* The Fragment being recorded by this recording session. */
     VMFragment* const fragment;
 
@@ -1065,6 +1068,17 @@ class TraceRecorder
      */
     JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);
 
+    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot);
+
+    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc);
+
+    JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
+                                                       Queue<unsigned>& undemotes);
+
+    JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi);
+
+    JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, TraceType* typeMap);
+
     bool isGlobal(jsval* p) const;
     ptrdiff_t nativeGlobalSlot(jsval *p) const;
     ptrdiff_t nativeGlobalOffset(jsval* p) const;
@@ -1380,8 +1394,8 @@ class TraceRecorder
 # include "jsopcode.tbl"
 #undef OPDEF
 
-    inline void* operator new(size_t size) { return calloc(1, size); }
-    inline void operator delete(void *p) { free(p); }
+    inline void* operator new(size_t size) { return js_calloc(size); }
+    inline void operator delete(void *p) { js_free(p); }
 
     JS_REQUIRES_STACK
     TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
diff --git a/js/src/jsutil.h b/js/src/jsutil.h
index d60804050df8..fb4956003429 100644
--- a/js/src/jsutil.h
+++ b/js/src/jsutil.h
@@ -44,6 +44,7 @@
 #ifndef jsutil_h___
 #define jsutil_h___
 
+#include "jstypes.h"
 #include <stdlib.h>
 
 JS_BEGIN_EXTERN_C
@@ -182,6 +183,12 @@ extern JS_FRIEND_API(void)
 JS_DumpBacktrace(JSCallsite *trace);
 #endif
 
+#if defined JS_USE_CUSTOM_ALLOCATOR
+
+#include "jscustomallocator.h"
+
+#else
+
 static JS_INLINE void* js_malloc(size_t bytes) {
     if (bytes < sizeof(void*)) /* for asyncFree */
         bytes = sizeof(void*);
@@ -203,6 +210,7 @@ static JS_INLINE void* js_realloc(void* p, size_t bytes) {
 static JS_INLINE void js_free(void* p) {
     free(p);
 }
+#endif/* JS_USE_CUSTOM_ALLOCATOR */
 
 JS_END_EXTERN_C
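
Note (not part of the diff above): with JS_USE_CUSTOM_ALLOCATOR defined, jsutil.h
stops defining the js_malloc/js_calloc/js_realloc/js_free wrappers itself and
includes an embedder-supplied jscustomallocator.h instead; the rest of the engine,
as changed above, routes its allocations through those wrappers. What follows is a
minimal sketch of such a header, not a file shipped by this patch. The my_* hook
functions are hypothetical placeholders for whatever allocator the embedding really
uses, and JS_INLINE is taken from jstypes.h, which this patch makes jsutil.h
include first.

/*
 * jscustomallocator.h -- illustrative sketch only.
 * It must define js_malloc, js_calloc, js_realloc and js_free, the entry
 * points jsutil.h expects when JS_USE_CUSTOM_ALLOCATOR is defined.
 */
#ifndef jscustomallocator_h___
#define jscustomallocator_h___

#include <stddef.h>
#include <string.h>

/* Hypothetical embedder-provided hooks; swap in a real allocator here. */
extern void *my_alloc_hook(size_t bytes);
extern void *my_realloc_hook(void *p, size_t bytes);
extern void  my_free_hook(void *p);

static JS_INLINE void* js_malloc(size_t bytes) {
    return my_alloc_hook(bytes);
}

static JS_INLINE void* js_calloc(size_t bytes) {
    /* calloc-style single-argument wrapper: zero the returned block. */
    void* p = my_alloc_hook(bytes);
    if (p)
        memset(p, 0, bytes);
    return p;
}

static JS_INLINE void* js_realloc(void* p, size_t bytes) {
    return my_realloc_hook(p, bytes);
}

static JS_INLINE void js_free(void* p) {
    my_free_hook(p);
}

#endif /* jscustomallocator_h___ */

Two points worth keeping in mind: the stock js_malloc in jsutil.h rounds requests
smaller than sizeof(void*) up (for asyncFree), so a real replacement probably wants
to preserve that guarantee; and builds that leave JS_USE_CUSTOM_ALLOCATOR undefined
fall through to the #else branch and keep the existing malloc-based definitions, so
current embeddings are unaffected.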