bug 521390 - avoid checking for malloc memory pressure when allocating GC things from free lists. r=brendan

This commit is contained in:
Igor Bukanov 2009-10-18 19:40:19 +04:00
Parent f570fef7f9
Commit ab47abb015
5 changed files: 152 additions and 102 deletions

View file

@@ -2498,7 +2498,7 @@ JS_MaybeGC(JSContext *cx)
* or approximately F == 0 && B > 4/3 Bl.
*/
if ((bytes > 8192 && bytes > lastBytes + lastBytes / 3) ||
rt->gcMallocBytes >= rt->gcMaxMallocBytes) {
rt->isGCMallocLimitReached()) {
JS_GC(cx);
}
}
@@ -2535,7 +2535,7 @@ JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32 value)
rt->gcMaxBytes = value;
break;
case JSGC_MAX_MALLOC_BYTES:
rt->gcMaxMallocBytes = value;
rt->setGCMaxMallocBytes(value);
break;
case JSGC_STACKPOOL_LIFESPAN:
rt->gcEmptyArenaPoolLifespan = value;
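Note: the two jsapi.cpp hunks above are the public-facing side of the new counter. JS_SetGCParameter(rt, JSGC_MAX_MALLOC_BYTES, value) now routes through setGCMaxMallocBytes, and JS_MaybeGC tests rt->isGCMallocLimitReached() instead of comparing the raw counters. The embedder-side sketch below only shows where that knob sits; the initialization calls are typical of the 2009-era JSAPI and the byte limits are arbitrary example values, not recommendations.

// Embedder sketch: where JSGC_MAX_MALLOC_BYTES fits. Assumes the 2009-era
// JSAPI; limits are examples only.
#include "jsapi.h"

int main()
{
    // js_InitGC seeds both gcMaxBytes and the malloc limit from this value
    // (see the js_InitGC hunk below).
    JSRuntime *rt = JS_NewRuntime(64L * 1024L * 1024L);
    if (!rt)
        return 1;

    // Routes through the new setGCMaxMallocBytes; oversized values are
    // clamped so the ptrdiff_t counter stays positive after a reset.
    JS_SetGCParameter(rt, JSGC_MAX_MALLOC_BYTES, 32 * 1024 * 1024);

    JSContext *cx = JS_NewContext(rt, 8192);
    if (!cx) {
        JS_DestroyRuntime(rt);
        return 1;
    }

    /* ... set up a global object and run scripts ... */

    // With this patch, JS_MaybeGC also fires once the downward-running
    // malloc counter reaches zero (rt->isGCMallocLimitReached()).
    JS_MaybeGC(cx);

    JS_DestroyContext(cx);
    JS_DestroyRuntime(rt);
    JS_ShutDown();
    return 0;
}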

View file

@@ -278,6 +278,7 @@ thread_purger(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 /* index */,
return JS_DHASH_REMOVE;
}
PurgeThreadData(cx, &thread->data);
thread->gcThreadMallocBytes = JS_GC_THREAD_MALLOC_LIMIT;
return JS_DHASH_NEXT;
}
@@ -1863,3 +1864,37 @@ js_CurrentPCIsInImacro(JSContext *cx)
return false;
#endif
}
void
JSContext::checkMallocGCPressure(void *p)
{
if (!p) {
js_ReportOutOfMemory(this);
return;
}
#ifdef JS_THREADSAFE
JS_ASSERT(thread->gcThreadMallocBytes <= 0);
ptrdiff_t n = JS_GC_THREAD_MALLOC_LIMIT - thread->gcThreadMallocBytes;
thread->gcThreadMallocBytes = JS_GC_THREAD_MALLOC_LIMIT;
JS_LOCK_GC(runtime);
runtime->gcMallocBytes -= n;
if (runtime->isGCMallocLimitReached())
#endif
{
JS_ASSERT(runtime->isGCMallocLimitReached());
runtime->gcMallocBytes = -1;
/*
* Empty the GC free lists to trigger a last-ditch GC when allocating
* any GC thing later on this thread. This minimizes the number of
* checks on the fast path of the GC allocator. Note that we cannot
* touch the free lists on other threads as their manipulation is not
* thread-safe.
*/
JS_THREAD_DATA(this)->gcFreeLists.purge();
js_TriggerGC(this, true);
}
JS_UNLOCK_GC(runtime);
}
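The jscntxt.cpp hunk above is the slow path that replaces the old per-malloc bookkeeping: each thread spends a local budget of JS_GC_THREAD_MALLOC_LIMIT bytes without touching the GC lock, and only when that budget underflows does checkMallocGCPressure fold the consumed amount into the runtime-wide counter and, if the limit is reached, purge the thread's free lists and request a GC. The following self-contained model restates that flush logic; every name in it (RuntimeState, ThreadState, noteMalloc) is illustrative, not a SpiderMonkey identifier.

// Model of the per-thread flush performed by checkMallocGCPressure.
// Illustrative names only; this is not SpiderMonkey code.
#include <stddef.h>
#include <mutex>

const ptrdiff_t THREAD_MALLOC_LIMIT = 1 << 19;    // models JS_GC_THREAD_MALLOC_LIMIT

struct RuntimeState {
    std::mutex gcLock;                            // models JS_LOCK_GC / JS_UNLOCK_GC
    ptrdiff_t gcMallocBytes;                      // runs down from the configured max
    bool limitReached() const { return gcMallocBytes <= 0; }
};

struct ThreadState {
    ptrdiff_t localBytes = THREAD_MALLOC_LIMIT;   // models JSThread::gcThreadMallocBytes
};

void noteMalloc(RuntimeState &rt, ThreadState &t, size_t nbytes)
{
    // Fast path: a thread-local decrement, no lock, no global state.
    t.localBytes -= ptrdiff_t(nbytes);
    if (t.localBytes > 0)
        return;

    // Slow path: account for everything this thread consumed since the last
    // flush, then refill the local budget.
    ptrdiff_t consumed = THREAD_MALLOC_LIMIT - t.localBytes;
    t.localBytes = THREAD_MALLOC_LIMIT;

    std::lock_guard<std::mutex> guard(rt.gcLock);
    rt.gcMallocBytes -= consumed;
    if (rt.limitReached()) {
        // At this point the real patch purges this thread's GC free lists and
        // calls js_TriggerGC, so the next GC-thing allocation takes the slow
        // path and can run a last-ditch GC.
    }
}

The OOM half of checkMallocGCPressure (p == NULL) is omitted from the model; in the patch it simply reports the out-of-memory error and returns.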

View file

@@ -315,12 +315,6 @@ struct JSThreadData {
JSEvalCacheMeter evalCacheMeter;
#endif
/*
* Thread-local version of JSRuntime.gcMallocBytes to avoid taking
* locks on each JS_malloc.
*/
size_t gcMallocBytes;
/*
* Cache of reusable JSNativeEnumerators mapped by shape identifiers (as
* stored in scope->shape). This cache is nulled by the GC and protected
@@ -365,10 +359,23 @@ struct JSThread {
/* Indicates that the thread is waiting in ClaimTitle from jslock.cpp. */
JSTitle *titleToShare;
/*
* Thread-local version of JSRuntime.gcMallocBytes to avoid taking
* locks on each JS_malloc.
*/
ptrdiff_t gcThreadMallocBytes;
/* Factored out of JSThread for !JS_THREADSAFE embedding in JSRuntime. */
JSThreadData data;
};
/*
* Only when JSThread::gcThreadMallocBytes exhausts the following limit do we
* update JSRuntime::gcMallocBytes.
*/
const size_t JS_GC_THREAD_MALLOC_LIMIT = 1 << 19;
#define JS_THREAD_DATA(cx) (&(cx)->thread->data)
struct JSThreadsHashEntry {
@@ -494,7 +501,18 @@ struct JSRuntime {
#endif
JSGCCallback gcCallback;
size_t gcMallocBytes;
/*
* Malloc counter to measure memory pressure for GC scheduling. It runs
* from gcMaxMallocBytes down to zero.
*/
ptrdiff_t gcMallocBytes;
/*
* Stack of GC arenas containing things that the GC marked, where children
* reached from those things have not yet been marked. This helps avoid
* using too much native stack during recursive GC marking.
*/
JSGCArenaInfo *gcUntracedArenaStackTop;
#ifdef DEBUG
size_t gcTraceLaterCount;
@@ -671,6 +689,10 @@ struct JSRuntime {
/* Literal table maintained by jsatom.c functions. */
JSAtomState atomState;
#ifdef JS_THREADSAFE
JSBackgroundThread *deallocatorThread;
#endif
/*
* Various metering fields are defined at the end of JSRuntime. In this
* way there is no need to recompile all the code that refers to other
@@ -753,25 +775,26 @@ struct JSRuntime {
void setGCTriggerFactor(uint32 factor);
void setGCLastBytes(size_t lastBytes);
inline void* malloc(size_t bytes) {
return ::js_malloc(bytes);
}
void* malloc(size_t bytes) { return ::js_malloc(bytes); }
inline void* calloc(size_t bytes) {
return ::js_calloc(bytes);
}
void* calloc(size_t bytes) { return ::js_calloc(bytes); }
inline void* realloc(void* p, size_t bytes) {
return ::js_realloc(p, bytes);
}
void* realloc(void* p, size_t bytes) { return ::js_realloc(p, bytes); }
inline void free(void* p) {
::js_free(p);
}
void free(void* p) { ::js_free(p); }
#ifdef JS_THREADSAFE
JSBackgroundThread *deallocatorThread;
#endif
bool isGCMallocLimitReached() const { return gcMallocBytes <= 0; }
void resetGCMallocBytes() { gcMallocBytes = ptrdiff_t(gcMaxMallocBytes); }
void setGCMaxMallocBytes(size_t value) {
/*
* For compatibility, clamp any value that exceeds PTRDIFF_T_MAX to
* PTRDIFF_T_MAX.
*/
gcMaxMallocBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
resetGCMallocBytes();
}
};
/* Common macros to access thread-local caches in JSThread or JSRuntime. */
@@ -1130,23 +1153,47 @@ struct JSContext {
}
#endif
/* Call this after successful malloc of memory for GC-related things. */
inline void updateMallocCounter(size_t nbytes) {
size_t *pbytes, bytes;
ptrdiff_t &getMallocCounter() {
#ifdef JS_THREADSAFE
return thread->gcThreadMallocBytes;
#else
return runtime->gcMallocBytes;
#endif
}
pbytes = &JS_THREAD_DATA(this)->gcMallocBytes;
bytes = *pbytes;
*pbytes = (size_t(-1) - bytes <= nbytes) ? size_t(-1) : bytes + nbytes;
/*
* Call this after allocating memory held by GC things, to update memory
* pressure counters or report the OOM error if necessary.
*/
inline void updateMallocCounter(void *p, size_t nbytes) {
JS_ASSERT(ptrdiff_t(nbytes) >= 0);
ptrdiff_t &counter = getMallocCounter();
counter -= ptrdiff_t(nbytes);
if (!p || counter <= 0)
checkMallocGCPressure(p);
}
/*
* Call this after successfully allocating memory held by GC things, to
* update memory pressure counters.
*/
inline void updateMallocCounter(size_t nbytes) {
JS_ASSERT(ptrdiff_t(nbytes) >= 0);
ptrdiff_t &counter = getMallocCounter();
counter -= ptrdiff_t(nbytes);
if (counter <= 0) {
/*
* Use 1 as an arbitrary non-null pointer indicating successful
* allocation.
*/
checkMallocGCPressure(reinterpret_cast<void *>(jsuword(1)));
}
}
inline void* malloc(size_t bytes) {
JS_ASSERT(bytes != 0);
void *p = runtime->malloc(bytes);
if (!p) {
JS_ReportOutOfMemory(this);
return NULL;
}
updateMallocCounter(bytes);
updateMallocCounter(p, bytes);
return p;
}
@@ -1162,23 +1209,19 @@ struct JSContext {
inline void* calloc(size_t bytes) {
JS_ASSERT(bytes != 0);
void *p = runtime->calloc(bytes);
if (!p) {
JS_ReportOutOfMemory(this);
return NULL;
}
updateMallocCounter(bytes);
updateMallocCounter(p, bytes);
return p;
}
inline void* realloc(void* p, size_t bytes) {
void *orig = p;
p = runtime->realloc(p, bytes);
if (!p) {
JS_ReportOutOfMemory(this);
return NULL;
}
if (!orig)
updateMallocCounter(bytes);
/*
* For compatibility we do not account for a realloc that grows
* previously allocated memory.
*/
updateMallocCounter(p, orig ? 0 : bytes);
return p;
}
@@ -1210,10 +1253,8 @@ struct JSContext {
*/
#define CREATE_BODY(parms) \
void *memory = this->malloc(sizeof(T)); \
if (!memory) { \
JS_ReportOutOfMemory(this); \
if (!memory) \
return NULL; \
} \
return new(memory) T parms;
template <class T>
@@ -1242,6 +1283,16 @@ struct JSContext {
p->~T();
this->free(p);
}
private:
/*
* The allocation code calls the function to indicate either OOM failure
* when p is null or that a memory pressure counter has reached some
* threshold when p is not null. The function takes the pointer and not
* a boolean flag to minimize the amount of code in its inlined callers.
*/
void checkMallocGCPressure(void *p);
};
#ifdef JS_THREADSAFE
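The jscntxt.h hunks above invert the direction of the counter: JSRuntime::gcMallocBytes is now a ptrdiff_t that starts at gcMaxMallocBytes and counts down, so the "limit reached" test is a cheap sign check, and the inline updateMallocCounter only branches to the out-of-line checkMallocGCPressure when the allocation failed or the counter underflowed (passing the pointer rather than a bool keeps those inlined callers small). The snippet below is a standalone illustration of the clamp and the sign test with a hypothetical CounterModel type; it is not the real JSRuntime.

// Standalone illustration of the downward-running counter; hypothetical
// CounterModel type, not SpiderMonkey code.
#include <stddef.h>
#include <stdio.h>

struct CounterModel {
    size_t maxBytes;        // models gcMaxMallocBytes
    ptrdiff_t bytesLeft;    // models gcMallocBytes: starts at max, runs down

    // Models setGCMaxMallocBytes: a value above PTRDIFF_T_MAX would turn
    // negative when cast, so clamp it to the largest positive value.
    void setMax(size_t value) {
        maxBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
        reset();
    }
    void reset() { bytesLeft = ptrdiff_t(maxBytes); }      // models resetGCMallocBytes
    bool limitReached() const { return bytesLeft <= 0; }   // models isGCMallocLimitReached
};

int main()
{
    CounterModel c;
    c.setMax(size_t(-1));                 // oversized limit gets clamped
    printf("reached after clamp: %d\n", c.limitReached());   // prints 0

    c.setMax(1 << 20);
    c.bytesLeft -= 1 << 20;               // spend the whole budget
    printf("reached after spend: %d\n", c.limitReached());   // prints 1
    return 0;
}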

View file

@@ -1313,7 +1313,9 @@ js_InitGC(JSRuntime *rt, uint32 maxbytes)
* Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
* for default backward API compatibility.
*/
rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
rt->gcMaxBytes = maxbytes;
rt->setGCMaxMallocBytes(maxbytes);
rt->gcEmptyArenaPoolLifespan = 30000;
/*
@@ -1739,17 +1741,6 @@ JSRuntime::setGCLastBytes(size_t lastBytes)
gcTriggerBytes = size_t(triggerBytes);
}
static inline void
RestoreGCArenaFreeList(JSGCThing **freeListp)
{
JSGCThing *freeListHead = *freeListp;
JS_ASSERT(freeListHead);
JSGCArenaInfo *a = THING_TO_ARENA(freeListHead);
JS_ASSERT(!a->finalizable.freeList);
a->finalizable.freeList = freeListHead;
*freeListp = NULL;
}
void
JSGCFreeLists::purge()
{
@@ -1758,9 +1749,13 @@ JSGCFreeLists::purge()
* run the finalizers over uninitialized bytes from free things.
*/
for (JSGCThing **p = finalizables; p != JS_ARRAY_END(finalizables); ++p) {
JSGCThing *thing = *p;
if (thing)
RestoreGCArenaFreeList(p);
JSGCThing *freeListHead = *p;
if (freeListHead) {
JSGCArenaInfo *a = THING_TO_ARENA(freeListHead);
JS_ASSERT(!a->finalizable.freeList);
a->finalizable.freeList = freeListHead;
*p = NULL;
}
}
doubles = NULL;
}
@@ -1778,8 +1773,7 @@ IsGCThresholdReached(JSRuntime *rt)
* zero (see the js_InitGC function) the return value is false when
* the gcBytes value is close to zero at the JS engine start.
*/
return rt->gcMallocBytes >= rt->gcMaxMallocBytes ||
rt->gcBytes >= rt->gcTriggerBytes;
return rt->isGCMallocLimitReached() || rt->gcBytes >= rt->gcTriggerBytes;
}
static JSGCThing *
@@ -1795,18 +1789,6 @@ RefillFinalizableFreeList(JSContext *cx, unsigned thingKind)
return NULL;
}
#ifdef JS_THREADSAFE
/* Transfer thread-local counter to global one. */
size_t localMallocBytes = JS_THREAD_DATA(cx)->gcMallocBytes;
if (localMallocBytes != 0) {
JS_THREAD_DATA(cx)->gcMallocBytes = 0;
if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
rt->gcMallocBytes = rt->gcMaxMallocBytes;
else
rt->gcMallocBytes += localMallocBytes;
}
#endif
METER(JSGCArenaStats *astats = &cx->runtime->gcStats.arenaStats[thingKind]);
bool canGC = !JS_ON_TRACE(cx);
bool doGC = canGC && IsGCThresholdReached(rt);
@@ -1895,37 +1877,19 @@ NewFinalizableGCThing(JSContext *cx, unsigned thingKind)
JSGCThing **freeListp =
JS_THREAD_DATA(cx)->gcFreeLists.finalizables + thingKind;
JSGCThing *thing = *freeListp;
JSRuntime *rt = cx->runtime;
#ifdef JS_TRACER
bool fromTraceReserve = false;
#endif
for (;;) {
if (thing) {
#ifdef JS_THREADSAFE
bool tooMuchMalloc = (rt->gcMaxMallocBytes - rt->gcMallocBytes <=
JS_THREAD_DATA(cx)->gcMallocBytes);
#else
bool tooMuchMalloc = (rt->gcMaxMallocBytes <= rt->gcMallocBytes);
#endif
if (!tooMuchMalloc || JS_ON_TRACE(cx)) {
*freeListp = thing->link;
METER(astats->localalloc++);
break;
}
/*
* We will try to run the GC in RefillFinalizableFreeList and need
* to put the free list starting at thing back into its arena.
* Without this, if the GC is canceled, we would lose this
* free list when assigning after RefillFinalizableFreeList
* returns and would eventually run finalizers on free GC things.
*/
RestoreGCArenaFreeList(freeListp);
*freeListp = thing->link;
METER(astats->localalloc++);
break;
}
#if defined JS_GC_ZEAL && defined JS_TRACER
if (rt->gcZeal >= 1 && JS_TRACE_MONITOR(cx).useReservedObjects)
if (cx->runtime->gcZeal >= 1 && JS_TRACE_MONITOR(cx).useReservedObjects)
goto testReservedObjects;
#endif
@@ -3513,7 +3477,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
rt->gcIsNeeded = JS_FALSE;
/* Reset malloc counter. */
rt->gcMallocBytes = 0;
rt->resetGCMallocBytes();
#ifdef JS_DUMP_SCOPE_METERS
{ extern void js_DumpScopeMeters(JSRuntime *rt);
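The jsgc.cpp hunks above show the payoff: NewFinalizableGCThing no longer compares malloc counters at all, its fast path is a single free-list-head test, and RefillFinalizableFreeList no longer has to migrate a thread-local byte count into the runtime. Emptying the free lists in checkMallocGCPressure is therefore enough to steer the next allocation onto the slow path where the GC can run. Below is a minimal model of that purge/refill split; Thing, FreeLists, allocate and refillSlowPath are illustrative names, not the SpiderMonkey ones.

// Minimal model of the purge/refill split; not SpiderMonkey code.
#include <stddef.h>

struct Thing { Thing *link; };

struct FreeLists {
    static const unsigned KINDS = 4;
    Thing *heads[KINDS];

    FreeLists() { purge(); }

    // Models JSGCFreeLists::purge: the real code hands each cached list back
    // to its arena; here we only null the heads so the fast path misses.
    void purge() {
        for (unsigned i = 0; i < KINDS; i++)
            heads[i] = NULL;
    }
};

// Models RefillFinalizableFreeList: the real code may run the GC and then
// carve fresh things out of an arena; the stub just reports a miss.
static Thing *refillSlowPath(unsigned /* thingKind */) { return NULL; }

// Models NewFinalizableGCThing: the fast path is one pointer test and never
// consults malloc pressure.
Thing *allocate(FreeLists &fl, unsigned thingKind)
{
    if (Thing *thing = fl.heads[thingKind]) {
        fl.heads[thingKind] = thing->link;    // pop the list head
        return thing;
    }
    return refillSlowPath(thingKind);         // may trigger a GC first
}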

View file

@@ -391,7 +391,7 @@ JSScope::changeTable(JSContext *cx, int change)
table = newtable;
/* Treat the above calloc as a JS_malloc, to match CreateScopeTable. */
cx->runtime->gcMallocBytes += nbytes;
cx->updateMallocCounter(nbytes);
/* Copy only live entries, leaving removed and free ones behind. */
for (oldspp = oldtable; oldsize != 0; oldspp++) {