Backed out 8 changesets (bug 1141234) for bustage.

Backed out changeset 9c7dd01f338e (bug 1141234)
Backed out changeset df39d787c523 (bug 1141234)
Backed out changeset 3632c514a358 (bug 1141234)
Backed out changeset 1e8743d47b41 (bug 1141234)
Backed out changeset ddd5d5191e49 (bug 1141234)
Backed out changeset 274b1f5afc29 (bug 1141234)
Backed out changeset 65e299373b5b (bug 1141234)
Backed out changeset eff0109392e9 (bug 1141234)
Ryan VanderMeulen 2015-03-12 15:01:16 -04:00
Parent 473dfdcf5f
Commit 8fbe69b141
8 changed files: 357 additions and 375 deletions

View file

@@ -95,22 +95,9 @@ static MOZ_NEVER_INLINE void js_failedAllocBreakpoint() { asm(""); }
} \
} while (0)
namespace js {
namespace oom {
static inline bool ShouldFailWithOOM()
{
if (++OOM_counter > OOM_maxAllocations) {
JS_OOM_CALL_BP_FUNC();
return true;
}
return false;
}
}
}
# else
# define JS_OOM_POSSIBLY_FAIL() do {} while(0)
# define JS_OOM_POSSIBLY_FAIL_BOOL() do {} while(0)
namespace js { namespace oom { static inline bool ShouldFailWithOOM() { return false; } } }
# endif /* DEBUG || JS_OOM_BREAKPOINT */
static inline void* js_malloc(size_t bytes)
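The hunk above removes the debug-only OOM-injection helper: every allocation bumps a global counter, and once the counter passes a configured maximum the helper reports a simulated failure (and optionally hits a breakpoint). Below is a minimal standalone model of that counter-based fault-injection pattern; the names (g_allocCounter, g_failAfter, SimulatedOOM, test_malloc) are hypothetical stand-ins, not the SpiderMonkey globals.

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static std::uint64_t g_allocCounter = 0;       // bumped on every allocation attempt
static std::uint64_t g_failAfter = UINT64_MAX; // threshold a test harness would set

static inline bool SimulatedOOM() {
    // Mirrors the ShouldFailWithOOM() logic: succeed for the first g_failAfter
    // allocations, then fail every allocation after that.
    return ++g_allocCounter > g_failAfter;
}

static void *test_malloc(std::size_t bytes) {
    if (SimulatedOOM())
        return nullptr;                        // pretend the heap is exhausted
    return std::malloc(bytes);
}

int main() {
    g_failAfter = 3;                           // allocations after the third fail
    for (int i = 1; i <= 5; i++) {
        void *p = test_malloc(8);
        std::printf("alloc %d -> %s\n", i, p ? "ok" : "simulated OOM");
        std::free(p);                          // free(nullptr) is a no-op
    }
    return 0;
}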

View file

@@ -6,11 +6,8 @@
#include "gc/Allocator.h"
#include "mozilla/UniquePtr.h"
#include "jscntxt.h"
#include "gc/GCInternals.h"
#include "gc/GCTrace.h"
#include "gc/Nursery.h"
#include "jit/JitCompartment.h"
@@ -22,23 +19,101 @@
using namespace js;
using namespace gc;
static inline bool
ShouldNurseryAllocateObject(const Nursery &nursery, InitialHeap heap)
{
return nursery.isEnabled() && heap != TenuredHeap;
}
/*
* Attempt to allocate a new GC thing out of the nursery. If there is not enough
* room in the nursery or there is an OOM, this method will return nullptr.
*/
template <AllowGC allowGC>
bool
GCRuntime::checkAllocatorState(JSContext *cx, AllocKind kind)
inline JSObject *
TryNewNurseryObject(JSContext *cx, size_t thingSize, size_t nDynamicSlots, const Class *clasp)
{
MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));
JSRuntime *rt = cx->runtime();
Nursery &nursery = rt->gc.nursery;
JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
if (obj)
return obj;
if (allowGC && !rt->mainThread.suppressGC) {
cx->minorGC(JS::gcreason::OUT_OF_NURSERY);
/* Exceeding gcMaxBytes while tenuring can disable the Nursery. */
if (nursery.isEnabled()) {
JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
MOZ_ASSERT(obj);
return obj;
}
}
return nullptr;
}
static inline bool
PossiblyFail()
{
JS_OOM_POSSIBLY_FAIL_BOOL();
return true;
}
static inline bool
GCIfNeeded(ExclusiveContext *cx)
{
if (cx->isJSContext()) {
JSContext *ncx = cx->asJSContext();
JSRuntime *rt = ncx->runtime();
#ifdef JS_GC_ZEAL
if (rt->gc.needZealousGC())
rt->gc.runDebugGC();
#endif
// Invoking the interrupt callback can fail and we can't usefully
// handle that here. Just check in case we need to collect instead.
if (rt->hasPendingInterrupt())
rt->gc.gcIfRequested(ncx);
// If we have grown past our GC heap threshold while in the middle of
// an incremental GC, we're growing faster than we're GCing, so stop
// the world and do a full, non-incremental GC right now, if possible.
if (rt->gc.isIncrementalGCInProgress() &&
cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
{
PrepareZoneForGC(cx->zone());
AutoKeepAtoms keepAtoms(cx->perThreadData);
rt->gc.gc(GC_NORMAL, JS::gcreason::INCREMENTAL_TOO_SLOW);
}
}
return true;
}
template <AllowGC allowGC>
static inline bool
CheckAllocatorState(ExclusiveContext *cx, AllocKind kind)
{
if (allowGC) {
if (!gcIfNeededPerAllocation(cx))
if (!GCIfNeeded(cx))
return false;
}
if (!cx->isJSContext())
return true;
JSContext *ncx = cx->asJSContext();
JSRuntime *rt = ncx->runtime();
#if defined(JS_GC_ZEAL) || defined(DEBUG)
MOZ_ASSERT_IF(rt->isAtomsCompartment(cx->compartment()),
MOZ_ASSERT_IF(rt->isAtomsCompartment(ncx->compartment()),
kind == FINALIZE_STRING ||
kind == FINALIZE_FAT_INLINE_STRING ||
kind == FINALIZE_SYMBOL ||
kind == FINALIZE_JITCODE);
MOZ_ASSERT(!rt->isHeapBusy());
MOZ_ASSERT(isAllocAllowed());
MOZ_ASSERT(rt->gc.isAllocAllowed());
#endif
// Crash if we perform a GC action when it is not safe.
@@ -46,8 +121,8 @@ GCRuntime::checkAllocatorState(JSContext *cx, AllocKind kind)
JS::AutoAssertOnGC::VerifyIsSafeToGC(rt);
// For testing out of memory conditions
if (js::oom::ShouldFailWithOOM()) {
ReportOutOfMemory(cx);
if (!PossiblyFail()) {
ReportOutOfMemory(ncx);
return false;
}
@@ -55,8 +130,8 @@ GCRuntime::checkAllocatorState(JSContext *cx, AllocKind kind)
}
template <typename T>
/* static */ void
GCRuntime::checkIncrementalZoneState(ExclusiveContext *cx, T *t)
static inline void
CheckIncrementalZoneState(ExclusiveContext *cx, T *t)
{
#ifdef DEBUG
if (!cx->isJSContext())
@@ -68,37 +143,13 @@ GCRuntime::checkIncrementalZoneState(ExclusiveContext *cx, T *t)
#endif
}
bool
GCRuntime::gcIfNeededPerAllocation(JSContext *cx)
{
#ifdef JS_GC_ZEAL
if (needZealousGC())
runDebugGC();
#endif
/*
* Allocate a new GC thing. After a successful allocation the caller must
* fully initialize the thing before calling any function that can potentially
* trigger GC. This will ensure that GC tracing never sees junk values stored
* in the partially initialized thing.
*/
// Invoking the interrupt callback can fail and we can't usefully
// handle that here. Just check in case we need to collect instead.
if (rt->hasPendingInterrupt())
gcIfRequested(cx);
// If we have grown past our GC heap threshold while in the middle of
// an incremental GC, we're growing faster than we're GCing, so stop
// the world and do a full, non-incremental GC right now, if possible.
if (isIncrementalGCInProgress() &&
cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
{
PrepareZoneForGC(cx->zone());
AutoKeepAtoms keepAtoms(cx->perThreadData);
gc(GC_NORMAL, JS::gcreason::INCREMENTAL_TOO_SLOW);
}
return true;
}
// Allocate a new GC thing. After a successful allocation the caller must
// fully initialize the thing before calling any function that can potentially
// trigger GC. This will ensure that GC tracing never sees junk values stored
// in the partially initialized thing.
template <typename T, AllowGC allowGC /* = CanGC */>
JSObject *
js::Allocate(ExclusiveContext *cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
@@ -113,17 +164,14 @@ js::Allocate(ExclusiveContext *cx, AllocKind kind, size_t nDynamicSlots, Initial
static_assert(sizeof(JSObject_Slots0) >= CellSize,
"All allocations must be at least the allocator-imposed minimum size.");
// Off-main-thread alloc cannot trigger GC or make runtime assertions.
if (!cx->isJSContext())
return GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
JSContext *ncx = cx->asJSContext();
JSRuntime *rt = ncx->runtime();
if (!rt->gc.checkAllocatorState<allowGC>(ncx, kind))
if (!CheckAllocatorState<allowGC>(cx, kind))
return nullptr;
if (ncx->nursery().isEnabled() && heap != TenuredHeap) {
JSObject *obj = rt->gc.tryNewNurseryObject<allowGC>(ncx, thingSize, nDynamicSlots, clasp);
if (cx->isJSContext() &&
ShouldNurseryAllocateObject(cx->asJSContext()->nursery(), heap))
{
JSObject *obj = TryNewNurseryObject<allowGC>(cx->asJSContext(), thingSize, nDynamicSlots,
clasp);
if (obj)
return obj;
@@ -136,7 +184,26 @@ js::Allocate(ExclusiveContext *cx, AllocKind kind, size_t nDynamicSlots, Initial
return nullptr;
}
return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize, nDynamicSlots);
HeapSlot *slots = nullptr;
if (nDynamicSlots) {
slots = cx->zone()->pod_malloc<HeapSlot>(nDynamicSlots);
if (MOZ_UNLIKELY(!slots))
return nullptr;
Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
}
JSObject *obj = reinterpret_cast<JSObject *>(cx->arenas()->allocateFromFreeList(kind, thingSize));
if (!obj)
obj = reinterpret_cast<JSObject *>(GCRuntime::refillFreeListFromAnyThread<allowGC>(cx, kind));
if (obj)
obj->setInitialSlotsMaybeNonNative(slots);
else
js_free(slots);
CheckIncrementalZoneState(cx, obj);
TraceTenuredAlloc(obj, kind);
return obj;
}
template JSObject *js::Allocate<JSObject, NoGC>(ExclusiveContext *cx, gc::AllocKind kind,
size_t nDynamicSlots, gc::InitialHeap heap,
@@ -145,65 +212,6 @@ template JSObject *js::Allocate<JSObject, CanGC>(ExclusiveContext *cx, gc::Alloc
size_t nDynamicSlots, gc::InitialHeap heap,
const Class *clasp);
// Attempt to allocate a new GC thing out of the nursery. If there is not enough
// room in the nursery or there is an OOM, this method will return nullptr.
template <AllowGC allowGC>
JSObject *
GCRuntime::tryNewNurseryObject(JSContext *cx, size_t thingSize, size_t nDynamicSlots, const Class *clasp)
{
MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));
JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
if (obj)
return obj;
if (allowGC && !rt->mainThread.suppressGC) {
minorGC(cx, JS::gcreason::OUT_OF_NURSERY);
// Exceeding gcMaxBytes while tenuring can disable the Nursery.
if (nursery.isEnabled()) {
JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
MOZ_ASSERT(obj);
return obj;
}
}
return nullptr;
}
typedef mozilla::UniquePtr<HeapSlot, JS::FreePolicy> UniqueSlots;
static inline UniqueSlots
MakeSlotArray(ExclusiveContext *cx, size_t count)
{
HeapSlot *slots = nullptr;
if (count) {
slots = cx->zone()->pod_malloc<HeapSlot>(count);
if (slots)
Debug_SetSlotRangeToCrashOnTouch(slots, count);
}
return UniqueSlots(slots);
}
template <AllowGC allowGC>
JSObject *
GCRuntime::tryNewTenuredObject(ExclusiveContext *cx, AllocKind kind, size_t thingSize,
size_t nDynamicSlots)
{
// The analysis is not aware that the HeapSlots in |slots| here are
// disconnected because they are uninitialized.
AutoSuppressGCAnalysis suppress(cx);
UniqueSlots slots = MakeSlotArray(cx, nDynamicSlots);
if (nDynamicSlots && !slots)
return nullptr;
JSObject *obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);
if (obj)
obj->setInitialSlotsMaybeNonNative(slots.release());
return obj;
}
template <typename T, AllowGC allowGC /* = CanGC */>
T *
js::Allocate(ExclusiveContext *cx)
@@ -214,15 +222,18 @@ js::Allocate(ExclusiveContext *cx)
AllocKind kind = MapTypeToFinalizeKind<T>::kind;
size_t thingSize = sizeof(T);
MOZ_ASSERT(thingSize == Arena::thingSize(kind));
if (!CheckAllocatorState<allowGC>(cx, kind))
return nullptr;
if (cx->isJSContext()) {
JSContext *ncx = cx->asJSContext();
if (!ncx->runtime()->gc.checkAllocatorState<allowGC>(ncx, kind))
return nullptr;
}
T *t = static_cast<T *>(cx->arenas()->allocateFromFreeList(kind, thingSize));
if (!t)
t = static_cast<T *>(GCRuntime::refillFreeListFromAnyThread<allowGC>(cx, kind));
return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
CheckIncrementalZoneState(cx, t);
gc::TraceTenuredAlloc(t, kind);
return t;
}
#define FOR_ALL_NON_OBJECT_GC_LAYOUTS(macro) \
@@ -243,195 +254,3 @@ js::Allocate(ExclusiveContext *cx)
template type *js::Allocate<type, CanGC>(ExclusiveContext *cx);
FOR_ALL_NON_OBJECT_GC_LAYOUTS(DECL_ALLOCATOR_INSTANCES)
#undef DECL_ALLOCATOR_INSTANCES
template <typename T, AllowGC allowGC>
/* static */ T *
GCRuntime::tryNewTenuredThing(ExclusiveContext *cx, AllocKind kind, size_t thingSize)
{
T *t = reinterpret_cast<T *>(cx->arenas()->allocateFromFreeList(kind, thingSize));
if (!t)
t = reinterpret_cast<T *>(refillFreeListFromAnyThread<allowGC>(cx, kind));
checkIncrementalZoneState(cx, t);
TraceTenuredAlloc(t, kind);
return t;
}
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromAnyThread(ExclusiveContext *cx, AllocKind thingKind)
{
MOZ_ASSERT(cx->arenas()->freeLists[thingKind].isEmpty());
if (cx->isJSContext())
return refillFreeListFromMainThread<allowGC>(cx->asJSContext(), thingKind);
return refillFreeListOffMainThread(cx, thingKind);
}
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
JSRuntime *rt = cx->runtime();
MOZ_ASSERT(!rt->isHeapBusy(), "allocating while under GC");
MOZ_ASSERT_IF(allowGC, !rt->currentThreadHasExclusiveAccess());
// Try to allocate; synchronize with background GC threads if necessary.
void *thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (MOZ_LIKELY(thing))
return thing;
// Perform a last-ditch GC to hopefully free up some memory.
{
// If we are doing a fallible allocation, percolate up the OOM
// instead of reporting it.
if (!allowGC)
return nullptr;
JS::PrepareForFullGC(rt);
AutoKeepAtoms keepAtoms(cx->perThreadData);
rt->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
}
// Retry the allocation after the last-ditch GC.
thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (thing)
return thing;
// We are really just totally out of memory.
MOZ_ASSERT(allowGC, "A fallible allocation must not report OOM on failure.");
ReportOutOfMemory(cx);
return nullptr;
}
/* static */ void *
GCRuntime::tryRefillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
ArenaLists *arenas = cx->arenas();
Zone *zone = cx->zone();
AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (MOZ_LIKELY(thing))
return thing;
// Even if allocateFromArena failed due to OOM, a background
// finalization or allocation task may be running freeing more memory
// or adding more available memory to our free pool; wait for them to
// finish, then try to allocate again in case they made more memory
// available.
cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();
thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (thing)
return thing;
return nullptr;
}
/* static */ void *
GCRuntime::refillFreeListOffMainThread(ExclusiveContext *cx, AllocKind thingKind)
{
ArenaLists *arenas = cx->arenas();
Zone *zone = cx->zone();
JSRuntime *rt = zone->runtimeFromAnyThread();
AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
// If we're off the main thread, we try to allocate once and return
// whatever value we get. We need to first ensure the main thread is not in
// a GC session.
AutoLockHelperThreadState lock;
while (rt->isHeapBusy())
HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);
void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (thing)
return thing;
ReportOutOfMemory(cx);
return nullptr;
}
TenuredCell *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind,
AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc)
{
JSRuntime *rt = zone->runtimeFromAnyThread();
Maybe<AutoLockGC> maybeLock;
// See if we can proceed without taking the GC lock.
if (backgroundFinalizeState[thingKind] != BFS_DONE)
maybeLock.emplace(rt);
ArenaList &al = arenaLists[thingKind];
ArenaHeader *aheader = al.takeNextArena();
if (aheader) {
// Empty arenas should be immediately freed.
MOZ_ASSERT(!aheader->isEmpty());
return allocateFromArenaInner<HasFreeThings>(zone, aheader, thingKind);
}
// Parallel threads have their own ArenaLists, but chunks are shared;
// if we haven't already, take the GC lock now to avoid racing.
if (maybeLock.isNothing())
maybeLock.emplace(rt);
Chunk *chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
if (!chunk)
return nullptr;
// Although our chunk should definitely have enough space for another arena,
// there are other valid reasons why Chunk::allocateArena() may fail.
aheader = rt->gc.allocateArena(chunk, zone, thingKind, maybeLock.ref());
if (!aheader)
return nullptr;
MOZ_ASSERT(!maybeLock->wasUnlocked());
MOZ_ASSERT(al.isCursorAtEnd());
al.insertAtCursor(aheader);
return allocateFromArenaInner<IsEmpty>(zone, aheader, thingKind);
}
template <ArenaLists::ArenaAllocMode hasFreeThings>
TenuredCell *
ArenaLists::allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader, AllocKind kind)
{
size_t thingSize = Arena::thingSize(kind);
FreeSpan span;
if (hasFreeThings) {
MOZ_ASSERT(aheader->hasFreeThings());
span = aheader->getFirstFreeSpan();
aheader->setAsFullyUsed();
} else {
MOZ_ASSERT(!aheader->hasFreeThings());
Arena *arena = aheader->getArena();
span.initFinal(arena->thingsStart(kind), arena->thingsEnd() - thingSize, thingSize);
}
freeLists[kind].setHead(&span);
if (MOZ_UNLIKELY(zone->wasGCStarted()))
zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, aheader);
TenuredCell *thing = freeLists[kind].allocate(thingSize);
MOZ_ASSERT(thing); // This allocation is infallible.
return thing;
}
void
GCRuntime::arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena)
{
if (zone->needsIncrementalBarrier()) {
arena->allocatedDuringIncremental = true;
marker.delayMarkingArena(arena);
} else if (zone->isGCSweeping()) {
arena->setNextAllocDuringSweep(arenasAllocatedDuringSweep);
arenasAllocatedDuringSweep = arena;
}
}
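The main-thread slow path restored above (refillFreeListFromMainThread) follows a try / last-ditch-GC / retry / report-OOM shape. The following is a self-contained sketch of that control flow only; FakeHeap, tryAllocate, and lastDitchCollect are invented stand-ins, and the bool allowGC parameter models the AllowGC template parameter.

#include <cstdio>

struct FakeHeap {
    int freeCells = 0;                  // cells currently available
    int reclaimable = 4;                // cells a shrinking GC could recover

    void *tryAllocate() {
        if (freeCells == 0)
            return nullptr;
        --freeCells;
        static int dummy;
        return &dummy;                  // placeholder "cell"
    }
    void lastDitchCollect() {           // models the GC_SHRINK / LAST_DITCH collection
        freeCells += reclaimable;
        reclaimable = 0;
    }
};

void *refill(FakeHeap &heap, bool allowGC) {
    if (void *thing = heap.tryAllocate())
        return thing;                   // fast path: memory was already available
    if (!allowGC)
        return nullptr;                 // fallible allocation: percolate the OOM up
    heap.lastDitchCollect();            // last-ditch GC to hopefully free memory
    if (void *thing = heap.tryAllocate())
        return thing;                   // retry after the collection
    std::fprintf(stderr, "out of memory\n");   // really out of memory: report it
    return nullptr;
}

int main() {
    FakeHeap heap;                                   // starts with no free cells
    std::printf("NoGC:  %p\n", refill(heap, false)); // fails without collecting
    std::printf("CanGC: %p\n", refill(heap, true));  // succeeds after the last-ditch GC
    return 0;
}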

View file

@@ -149,29 +149,6 @@ struct MovingTracer : JSTracer {
}
};
class AutoMaybeStartBackgroundAllocation
{
private:
JSRuntime *runtime;
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
public:
explicit AutoMaybeStartBackgroundAllocation(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM)
: runtime(nullptr)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
}
void tryToStartBackgroundAllocation(JSRuntime *rt) {
runtime = rt;
}
~AutoMaybeStartBackgroundAllocation() {
if (runtime)
runtime->gc.startBackgroundAllocTaskIfIdle();
}
};
} /* namespace gc */
} /* namespace js */
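The class removed here reappears later in this backout: it is an RAII guard that lets the allocation path opt in to background chunk allocation while locks may be held, deferring the actual start of the task to the guard's destructor. A minimal standalone sketch of that pattern, with a hypothetical Runtime type standing in for JSRuntime:

#include <cstdio>

struct Runtime {
    void startBackgroundAllocTaskIfIdle() { std::puts("background alloc task started"); }
};

class AutoMaybeStartBackgroundAllocation {
    Runtime *runtime = nullptr;        // stays null until someone opts in
  public:
    void tryToStartBackgroundAllocation(Runtime *rt) { runtime = rt; }
    ~AutoMaybeStartBackgroundAllocation() {
        if (runtime)                   // deferred to scope exit, after locks are gone
            runtime->startBackgroundAllocTaskIfIdle();
    }
};

int main() {
    Runtime rt;
    {
        AutoMaybeStartBackgroundAllocation maybeStart;
        // ... allocation path decides more chunks will be needed ...
        maybeStart.tryToStartBackgroundAllocation(&rt);
    }   // destructor runs here and starts the task
    return 0;
}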

View file

@@ -821,6 +821,10 @@ class GCRuntime
bool isVerifyPreBarriersEnabled() const { return false; }
#endif
template <AllowGC allowGC>
static void *refillFreeListFromAnyThread(ExclusiveContext *cx, AllocKind thingKind);
static void *refillFreeListInGC(Zone *zone, AllocKind thingKind);
// Free certain LifoAlloc blocks from the background sweep thread.
void freeUnusedLifoBlocksAfterSweeping(LifoAlloc *lifo);
void freeAllLifoBlocksAfterSweeping(LifoAlloc *lifo);
@@ -830,19 +834,6 @@ class GCRuntime
void releaseHeldRelocatedArenas();
// Allocator
template <AllowGC allowGC>
bool checkAllocatorState(JSContext *cx, AllocKind kind);
template <AllowGC allowGC>
JSObject *tryNewNurseryObject(JSContext *cx, size_t thingSize, size_t nDynamicSlots,
const Class *clasp);
template <AllowGC allowGC>
static JSObject *tryNewTenuredObject(ExclusiveContext *cx, AllocKind kind, size_t thingSize,
size_t nDynamicSlots);
template <typename T, AllowGC allowGC>
static T *tryNewTenuredThing(ExclusiveContext *cx, AllocKind kind, size_t thingSize);
static void *refillFreeListInGC(Zone *zone, AllocKind thingKind);
private:
enum IncrementalProgress
{
@@ -857,14 +848,8 @@ class GCRuntime
Chunk *pickChunk(const AutoLockGC &lock,
AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc);
ArenaHeader *allocateArena(Chunk *chunk, Zone *zone, AllocKind kind, const AutoLockGC &lock);
void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
inline void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
// Allocator internals
bool gcIfNeededPerAllocation(JSContext *cx);
template <typename T>
static void checkIncrementalZoneState(ExclusiveContext *cx, T *t);
template <AllowGC allowGC>
static void *refillFreeListFromAnyThread(ExclusiveContext *cx, AllocKind thingKind);
template <AllowGC allowGC>
static void *refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind);
static void *tryRefillFreeListFromMainThread(JSContext *cx, AllocKind thingKind);
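These allocator entry points are declared as templates over AllowGC and defined in a .cpp file, which is why the diff also carries explicit instantiations for <NoGC> and <CanGC>. The sketch below shows that header/source split in isolation; refillFreeList and the surrounding layout are illustrative, not the real GCRuntime API.

// --- what would live in the header ---
enum AllowGC { NoGC = 0, CanGC = 1 };

template <AllowGC allowGC>
void *refillFreeList(int thingKind);

// --- what would live in the .cpp ---
#include <cstdio>

template <AllowGC allowGC>
void *refillFreeList(int thingKind) {
    (void)thingKind;
    if (!allowGC)
        return nullptr;                // fallible variant: never collects
    static int cell;
    return &cell;                      // pretend a collection made room
}

// Explicit instantiations keep the definitions out of the header while still
// giving other translation units both variants to link against.
template void *refillFreeList<NoGC>(int thingKind);
template void *refillFreeList<CanGC>(int thingKind);

int main() {
    std::printf("NoGC:  %p\nCanGC: %p\n",
                refillFreeList<NoGC>(0), refillFreeList<CanGC>(0));
    return 0;
}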

View file

@@ -478,8 +478,7 @@ js::Nursery::allocateFromTenured(Zone *zone, AllocKind thingKind)
if (t)
return t;
zone->arenas.checkEmptyFreeList(thingKind);
AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
return zone->arenas.allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
return zone->arenas.allocateFromArena(zone, thingKind);
}
void
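After the backout, this call site uses the two-argument allocateFromArena overload (restored in jsgc.cpp below), which constructs its own AutoMaybeStartBackgroundAllocation internally instead of requiring one at every caller. A toy model of that convenience-overload pattern, with stand-in types:

#include <cstdio>

struct Guard {                          // trivial stand-in for the RAII helper
    ~Guard() { std::puts("guard released"); }
};

struct Arenas {
    void *allocateFromArena(int kind, Guard &) {   // full version
        (void)kind;
        static int cell;
        return &cell;
    }
    void *allocateFromArena(int kind) {            // convenience overload
        Guard guard;                               // owns the guard itself
        return allocateFromArena(kind, guard);
    }
};

int main() {
    Arenas arenas;
    std::printf("%p\n", arenas.allocateFromArena(0));  // no guard at the call site
    return 0;
}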

View file

@@ -187,7 +187,7 @@ class GCMarker : public JSTracer
}
uint32_t markColor() const { return color; }
void delayMarkingArena(gc::ArenaHeader *aheader);
inline void delayMarkingArena(gc::ArenaHeader *aheader);
void delayMarkingChildren(const void *thing);
void markDelayedChildren(gc::ArenaHeader *aheader);
bool markDelayedChildren(SliceBudget &budget);

View file

@@ -982,6 +982,29 @@ GCRuntime::startBackgroundAllocTaskIfIdle()
allocTask.startWithLockHeld();
}
class js::gc::AutoMaybeStartBackgroundAllocation
{
private:
JSRuntime *runtime;
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
public:
explicit AutoMaybeStartBackgroundAllocation(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM)
: runtime(nullptr)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
}
void tryToStartBackgroundAllocation(JSRuntime *rt) {
runtime = rt;
}
~AutoMaybeStartBackgroundAllocation() {
if (runtime)
runtime->gc.startBackgroundAllocTaskIfIdle();
}
};
Chunk *
GCRuntime::pickChunk(const AutoLockGC &lock,
AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation)
@@ -1741,7 +1764,7 @@ ZoneHeapThreshold::updateForRemovedArena(const GCSchedulingTunables &tunables)
gcTriggerBytes_ -= amount;
}
void
inline void
GCMarker::delayMarkingArena(ArenaHeader *aheader)
{
if (aheader->hasDelayedMarking) {
@@ -1774,6 +1797,92 @@ ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
}
}
inline void
GCRuntime::arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena)
{
if (zone->needsIncrementalBarrier()) {
arena->allocatedDuringIncremental = true;
marker.delayMarkingArena(arena);
} else if (zone->isGCSweeping()) {
arena->setNextAllocDuringSweep(arenasAllocatedDuringSweep);
arenasAllocatedDuringSweep = arena;
}
}
TenuredCell *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind)
{
AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
return allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
}
TenuredCell *
ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind,
AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc)
{
JSRuntime *rt = zone->runtimeFromAnyThread();
Maybe<AutoLockGC> maybeLock;
// See if we can proceed without taking the GC lock.
if (backgroundFinalizeState[thingKind] != BFS_DONE)
maybeLock.emplace(rt);
ArenaList &al = arenaLists[thingKind];
ArenaHeader *aheader = al.takeNextArena();
if (aheader) {
// Empty arenas should be immediately freed.
MOZ_ASSERT(!aheader->isEmpty());
return allocateFromArenaInner<HasFreeThings>(zone, aheader, thingKind);
}
// Parallel threads have their own ArenaLists, but chunks are shared;
// if we haven't already, take the GC lock now to avoid racing.
if (maybeLock.isNothing())
maybeLock.emplace(rt);
Chunk *chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
if (!chunk)
return nullptr;
// Although our chunk should definitely have enough space for another arena,
// there are other valid reasons why Chunk::allocateArena() may fail.
aheader = rt->gc.allocateArena(chunk, zone, thingKind, maybeLock.ref());
if (!aheader)
return nullptr;
MOZ_ASSERT(!maybeLock->wasUnlocked());
MOZ_ASSERT(al.isCursorAtEnd());
al.insertAtCursor(aheader);
return allocateFromArenaInner<IsEmpty>(zone, aheader, thingKind);
}
template <ArenaLists::ArenaAllocMode hasFreeThings>
inline TenuredCell *
ArenaLists::allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader, AllocKind thingKind)
{
size_t thingSize = Arena::thingSize(thingKind);
FreeSpan span;
if (hasFreeThings) {
MOZ_ASSERT(aheader->hasFreeThings());
span = aheader->getFirstFreeSpan();
aheader->setAsFullyUsed();
} else {
MOZ_ASSERT(!aheader->hasFreeThings());
Arena *arena = aheader->getArena();
span.initFinal(arena->thingsStart(thingKind), arena->thingsEnd() - thingSize, thingSize);
}
freeLists[thingKind].setHead(&span);
if (MOZ_UNLIKELY(zone->wasGCStarted()))
zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, aheader);
TenuredCell *thing = freeLists[thingKind].allocate(thingSize);
MOZ_ASSERT(thing); // This allocation is infallible.
return thing;
}
/* Compacting GC */
bool
@@ -2861,6 +2970,110 @@ ArenaLists::queueForegroundThingsForSweep(FreeOp *fop)
gcScriptArenasToUpdate = arenaListsToSweep[FINALIZE_SCRIPT];
}
/* static */ void *
GCRuntime::tryRefillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
ArenaLists *arenas = cx->arenas();
Zone *zone = cx->zone();
AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (MOZ_LIKELY(thing))
return thing;
// Even if allocateFromArena failed due to OOM, a background
// finalization or allocation task may be running freeing more memory
// or adding more available memory to our free pool; wait for them to
// finish, then try to allocate again in case they made more memory
// available.
cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();
thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (thing)
return thing;
return nullptr;
}
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
{
JSRuntime *rt = cx->runtime();
MOZ_ASSERT(!rt->isHeapBusy(), "allocating while under GC");
MOZ_ASSERT_IF(allowGC, !rt->currentThreadHasExclusiveAccess());
// Try to allocate; synchronize with background GC threads if necessary.
void *thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (MOZ_LIKELY(thing))
return thing;
// Perform a last-ditch GC to hopefully free up some memory.
{
// If we are doing a fallible allocation, percolate up the OOM
// instead of reporting it.
if (!allowGC)
return nullptr;
JS::PrepareForFullGC(rt);
AutoKeepAtoms keepAtoms(cx->perThreadData);
rt->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
}
// Retry the allocation after the last-ditch GC.
thing = tryRefillFreeListFromMainThread(cx, thingKind);
if (thing)
return thing;
// We are really just totally out of memory.
MOZ_ASSERT(allowGC, "A fallible allocation must not report OOM on failure.");
ReportOutOfMemory(cx);
return nullptr;
}
/* static */ void *
GCRuntime::refillFreeListOffMainThread(ExclusiveContext *cx, AllocKind thingKind)
{
ArenaLists *arenas = cx->arenas();
Zone *zone = cx->zone();
JSRuntime *rt = zone->runtimeFromAnyThread();
AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
// If we're off the main thread, we try to allocate once and return
// whatever value we get. We need to first ensure the main thread is not in
// a GC session.
AutoLockHelperThreadState lock;
while (rt->isHeapBusy())
HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);
void *thing = arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
if (thing)
return thing;
ReportOutOfMemory(cx);
return nullptr;
}
template <AllowGC allowGC>
/* static */ void *
GCRuntime::refillFreeListFromAnyThread(ExclusiveContext *cx, AllocKind thingKind)
{
MOZ_ASSERT(cx->arenas()->freeLists[thingKind].isEmpty());
if (cx->isJSContext())
return refillFreeListFromMainThread<allowGC>(cx->asJSContext(), thingKind);
return refillFreeListOffMainThread(cx, thingKind);
}
template void *
GCRuntime::refillFreeListFromAnyThread<NoGC>(ExclusiveContext *cx, AllocKind thingKind);
template void *
GCRuntime::refillFreeListFromAnyThread<CanGC>(ExclusiveContext *cx, AllocKind thingKind);
/* static */ void *
GCRuntime::refillFreeListInGC(Zone *zone, AllocKind thingKind)
{
@@ -2873,8 +3086,7 @@ GCRuntime::refillFreeListInGC(Zone *zone, AllocKind thingKind)
MOZ_ASSERT(rt->isHeapMajorCollecting());
MOZ_ASSERT(!rt->gc.isBackgroundSweeping());
AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
return zone->arenas.allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
return zone->arenas.allocateFromArena(zone, thingKind);
}
SliceBudget::SliceBudget()
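ArenaLists::allocateFromArena, restored above, avoids taking the GC lock on the common path: the lock lives in a Maybe<AutoLockGC> and is only engaged when background finalization is still pending or the thread-local lists turn out to be empty. A standalone sketch of that lazy-locking shape, using std::optional and std::mutex as stand-ins for mozilla::Maybe and the GC lock (all names here are illustrative):

#include <cstdio>
#include <mutex>
#include <optional>

std::mutex gcLock;                                 // stand-in for the GC lock
bool backgroundFinalizeDone = true;                // analogue of BFS_DONE

int *takeFromLocalList() { return nullptr; }       // pretend the local list is empty
int *allocateUnderLock() { static int cell; return &cell; }

int *allocateFromArena() {
    std::optional<std::lock_guard<std::mutex>> maybeLock;

    // Take the lock up front only if background finalization may still touch
    // this list; otherwise try the thread-local fast path lock-free.
    if (!backgroundFinalizeDone)
        maybeLock.emplace(gcLock);

    if (int *thing = takeFromLocalList())
        return thing;

    // Slow path: chunks are shared between threads, so take the lock now
    // if we have not already.
    if (!maybeLock)
        maybeLock.emplace(gcLock);
    return allocateUnderLock();
}

int main() {
    std::printf("allocated at %p\n", (void *)allocateFromArena());
    return 0;
}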

View file

@@ -49,8 +49,6 @@ namespace gcstats {
struct Statistics;
}
class Nursery;
namespace gc {
struct FinalizePhase;
@@ -782,6 +780,11 @@ class ArenaLists
return freeLists[thingKind].allocate(thingSize);
}
// Returns nullptr on Out-Of-Memory. This method makes no attempt to
// synchronize with background finalization, so may miss available memory
// that is waiting to be finalized.
TenuredCell *allocateFromArena(JS::Zone *zone, AllocKind thingKind);
/*
* Moves all arenas from |fromArenaLists| into |this|.
*/
@@ -838,12 +841,12 @@
enum ArenaAllocMode { HasFreeThings = true, IsEmpty = false };
template <ArenaAllocMode hasFreeThings>
TenuredCell *allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader, AllocKind kind);
inline TenuredCell *allocateFromArenaInner(JS::Zone *zone, ArenaHeader *aheader,
AllocKind thingKind);
inline void normalizeBackgroundFinalizeState(AllocKind thingKind);
friend class GCRuntime;
friend class js::Nursery;
};
/* The number of GC cycles an empty chunk can survive before being released. */
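The fast path shown above ("return freeLists[thingKind].allocate(thingSize);") bump-allocates fixed-size cells out of a free span until the span is exhausted, at which point allocateFromArena refills it. A simplified, self-contained sketch of that bump allocation follows; the real FreeSpan also chains multiple spans within an arena, while this only models a single contiguous span with invented layout.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct FreeSpan {
    std::uintptr_t first = 0;   // address of the next free thing
    std::uintptr_t last = 0;    // address of the last free thing in the span

    void *allocate(std::size_t thingSize) {
        if (first > last)
            return nullptr;            // span exhausted; caller refills from an arena
        void *thing = reinterpret_cast<void *>(first);
        first += thingSize;            // bump to the next cell
        return thing;
    }
};

int main() {
    alignas(16) static unsigned char arena[4 * 16];   // pretend arena of 4 cells
    FreeSpan span;
    span.first = reinterpret_cast<std::uintptr_t>(arena);
    span.last = reinterpret_cast<std::uintptr_t>(arena) + 3 * 16;

    for (int i = 0; i < 5; i++)        // the fifth request fails: span is empty
        std::printf("cell %d -> %p\n", i, span.allocate(16));
    return 0;
}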