Bug 1681533 - Only collect the nursery when necessary in major GC r=sfink

This rearranges a few things so that we can run a minor GC inside a major GC
slice, so we don't have to be conservative about collecting the nursery at the
start of a slice.

Differential Revision: https://phabricator.services.mozilla.com/D99366
This commit is contained in:
Jon Coppeard 2021-01-04 17:01:50 +00:00
Родитель 56e67b321d
Коммит fc91cd019c
9 изменённых файлов: 170 добавлений и 176 удалений

Просмотреть файл

@ -391,6 +391,16 @@ typedef enum JSGCParamKey {
* string will be allocated in nursery.
*/
JSGC_STOP_PRETENURE_STRING_THRESHOLD = 43,
/**
* A number that is incremented on every major GC slice.
*/
JSGC_MAJOR_GC_NUMBER = 44,
/**
* A number that is incremented on every minor GC.
*/
JSGC_MINOR_GC_NUMBER = 45,
} JSGCParamKey;
/*

Просмотреть файл

@ -614,6 +614,8 @@ static bool MinorGC(JSContext* cx, unsigned argc, Value* vp) {
_("gcBytes", JSGC_BYTES, false) \
_("nurseryBytes", JSGC_NURSERY_BYTES, false) \
_("gcNumber", JSGC_NUMBER, false) \
_("majorGCNumber", JSGC_MAJOR_GC_NUMBER, false) \
_("minorGCNumber", JSGC_MINOR_GC_NUMBER, false) \
_("mode", JSGC_MODE, true) \
_("unusedChunks", JSGC_UNUSED_CHUNKS, false) \
_("totalChunks", JSGC_TOTAL_CHUNKS, false) \

Просмотреть файл

@ -1504,6 +1504,10 @@ uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
return nursery().capacity();
case JSGC_NUMBER:
return uint32_t(number);
case JSGC_MAJOR_GC_NUMBER:
return uint32_t(majorGCNumber);
case JSGC_MINOR_GC_NUMBER:
return uint32_t(minorGCNumber);
case JSGC_MODE:
return uint32_t(mode);
case JSGC_UNUSED_CHUNKS:
@ -4082,12 +4086,6 @@ void GCRuntime::unmarkWeakMaps() {
bool GCRuntime::beginPreparePhase(JS::GCReason reason, AutoGCSession& session) {
gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PREPARE);
#ifdef DEBUG
if (fullCompartmentChecks) {
checkForCompartmentMismatches();
}
#endif
if (!prepareZonesForCollection(reason, &isFull.ref())) {
return false;
}
@ -4284,6 +4282,12 @@ void GCRuntime::endPreparePhase(JS::GCReason reason) {
}
}
}
#ifdef DEBUG
if (fullCompartmentChecks) {
checkForCompartmentMismatches();
}
#endif
}
void GCRuntime::beginMarkPhase(AutoGCSession& session) {
@ -6429,6 +6433,10 @@ void GCRuntime::finishCollection() {
zone->notifyObservingDebuggers();
}
#ifdef JS_GC_ZEAL
clearSelectedForMarking();
#endif
auto currentTime = ReallyNow();
schedulingState.updateHighFrequencyMode(lastGCEndTime_, currentTime,
tunables);
@ -6524,7 +6532,9 @@ static JS::ProfilingCategoryPair GCHeapStateToProfilingCategory(
AutoHeapSession::AutoHeapSession(GCRuntime* gc, JS::HeapState heapState)
: gc(gc), prevState(gc->heapState_) {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
MOZ_ASSERT(prevState == JS::HeapState::Idle);
MOZ_ASSERT(prevState == JS::HeapState::Idle ||
(prevState == JS::HeapState::MajorCollecting &&
heapState == JS::HeapState::MinorCollecting));
MOZ_ASSERT(heapState != JS::HeapState::Idle);
gc->heapState_ = heapState;
@ -6710,11 +6720,23 @@ static bool ShouldSweepOnBackgroundThread(JS::GCReason reason) {
return reason != JS::GCReason::DESTROY_RUNTIME && CanUseExtraThreads();
}
// Report whether a minor GC has any work to do: either live allocations in
// the nursery or pending entries in the store buffer.
static bool NeedToCollectNursery(GCRuntime* gc) {
  if (!gc->nursery().isEmpty()) {
    return true;
  }
  return !gc->storeBuffer().isEmpty();
}
void GCRuntime::incrementalSlice(SliceBudget& budget,
const MaybeInvocationKind& gckind,
JS::GCReason reason, AutoGCSession& session) {
JS::GCReason reason) {
AutoSetThreadIsPerformingGC performingGC;
AutoGCSession session(this, JS::HeapState::MajorCollecting);
// We don't allow off-thread parsing to start while we're doing an
// incremental GC of the atoms zone.
if (rt->activeGCInAtomsZone()) {
session.maybeCheckAtomsAccess.emplace(rt);
}
bool destroyingRuntime = (reason == JS::GCReason::DESTROY_RUNTIME);
initialState = incrementalState;
@ -6744,14 +6766,6 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
isIncremental = !budget.isUnlimited();
/*
* Non-incremental collection expects that the nursery is empty.
*/
if (!isIncremental && !isIncrementalGCInProgress()) {
MOZ_ASSERT(nursery().isEmpty());
storeBuffer().checkEmpty();
}
if (useZeal && hasIncrementalTwoSliceZealMode()) {
/*
* Yields between slices occurs at predetermined points in these modes;
@ -6761,8 +6775,6 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
budget.makeUnlimited();
}
incGcSliceNumber();
switch (incrementalState) {
case State::NotActive:
invocationKind = gckind.valueOr(GC_NORMAL);
@ -6783,7 +6795,7 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
incrementalState = State::Prepare;
if (!beginPreparePhase(reason, session)) {
incrementalState = State::NotActive;
return;
break;
}
if (isIncremental && useZeal &&
@ -6799,15 +6811,19 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
break;
}
endPreparePhase(reason);
incrementalState = State::MarkRoots;
[[fallthrough]];
case State::MarkRoots:
if (NeedToCollectNursery(this)) {
collectNurseryFromMajorGC(gckind, reason);
}
endPreparePhase(reason);
beginMarkPhase(session);
/* If we needed delayed marking for gray roots, then collect until done.
*/
// If we needed delayed marking for gray roots, then collect until done.
if (isIncremental && !hasValidGrayRootsBuffer()) {
budget.makeUnlimited();
isIncremental = false;
@ -6875,6 +6891,10 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
[[fallthrough]];
case State::Sweep:
if (storeBuffer().mayHavePointersToDeadCells()) {
collectNurseryFromMajorGC(gckind, reason);
}
if (initialState == State::Sweep) {
rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
}
@ -6918,7 +6938,10 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
case State::Compact:
if (isCompacting) {
MOZ_ASSERT(nursery().isEmpty());
if (NeedToCollectNursery(this)) {
collectNurseryFromMajorGC(gckind, reason);
}
storeBuffer().checkEmpty();
if (!startedCompacting) {
beginCompactPhase();
@ -6956,6 +6979,12 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
MOZ_ASSERT(marker.markColor() == MarkColor::Black);
}
void GCRuntime::collectNurseryFromMajorGC(const MaybeInvocationKind& gckind,
JS::GCReason reason) {
collectNursery(gckind.valueOr(GC_NORMAL), reason,
gcstats::PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC);
}
bool GCRuntime::hasForegroundWork() const {
switch (incrementalState) {
case State::NotActive:
@ -7055,20 +7084,22 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
return resetIncrementalGC(GCAbortReason::AbortRequested);
}
GCAbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
if (unsafeReason == GCAbortReason::None) {
if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
unsafeReason = GCAbortReason::CompartmentRevived;
} else if (mode != JSGC_MODE_INCREMENTAL &&
mode != JSGC_MODE_ZONE_INCREMENTAL) {
unsafeReason = GCAbortReason::ModeChange;
if (!budget.isUnlimited()) {
GCAbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
if (unsafeReason == GCAbortReason::None) {
if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
unsafeReason = GCAbortReason::CompartmentRevived;
} else if (mode != JSGC_MODE_INCREMENTAL &&
mode != JSGC_MODE_ZONE_INCREMENTAL) {
unsafeReason = GCAbortReason::ModeChange;
}
}
}
if (unsafeReason != GCAbortReason::None) {
budget.makeUnlimited();
stats().nonincremental(unsafeReason);
return resetIncrementalGC(unsafeReason);
if (unsafeReason != GCAbortReason::None) {
budget.makeUnlimited();
stats().nonincremental(unsafeReason);
return resetIncrementalGC(unsafeReason);
}
}
GCAbortReason resetReason = GCAbortReason::None;
@ -7245,6 +7276,17 @@ MOZ_NEVER_INLINE GCRuntime::IncrementalResult GCRuntime::gcCycle(
// they are operating on zones which will not be collected from here.
MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
// This reason is used internally. See below.
MOZ_ASSERT(reason != JS::GCReason::RESET);
// Background finalization and decommit are finished by definition before we
// can start a new major GC. Background allocation may still be running, but
// that's OK because chunk pools are protected by the GC lock.
if (!isIncrementalGCInProgress()) {
assertBackgroundSweepingFinished();
MOZ_ASSERT(decommitTask.isIdle());
}
// Note that GC callbacks are allowed to re-enter GC.
AutoCallGCCallbacks callCallbacks(*this, reason);
@ -7256,7 +7298,8 @@ MOZ_NEVER_INLINE GCRuntime::IncrementalResult GCRuntime::gcCycle(
gcstats::AutoGCSlice agc(stats(), scanZonesBeforeGC(),
gckind.valueOr(invocationKind), budget, reason);
auto result = budgetIncrementalGC(nonincrementalByAPI, reason, budget);
IncrementalResult result =
budgetIncrementalGC(nonincrementalByAPI, reason, budget);
if (result == IncrementalResult::ResetIncremental) {
if (incrementalState == State::NotActive) {
// The collection was reset and has finished.
@ -7267,95 +7310,38 @@ MOZ_NEVER_INLINE GCRuntime::IncrementalResult GCRuntime::gcCycle(
reason = JS::GCReason::RESET;
}
if (shouldCollectNurseryForSlice(nonincrementalByAPI, budget)) {
collectNursery(gckind.valueOr(GC_NORMAL), reason,
gcstats::PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC);
} else {
++number; // This otherwise happens in Nursery::collect().
}
AutoGCSession session(this, JS::HeapState::MajorCollecting);
majorGCTriggerReason = JS::GCReason::NO_REASON;
MOZ_ASSERT(!stats().hasTrigger());
{
gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
// Background finalization and decommit are finished by definition
// before we can start a new GC session.
if (!isIncrementalGCInProgress()) {
assertBackgroundSweepingFinished();
MOZ_ASSERT(decommitTask.isIdle());
}
// We must also wait for background allocation to finish so we can
// avoid taking the GC lock when manipulating the chunks during the GC.
// The background alloc task can run between slices, so we must wait
// for it at the start of every slice.
allocTask.cancelAndWait();
}
// We don't allow off-thread parsing to start while we're doing an
// incremental GC of the atoms zone.
if (rt->activeGCInAtomsZone()) {
session.maybeCheckAtomsAccess.emplace(rt);
}
incGcNumber();
incGcSliceNumber();
gcprobes::MajorGCStart();
incrementalSlice(budget, gckind, reason, session);
#ifdef JS_GC_ZEAL
clearSelectedForMarking();
#endif
incrementalSlice(budget, gckind, reason);
gcprobes::MajorGCEnd();
MOZ_ASSERT_IF(result == IncrementalResult::ResetIncremental,
!isIncrementalGCInProgress());
return result;
}
bool GCRuntime::shouldCollectNurseryForSlice(bool nonincrementalByAPI,
SliceBudget& budget) {
if (!nursery().isEnabled()) {
return false;
void GCRuntime::waitForBackgroundTasksBeforeSlice() {
gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
// Background finalization and decommit are finished by definition before we
// can start a new major GC.
if (!isIncrementalGCInProgress()) {
assertBackgroundSweepingFinished();
MOZ_ASSERT(decommitTask.isIdle());
}
if (nursery().shouldCollect()) {
return true;
}
bool nonIncremental = nonincrementalByAPI || budget.isUnlimited();
bool shouldCollectForSweeping = storeBuffer().mayHavePointersToDeadCells();
switch (incrementalState) {
case State::NotActive:
return true;
case State::Prepare:
return true;
case State::Mark:
return (mightSweepInThisSlice(nonIncremental) &&
shouldCollectForSweeping) ||
mightCompactInThisSlice(nonIncremental);
case State::Sweep:
return shouldCollectForSweeping ||
mightCompactInThisSlice(nonIncremental);
case State::Finalize:
return mightCompactInThisSlice(nonIncremental);
case State::Compact:
return true;
case State::Decommit:
case State::Finish:
return false;
default:
MOZ_CRASH("Unexpected GC state");
}
return false;
// We must also wait for background allocation to finish so we can avoid
// taking the GC lock when manipulating the chunks during the GC. The
// background alloc task can run between slices, so we must wait for it at the
// start of every slice.
//
// TODO: Is this still necessary?
allocTask.cancelAndWait();
}
inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
@ -7363,11 +7349,6 @@ inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
return nonIncremental || lastMarkSlice || hasIncrementalTwoSliceZealMode();
}
inline bool GCRuntime::mightCompactInThisSlice(bool nonIncremental) {
MOZ_ASSERT(incrementalState < State::Compact);
return isCompacting && (nonIncremental || hasIncrementalTwoSliceZealMode());
}
#ifdef JS_GC_ZEAL
static bool IsDeterministicGCReason(JS::GCReason reason) {
switch (reason) {
@ -7730,8 +7711,17 @@ void GCRuntime::minorGC(JS::GCReason reason, gcstats::PhaseKind phase) {
return;
}
incGcNumber();
collectNursery(GC_NORMAL, reason, phase);
#ifdef JS_GC_ZEAL
if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
gcstats::AutoPhase ap(stats(), phase);
CheckHeapAfterGC(rt);
}
#endif
for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
maybeTriggerGCAfterAlloc(zone);
maybeTriggerGCAfterMalloc(zone);
@ -7761,12 +7751,6 @@ void GCRuntime::collectNursery(JSGCInvocationKind kind, JS::GCReason reason,
MOZ_ASSERT(nursery().isEmpty());
startBackgroundFreeAfterMinorGC();
#ifdef JS_GC_ZEAL
if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
CheckHeapAfterGC(rt);
}
#endif
}
void GCRuntime::startBackgroundFreeAfterMinorGC() {

Просмотреть файл

@ -464,12 +464,10 @@ class GCRuntime {
}
uint64_t gcNumber() const { return number; }
void incGcNumber() { ++number; }
uint64_t minorGCCount() const { return minorGCNumber; }
void incMinorGcNumber() {
++minorGCNumber;
++number;
}
void incMinorGcNumber() { ++minorGCNumber; }
uint64_t majorGCCount() const { return majorGCNumber; }
void incMajorGcNumber() { ++majorGCNumber; }
@ -689,12 +687,14 @@ class GCRuntime {
const MaybeInvocationKind& gckind,
JS::GCReason reason);
bool shouldRepeatForDeadZone(JS::GCReason reason);
void incrementalSlice(SliceBudget& budget, const MaybeInvocationKind& gckind,
JS::GCReason reason, AutoGCSession& session);
MOZ_MUST_USE bool shouldCollectNurseryForSlice(bool nonincrementalByAPI,
SliceBudget& budget);
JS::GCReason reason);
void waitForBackgroundTasksBeforeSlice();
bool mightSweepInThisSlice(bool nonIncremental);
bool mightCompactInThisSlice(bool nonIncremental);
void collectNurseryFromMajorGC(const MaybeInvocationKind& gckind,
JS::GCReason reason);
void collectNursery(JSGCInvocationKind kind, JS::GCReason reason,
gcstats::PhaseKind phase);

Просмотреть файл

@ -89,14 +89,13 @@ StoreBuffer::StoreBuffer(JSRuntime* rt, const Nursery& nursery)
{
}
void StoreBuffer::checkEmpty() const {
MOZ_ASSERT(bufferVal.isEmpty());
MOZ_ASSERT(bufStrCell.isEmpty());
MOZ_ASSERT(bufBigIntCell.isEmpty());
MOZ_ASSERT(bufObjCell.isEmpty());
MOZ_ASSERT(bufferSlot.isEmpty());
MOZ_ASSERT(bufferWholeCell.isEmpty());
MOZ_ASSERT(bufferGeneric.isEmpty());
void StoreBuffer::checkEmpty() const { MOZ_ASSERT(isEmpty()); }
bool StoreBuffer::isEmpty() const {
return bufferVal.isEmpty() && bufStrCell.isEmpty() &&
bufBigIntCell.isEmpty() && bufObjCell.isEmpty() &&
bufferSlot.isEmpty() && bufferWholeCell.isEmpty() &&
bufferGeneric.isEmpty();
}
bool StoreBuffer::enable() {

Просмотреть файл

@ -480,6 +480,7 @@ class StoreBuffer {
void disable();
bool isEnabled() const { return enabled_; }
bool isEmpty() const;
void clear();
const Nursery& nursery() const { return nursery_; }

Просмотреть файл

@ -357,13 +357,6 @@ void gc::GCRuntime::endVerifyPreBarriers() {
zone->setNeedsIncrementalBarrier(false);
}
/*
* We need to bump gcNumber so that the methodjit knows that jitcode has
* been discarded.
*/
MOZ_ASSERT(trc->number == number);
number++;
verifyPreData = nullptr;
MOZ_ASSERT(incrementalState == State::Mark);
incrementalState = State::NotActive;

Просмотреть файл

@ -1,30 +1,39 @@
// Test that we don't repeatedly trigger last-ditch GCs.
function allocUntilFail() {
gc();
let initGCNumber = gcparam("gcNumber");
let error;
try {
let a = [];
while (true) {
a.push(Symbol()); // Symbols are tenured.
}
} catch(err) {
error = err;
}
let finalGCNumber = gcparam("gcNumber");
gc();
assertEq(error, "out of memory");
return finalGCNumber - initGCNumber;
}
// Turn off any zeal which will disrupt GC number checks.
gczeal(0);
// Set a small heap limit.
// Get initial heap size and limit.
gc();
let currentSize = gcparam("gcBytes");
gcparam("maxBytes", currentSize + 16 * 1024);
const initialSize = gcparam("gcBytes");
const initialMaxSize = gcparam("maxBytes");
// Allocate tenured things until the heap limit is hit, then return how many
// major GCs were triggered in the process.
function allocUntilFail() {
  gc();
  const startCount = gcparam("majorGCNumber");

  // Impose a small heap limit.
  gcparam("maxBytes", initialSize + 16 * 1024);

  let caught;
  try {
    const garbage = [];
    while (true) {
      garbage.push(Symbol()); // Symbols are tenured.
    }
  } catch (e) {
    caught = e;
  }

  const endCount = gcparam("majorGCNumber");

  // Restore the original heap limit.
  gcparam("maxBytes", initialMaxSize);
  gc();

  assertEq(caught, "out of memory");
  return endCount - startCount;
}
// Set the time limit for skipping last ditch GCs to 5 seconds.
gcparam("minLastDitchGCPeriod", 5);

Просмотреть файл

@ -1186,22 +1186,18 @@ class MOZ_RAII AutoUnsafeCallWithABI {
namespace gc {
// Set/unset the performing GC flag for the current thread.
// Set/restore the performing GC flag for the current thread.
class MOZ_RAII AutoSetThreadIsPerformingGC {
JSContext* cx;
bool prev;
public:
AutoSetThreadIsPerformingGC() : cx(TlsContext.get()) {
JSFreeOp* fop = cx->defaultFreeOp();
MOZ_ASSERT(!fop->isCollecting());
fop->isCollecting_ = true;
AutoSetThreadIsPerformingGC()
: cx(TlsContext.get()), prev(cx->defaultFreeOp()->isCollecting_) {
cx->defaultFreeOp()->isCollecting_ = true;
}
~AutoSetThreadIsPerformingGC() {
JSFreeOp* fop = cx->defaultFreeOp();
MOZ_ASSERT(fop->isCollecting());
fop->isCollecting_ = false;
}
~AutoSetThreadIsPerformingGC() { cx->defaultFreeOp()->isCollecting_ = prev; }
};
struct MOZ_RAII AutoSetThreadGCUse {