Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1575175 - Rename memory counter classes now they're used for both GC and malloc heaps r=sfink
This renames:

  HeapSize::gcBytes -> bytes (it's not just for GC heaps any more)
  ZoneThreshold -> HeapThreshold (to go with HeapSize)
  HeapThreshold::triggerBytes -> bytes (what else could it be?)

I renamed the ZoneAllocator members to make them more uniform/consistent, so we
now have gcHeapSize/gcHeapThreshold, mallocHeapSize/mallocHeapThreshold etc. I
also renamed the heap threshold classes.

Differential Revision: https://phabricator.services.mozilla.com/D42868

--HG--
extra : moz-landing-system : lando
Parent: 1652ece8e3
Commit: 266f4c6efb
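As a quick orientation, here is a hedged sketch of how call sites read after this patch. The member and accessor names are the real ones introduced below; the helper function itself is hypothetical and not part of the patch:

// Hypothetical helper, not part of the patch: checks a zone against the
// renamed counters. Before this change the first comparison would have read
//   zone->zoneSize.gcBytes() >= zone->threshold.gcTriggerBytes().
bool zoneOverAnyThreshold(js::Zone* zone) {
  return zone->gcHeapSize.bytes() >= zone->gcHeapThreshold.bytes() ||
         zone->mallocHeapSize.bytes() >= zone->mallocHeapThreshold.bytes() ||
         zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.bytes();
}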
@@ -145,8 +145,8 @@ typedef enum JSGCParamKey {
  * collections.
  *
  * The RHS of the equation above is calculated and sets
- * zone->threshold.gcTriggerBytes(). When usage.gcBytes() surpasses
- * threshold.gcTriggerBytes() for a zone, the zone may be scheduled for a GC.
+ * zone->gcHeapThreshold.bytes(). When gcHeapSize.bytes() exceeds
+ * gcHeapThreshold.bytes() for a zone, the zone may be scheduled for a GC.
  */

 /**
@@ -461,7 +461,7 @@ static bool GC(JSContext* cx, unsigned argc, Value* vp) {
   }

 #ifndef JS_MORE_DETERMINISTIC
-  size_t preBytes = cx->runtime()->gc.heapSize.gcBytes();
+  size_t preBytes = cx->runtime()->gc.heapSize.bytes();
 #endif

   if (zone) {
@@ -476,7 +476,7 @@ static bool GC(JSContext* cx, unsigned argc, Value* vp) {
   char buf[256] = {'\0'};
 #ifndef JS_MORE_DETERMINISTIC
   SprintfLiteral(buf, "before %zu, after %zu\n", preBytes,
-                 cx->runtime()->gc.heapSize.gcBytes());
+                 cx->runtime()->gc.heapSize.bytes());
 #endif
   return ReturnStringCopy(cx, args, buf);
 }
@@ -373,7 +373,7 @@ bool GCRuntime::gcIfNeededAtAllocation(JSContext* cx) {
   // the world and do a full, non-incremental GC right now, if possible.
   Zone* zone = cx->zone();
   if (isIncrementalGCInProgress() &&
-      zone->zoneSize.gcBytes() > zone->threshold.gcTriggerBytes()) {
+      zone->gcHeapSize.bytes() > zone->gcHeapThreshold.bytes()) {
     PrepareZoneForGC(cx->zone());
     gc(GC_NORMAL, JS::GCReason::INCREMENTAL_TOO_SLOW);
   }
@@ -595,11 +595,11 @@ Arena* GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind,

   // Fail the allocation if we are over our heap size limits.
   if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
-      (heapSize.gcBytes() >= tunables.gcMaxBytes()))
+      (heapSize.bytes() >= tunables.gcMaxBytes()))
     return nullptr;

   Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
-  zone->zoneSize.addGCArena();
+  zone->gcHeapSize.addGCArena();

   // Trigger an incremental slice if needed.
   if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
@@ -957,7 +957,7 @@ void GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock) {
   MOZ_ASSERT(arena->allocated());
   MOZ_ASSERT(!arena->onDelayedMarkingList());

-  arena->zone->zoneSize.removeGCArena();
+  arena->zone->gcHeapSize.removeGCArena();
   arena->release(lock);
   arena->chunk()->releaseArena(rt, arena, lock);
 }
@@ -1797,7 +1797,7 @@ uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
       MOZ_ASSERT(tunables.gcMaxNurseryBytes() < UINT32_MAX);
       return uint32_t(tunables.gcMaxNurseryBytes());
     case JSGC_BYTES:
-      return uint32_t(heapSize.gcBytes());
+      return uint32_t(heapSize.bytes());
     case JSGC_NURSERY_BYTES:
       return nursery().capacity();
     case JSGC_NUMBER:
@@ -2054,15 +2054,15 @@ extern JS_FRIEND_API void js::RemoveRawValueRoot(JSContext* cx, Value* vp) {
   cx->runtime()->gc.removeRoot(vp);
 }

-float ZoneThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
+float HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
   float eagerTriggerFactor = highFrequencyGC
                                  ? HighFrequencyEagerAllocTriggerFactor
                                  : LowFrequencyEagerAllocTriggerFactor;
-  return eagerTriggerFactor * gcTriggerBytes();
+  return eagerTriggerFactor * bytes();
 }

 /* static */
-float ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
+float GCHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
     size_t lastBytes, const GCSchedulingTunables& tunables,
     const GCSchedulingState& state) {
   if (!tunables.isDynamicHeapGrowthEnabled()) {
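For illustration, a self-contained sketch of the eager-trigger computation above. The factor constants here are made-up example values, not SpiderMonkey's actual tunables:

#include <cstddef>

// Example-only factors; the real constants live in gc/Scheduling.h.
constexpr float kHighFrequencyEagerFactor = 0.85f;  // assumed value
constexpr float kLowFrequencyEagerFactor = 0.9f;    // assumed value

// Mirrors HeapThreshold::eagerAllocTrigger: scale the GC trigger threshold
// down so a collection can be scheduled before the hard trigger is reached.
float eagerAllocTrigger(bool highFrequencyGC, size_t thresholdBytes) {
  float factor =
      highFrequencyGC ? kHighFrequencyEagerFactor : kLowFrequencyEagerFactor;
  return factor * thresholdBytes;  // e.g. 0.85 * 100 MB = 85 MB
}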
@@ -2114,7 +2114,7 @@ float ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
 }

 /* static */
-size_t ZoneHeapThreshold::computeZoneTriggerBytes(
+size_t GCHeapThreshold::computeZoneTriggerBytes(
     float growthFactor, size_t lastBytes, JSGCInvocationKind gckind,
     const GCSchedulingTunables& tunables, const AutoLockGC& lock) {
   size_t baseMin = gckind == GC_SHRINK
@@ -2127,30 +2127,28 @@ size_t ZoneHeapThreshold::computeZoneTriggerBytes(
   return size_t(Min(triggerMax, trigger));
 }

-void ZoneHeapThreshold::updateAfterGC(size_t lastBytes,
-                                      JSGCInvocationKind gckind,
-                                      const GCSchedulingTunables& tunables,
-                                      const GCSchedulingState& state,
-                                      const AutoLockGC& lock) {
+void GCHeapThreshold::updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
+                                    const GCSchedulingTunables& tunables,
+                                    const GCSchedulingState& state,
+                                    const AutoLockGC& lock) {
   float growthFactor =
       computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
-  gcTriggerBytes_ =
+  bytes_ =
       computeZoneTriggerBytes(growthFactor, lastBytes, gckind, tunables, lock);
 }

 /* static */
-size_t ZoneMallocThreshold::computeZoneTriggerBytes(float growthFactor,
+size_t MallocHeapThreshold::computeZoneTriggerBytes(float growthFactor,
                                                     size_t lastBytes,
                                                     size_t baseBytes,
                                                     const AutoLockGC& lock) {
   return size_t(float(Max(lastBytes, baseBytes)) * growthFactor);
 }

-void ZoneMallocThreshold::updateAfterGC(size_t lastBytes, size_t baseBytes,
+void MallocHeapThreshold::updateAfterGC(size_t lastBytes, size_t baseBytes,
                                         float growthFactor,
                                         const AutoLockGC& lock) {
-  gcTriggerBytes_ =
-      computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
+  bytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
 }

 /* Compacting GC */
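A standalone model of the malloc-threshold update above. This is a sketch, not the engine's code: std::max stands in for js' Max, and the numbers are only an example:

#include <algorithm>
#include <cstddef>

// Shape of MallocHeapThreshold::updateAfterGC: the next trigger is the larger
// of the retained size and a base value, scaled by a growth factor.
size_t computeNextTriggerBytes(size_t lastBytes, size_t baseBytes,
                               float growthFactor) {
  return size_t(float(std::max(lastBytes, baseBytes)) * growthFactor);
}

// e.g. lastBytes = 8 MB, baseBytes = 16 MB, growthFactor = 1.5
//  -> next trigger = 24 MB.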
@@ -3046,7 +3044,7 @@ void GCRuntime::clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
     // everything to new arenas, as that will already have allocated a similar
     // number of arenas. This only happens for collections triggered by GC zeal.
     bool allArenasRelocated = ShouldRelocateAllArenas(reason);
-    arena->zone->zoneSize.removeBytes(ArenaSize, !allArenasRelocated);
+    arena->zone->gcHeapSize.removeBytes(ArenaSize, !allArenasRelocated);

     // Release the arena but don't return it to the chunk yet.
     arena->release(lock);
@@ -3413,8 +3411,8 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
   MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

   size_t usedBytes =
-      zone->zoneSize.gcBytes();  // This already includes |nbytes|.
-  size_t thresholdBytes = zone->threshold.gcTriggerBytes();
+      zone->gcHeapSize.bytes();  // This already includes |nbytes|.
+  size_t thresholdBytes = zone->gcHeapThreshold.bytes();
   if (usedBytes < thresholdBytes) {
     return;
   }
@@ -3459,25 +3457,25 @@ void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {

 void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
                                       const HeapSize& heap,
-                                      const ZoneThreshold& threshold,
+                                      const HeapThreshold& threshold,
                                       JS::GCReason reason) {
   rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc), heap, threshold,
                                   reason);
 }

 void GCRuntime::maybeMallocTriggerZoneGC(Zone* zone) {
-  if (maybeMallocTriggerZoneGC(zone, zone->gcMallocBytes,
-                               zone->gcMallocThreshold,
+  if (maybeMallocTriggerZoneGC(zone, zone->mallocHeapSize,
+                               zone->mallocHeapThreshold,
                                JS::GCReason::TOO_MUCH_MALLOC)) {
     return;
   }

-  maybeMallocTriggerZoneGC(zone, zone->gcJitBytes, zone->gcJitThreshold,
+  maybeMallocTriggerZoneGC(zone, zone->jitHeapSize, zone->jitHeapThreshold,
                            JS::GCReason::TOO_MUCH_JIT_CODE);
 }

 bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
-                                         const ZoneThreshold& threshold,
+                                         const HeapThreshold& threshold,
                                          JS::GCReason reason) {
   if (!CurrentThreadCanAccessRuntime(rt)) {
     // Zones in use by a helper thread can't be collected.
@@ -3490,8 +3488,8 @@ bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
     return false;
   }

-  size_t usedBytes = heap.gcBytes();
-  size_t thresholdBytes = threshold.gcTriggerBytes();
+  size_t usedBytes = heap.bytes();
+  size_t thresholdBytes = threshold.bytes();
   if (usedBytes < thresholdBytes) {
     return false;
   }
@@ -3576,8 +3574,9 @@ void GCRuntime::maybeGC() {

   bool scheduledZones = false;
   for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
-    if (checkEagerAllocTrigger(zone->zoneSize, zone->threshold) ||
-        checkEagerAllocTrigger(zone->gcMallocBytes, zone->gcMallocThreshold)) {
+    if (checkEagerAllocTrigger(zone->gcHeapSize, zone->gcHeapThreshold) ||
+        checkEagerAllocTrigger(zone->mallocHeapSize,
+                               zone->mallocHeapThreshold)) {
       zone->scheduleGC();
       scheduledZones = true;
     }
@@ -3589,10 +3588,10 @@ void GCRuntime::maybeGC() {
 }

 bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
-                                       const ZoneThreshold& threshold) {
+                                       const HeapThreshold& threshold) {
   float thresholdBytes =
       threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
-  float usedBytes = size.gcBytes();
+  float usedBytes = size.bytes();
   if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) {
     return false;
   }
@@ -7370,8 +7369,8 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
       continue;
     }

-    if (zone->zoneSize.gcBytes() >=
-        zone->threshold.nonIncrementalTriggerBytes(tunables)) {
+    if (zone->gcHeapSize.bytes() >=
+        zone->gcHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
       CheckZoneIsScheduled(zone, reason, "GC bytes");
       budget.makeUnlimited();
       stats().nonincremental(AbortReason::GCBytesTrigger);
@@ -7380,8 +7379,8 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
       }
     }

-    if (zone->gcMallocBytes.gcBytes() >=
-        zone->gcMallocThreshold.nonIncrementalTriggerBytes(tunables)) {
+    if (zone->mallocHeapSize.bytes() >=
+        zone->mallocHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
       CheckZoneIsScheduled(zone, reason, "malloc bytes");
       budget.makeUnlimited();
       stats().nonincremental(AbortReason::MallocBytesTrigger);
@@ -7390,8 +7389,8 @@ GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
       }
     }

-    if (zone->gcJitBytes.gcBytes() >=
-        zone->gcJitThreshold.nonIncrementalTriggerBytes(tunables)) {
+    if (zone->jitHeapSize.bytes() >=
+        zone->jitHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
       CheckZoneIsScheduled(zone, reason, "JIT code bytes");
       budget.makeUnlimited();
       stats().nonincremental(AbortReason::JitCodeBytesTrigger);
@@ -7435,11 +7434,11 @@ static void ScheduleZones(GCRuntime* gc) {

     // This is a heuristic to reduce the total number of collections.
     bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
-    if (zone->zoneSize.gcBytes() >=
-            zone->threshold.eagerAllocTrigger(inHighFrequencyMode) ||
-        zone->gcMallocBytes.gcBytes() >=
-            zone->gcMallocThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
-        zone->gcJitBytes.gcBytes() >= zone->gcJitThreshold.gcTriggerBytes()) {
+    if (zone->gcHeapSize.bytes() >=
+            zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+        zone->mallocHeapSize.bytes() >=
+            zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+        zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.bytes()) {
       zone->scheduleGC();
     }
   }
@@ -8257,7 +8256,7 @@ void GCRuntime::mergeRealms(Realm* source, Realm* target) {
                                  targetZoneIsCollecting);
   target->zone()->addTenuredAllocsSinceMinorGC(
       source->zone()->getAndResetTenuredAllocsSinceMinorGC());
-  target->zone()->zoneSize.adopt(source->zone()->zoneSize);
+  target->zone()->gcHeapSize.adopt(source->zone()->gcHeapSize);
   target->zone()->adoptUniqueIds(source->zone());
   target->zone()->adoptMallocBytes(source->zone());

@@ -8861,7 +8860,7 @@ namespace MemInfo {

 static bool GCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->runtime()->gc.heapSize.gcBytes()));
+  args.rval().setNumber(double(cx->runtime()->gc.heapSize.bytes()));
   return true;
 }

@@ -8904,13 +8903,13 @@ static bool GCSliceCountGetter(JSContext* cx, unsigned argc, Value* vp) {

 static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->zoneSize.gcBytes()));
+  args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
   return true;
 }

 static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->threshold.gcTriggerBytes()));
+  args.rval().setNumber(double(cx->zone()->gcHeapThreshold.bytes()));
   return true;
 }

@@ -8919,20 +8918,20 @@ static bool ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp) {
   bool highFrequency =
       cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
   args.rval().setNumber(
-      double(cx->zone()->threshold.eagerAllocTrigger(highFrequency)));
+      double(cx->zone()->gcHeapThreshold.eagerAllocTrigger(highFrequency)));
   return true;
 }

 static bool ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->gcMallocBytes.gcBytes()));
+  args.rval().setNumber(double(cx->zone()->mallocHeapSize.bytes()));
   return true;
 }

 static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc,
                                          Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->gcMallocThreshold.gcTriggerBytes()));
+  args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.bytes()));
   return true;
 }

@@ -267,14 +267,14 @@ class GCRuntime {
   // Check whether to trigger a zone GC after malloc memory.
   void maybeMallocTriggerZoneGC(Zone* zone);
   bool maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
-                                const ZoneThreshold& threshold,
+                                const HeapThreshold& threshold,
                                 JS::GCReason reason);
   // The return value indicates if we were able to do the GC.
   bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
                      size_t thresholdBytes);
   void maybeGC();
   bool checkEagerAllocTrigger(const HeapSize& size,
-                              const ZoneThreshold& threshold);
+                              const HeapThreshold& threshold);
   // The return value indicates whether a major GC was performed.
   bool gcIfRequested();
   void gc(JSGCInvocationKind gckind, JS::GCReason reason);
@@ -957,8 +957,8 @@ void js::Nursery::collect(JS::GCReason reason) {

   // We ignore gcMaxBytes when allocating for minor collection. However, if we
   // overflowed, we disable the nursery. The next time we allocate, we'll fail
-  // because gcBytes >= gcMaxBytes.
-  if (rt->gc.heapSize.gcBytes() >= tunables().gcMaxBytes()) {
+  // because bytes >= gcMaxBytes.
+  if (rt->gc.heapSize.bytes() >= tunables().gcMaxBytes()) {
     disable();
   }

@@ -260,7 +260,7 @@
  *
  * Assumptions:
  *   -> Common web scripts will return to the event loop before using
- *      10% of the current gcTriggerBytes worth of GC memory.
+ *      10% of the current triggerBytes worth of GC memory.
  *
  * ALLOC_TRIGGER (incremental)
  * ---------------------------
@@ -348,30 +348,30 @@ class GCSchedulingTunables {
   /*
    * JSGC_ALLOCATION_THRESHOLD
    *
-   * The base value used to compute zone->threshold.gcTriggerBytes(). When
-   * usage.gcBytes() surpasses threshold.gcTriggerBytes() for a zone, the
-   * zone may be scheduled for a GC, depending on the exact circumstances.
+   * The base value used to compute zone->threshold.bytes(). When
+   * gcHeapSize.bytes() exceeds threshold.bytes() for a zone, the zone may be
+   * scheduled for a GC, depending on the exact circumstances.
    */
   MainThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_;

   /*
    * JSGC_NON_INCREMENTAL_FACTOR
    *
-   * Multiple of threshold.gcBytes() which triggers a non-incremental GC.
+   * Multiple of threshold.bytes() which triggers a non-incremental GC.
    */
   UnprotectedData<float> nonIncrementalFactor_;

   /*
    * JSGC_AVOID_INTERRUPT_FACTOR
    *
-   * Multiple of threshold.gcBytes() which triggers a new incremental GC when
+   * Multiple of threshold.bytes() which triggers a new incremental GC when
    * doing so would interrupt an ongoing incremental GC.
    */
   UnprotectedData<float> avoidInterruptFactor_;

   /*
-   * Number of bytes to allocate between incremental slices in GCs triggered
-   * by the zone allocation threshold.
+   * Number of bytes to allocate between incremental slices in GCs triggered by
+   * the zone allocation threshold.
    *
    * This value does not have a JSGCParamKey parameter yet.
    */
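To make the relationship between these tunables concrete, a hedged example; the numeric values are illustrative, not the engine's defaults:

// Illustrative only: how the tunables above interact for one zone.
size_t thresholdBytes = 30 * 1024 * 1024;  // e.g. zone->gcHeapThreshold.bytes()
float nonIncrementalFactor = 1.12f;        // JSGC_NON_INCREMENTAL_FACTOR

// An incremental zone GC may be scheduled once the heap reaches
// thresholdBytes; a non-incremental GC is forced at
// thresholdBytes * nonIncrementalFactor (about 33.6 MB here).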
@@ -569,44 +569,43 @@ class GCSchedulingState {
   }
 };

+using AtomicByteCount =
+    mozilla::Atomic<size_t, mozilla::ReleaseAcquire,
+                    mozilla::recordreplay::Behavior::DontPreserve>;
+
 /*
- * Tracks the used sizes for owned heap data and automatically maintains the
- * memory usage relationship between GCRuntime and Zones.
+ * Tracks the size of allocated data. This is used for both GC and malloc data.
+ * It automatically maintains the memory usage relationship between parent and
+ * child instances, i.e. between those in a GCRuntime and its Zones.
  */
 class HeapSize {
   /*
-   * A heap usage that contains our parent's heap usage, or null if this is
-   * the top-level usage container.
+   * An instance that contains our parent's heap usage, or null if this is the
+   * top-level usage container.
    */
   HeapSize* const parent_;

   /*
-   * The approximate number of bytes in use on the GC heap, to the nearest
-   * ArenaSize. This does not include any malloc data. It also does not
-   * include not-actively-used addresses that are still reserved at the OS
-   * level for GC usage. It is atomic because it is updated by both the active
-   * and GC helper threads.
+   * The number of bytes in use. For GC heaps this is approximate to the nearest
+   * ArenaSize. It is atomic because it is updated by both the active and GC
+   * helper threads.
    */
-  mozilla::Atomic<size_t, mozilla::ReleaseAcquire,
-                  mozilla::recordreplay::Behavior::DontPreserve>
-      gcBytes_;
+  AtomicByteCount bytes_;

   /*
    * The number of bytes retained after the last collection. This is updated
    * dynamically during incremental GC. It does not include allocations that
    * happen during a GC.
    */
-  mozilla::Atomic<size_t, mozilla::ReleaseAcquire,
-                  mozilla::recordreplay::Behavior::DontPreserve>
-      retainedBytes_;
+  AtomicByteCount retainedBytes_;

  public:
-  explicit HeapSize(HeapSize* parent) : parent_(parent), gcBytes_(0) {}
+  explicit HeapSize(HeapSize* parent) : parent_(parent), bytes_(0) {}

-  size_t gcBytes() const { return gcBytes_; }
+  size_t bytes() const { return bytes_; }
   size_t retainedBytes() const { return retainedBytes_; }

-  void updateOnGCStart() { retainedBytes_ = size_t(gcBytes_); }
+  void updateOnGCStart() { retainedBytes_ = size_t(bytes_); }

   void addGCArena() { addBytes(ArenaSize); }
   void removeGCArena() {
@@ -615,9 +614,9 @@ class HeapSize {
   }

   void addBytes(size_t nbytes) {
-    mozilla::DebugOnly<size_t> initialBytes(gcBytes_);
+    mozilla::DebugOnly<size_t> initialBytes(bytes_);
     MOZ_ASSERT(initialBytes + nbytes > initialBytes);
-    gcBytes_ += nbytes;
+    bytes_ += nbytes;
     if (parent_) {
       parent_->addBytes(nbytes);
     }
@@ -628,8 +627,8 @@ class HeapSize {
       // we can't do that yet, so clamp the result to zero.
       retainedBytes_ = nbytes <= retainedBytes_ ? retainedBytes_ - nbytes : 0;
     }
-    MOZ_ASSERT(gcBytes_ >= nbytes);
-    gcBytes_ -= nbytes;
+    MOZ_ASSERT(bytes_ >= nbytes);
+    bytes_ -= nbytes;
     if (parent_) {
       parent_->removeBytes(nbytes, wasSwept);
     }
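A minimal standalone sketch of the parent/child accounting pattern HeapSize implements above (simplified: plain size_t instead of atomics, no swept-bytes handling):

#include <cassert>
#include <cstddef>

// A zone-level counter forwards every delta to its parent, so the
// runtime-level total always equals the sum of its zones.
class ByteCounter {
  ByteCounter* const parent_;
  size_t bytes_ = 0;

 public:
  explicit ByteCounter(ByteCounter* parent) : parent_(parent) {}
  size_t bytes() const { return bytes_; }
  void addBytes(size_t n) {
    bytes_ += n;
    if (parent_) parent_->addBytes(n);
  }
  void removeBytes(size_t n) {
    assert(bytes_ >= n);
    bytes_ -= n;
    if (parent_) parent_->removeBytes(n);
  }
};

// Usage: ByteCounter runtime(nullptr); ByteCounter zone(&runtime);
// zone.addBytes(4096) leaves both counters at 4096.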
@@ -639,31 +638,33 @@ class HeapSize {
   void adopt(HeapSize& source) {
     // Skip retainedBytes_: we never adopt zones that are currently being
     // collected.
-    gcBytes_ += source.gcBytes_;
+    bytes_ += source.bytes_;
     source.retainedBytes_ = 0;
-    source.gcBytes_ = 0;
+    source.bytes_ = 0;
   }
 };

-// Base class for GC heap and malloc thresholds.
-class ZoneThreshold {
+// A heap size threshold used to trigger GC. This is an abstract base class for
+// GC heap and malloc thresholds defined below.
+class HeapThreshold {
  protected:
+  HeapThreshold() = default;
+
   // GC trigger threshold.
-  mozilla::Atomic<size_t, mozilla::Relaxed,
-                  mozilla::recordreplay::Behavior::DontPreserve>
-      gcTriggerBytes_;
+  AtomicByteCount bytes_;

  public:
-  size_t gcTriggerBytes() const { return gcTriggerBytes_; }
+  size_t bytes() const { return bytes_; }
   size_t nonIncrementalTriggerBytes(GCSchedulingTunables& tunables) const {
-    return gcTriggerBytes_ * tunables.nonIncrementalFactor();
+    return bytes_ * tunables.nonIncrementalFactor();
   }
   float eagerAllocTrigger(bool highFrequencyGC) const;
 };

-// This class encapsulates the data that determines when we need to do a zone GC
-// base on GC heap size.
-class ZoneHeapThreshold : public ZoneThreshold {
+// A heap threshold that is based on a multiple of the retained size after the
+// last collection adjusted based on collection frequency and retained
+// size. This is used to determine when to do a zone GC based on GC heap size.
+class GCHeapThreshold : public HeapThreshold {
  public:
   void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
                      const GCSchedulingTunables& tunables,
@@ -679,9 +680,10 @@ class ZoneHeapThreshold : public ZoneThreshold {
                      const AutoLockGC& lock);
 };

-// This class encapsulates the data that determines when we need to do a zone
+// A heap threshold that is calculated as a constant multiple of the retained
+// size after the last collection. This is used to determine when to do a zone
 // GC based on malloc data.
-class ZoneMallocThreshold : public ZoneThreshold {
+class MallocHeapThreshold : public HeapThreshold {
  public:
   void updateAfterGC(size_t lastBytes, size_t baseBytes, float growthFactor,
                      const AutoLockGC& lock);
@@ -692,11 +694,11 @@ class ZoneMallocThreshold : public ZoneThreshold {
                                  const AutoLockGC& lock);
 };

-// A fixed threshold that determines when we need to do a zone GC based on
-// allocated JIT code.
-class ZoneFixedThreshold : public ZoneThreshold {
+// A fixed threshold that's used to determine when we need to do a zone GC based
+// on allocated JIT code.
+class JitHeapThreshold : public HeapThreshold {
  public:
-  explicit ZoneFixedThreshold(size_t bytes) { gcTriggerBytes_ = bytes; }
+  explicit JitHeapThreshold(size_t bytes) { bytes_ = bytes; }
 };

 #ifdef DEBUG
@@ -990,7 +990,7 @@ void Statistics::beginGC(JSGCInvocationKind kind,
   nonincrementalReason_ = gc::AbortReason::None;

   GCRuntime& gc = runtime->gc;
-  preTotalHeapBytes = gc.heapSize.gcBytes();
+  preTotalHeapBytes = gc.heapSize.bytes();

   preCollectedHeapBytes = 0;

@@ -1005,7 +1005,7 @@ void Statistics::beginGC(JSGCInvocationKind kind,
 void Statistics::measureInitialHeapSize() {
   MOZ_ASSERT(preCollectedHeapBytes == 0);
   for (GCZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
-    preCollectedHeapBytes += zone->zoneSize.gcBytes();
+    preCollectedHeapBytes += zone->gcHeapSize.bytes();
   }
 }

@@ -1013,11 +1013,11 @@ void Statistics::adoptHeapSizeDuringIncrementalGC(Zone* mergedZone) {
   // A zone is being merged into a zone that's currently being collected so we
   // need to adjust our record of the total size of heap for collected zones.
   MOZ_ASSERT(runtime->gc.isIncrementalGCInProgress());
-  preCollectedHeapBytes += mergedZone->zoneSize.gcBytes();
+  preCollectedHeapBytes += mergedZone->gcHeapSize.bytes();
 }

 void Statistics::endGC() {
-  postTotalHeapBytes = runtime->gc.heapSize.gcBytes();
+  postTotalHeapBytes = runtime->gc.heapSize.bytes();

   sendGCTelemetry();

@@ -1091,7 +1091,7 @@ void Statistics::sendGCTelemetry() {
   size_t bytesSurvived = 0;
   for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
     if (zone->wasCollected()) {
-      bytesSurvived += zone->zoneSize.retainedBytes();
+      bytesSurvived += zone->gcHeapSize.retainedBytes();
     }
   }

@@ -31,32 +31,32 @@ Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);

 ZoneAllocator::ZoneAllocator(JSRuntime* rt)
     : JS::shadow::Zone(rt, &rt->gc.marker),
-      zoneSize(&rt->gc.heapSize),
-      gcMallocBytes(nullptr),
-      gcJitBytes(nullptr),
-      gcJitThreshold(jit::MaxCodeBytesPerProcess * 0.8) {
+      gcHeapSize(&rt->gc.heapSize),
+      mallocHeapSize(nullptr),
+      jitHeapSize(nullptr),
+      jitHeapThreshold(jit::MaxCodeBytesPerProcess * 0.8) {
   AutoLockGC lock(rt);
   updateGCThresholds(rt->gc, GC_NORMAL, lock);
 }

 ZoneAllocator::~ZoneAllocator() {
 #ifdef DEBUG
-  gcMallocTracker.checkEmptyOnDestroy();
-  MOZ_ASSERT(zoneSize.gcBytes() == 0);
-  MOZ_ASSERT(gcMallocBytes.gcBytes() == 0);
-  MOZ_ASSERT(gcJitBytes.gcBytes() == 0);
+  mallocTracker.checkEmptyOnDestroy();
+  MOZ_ASSERT(gcHeapSize.bytes() == 0);
+  MOZ_ASSERT(mallocHeapSize.bytes() == 0);
+  MOZ_ASSERT(jitHeapSize.bytes() == 0);
 #endif
 }

 void ZoneAllocator::fixupAfterMovingGC() {
 #ifdef DEBUG
-  gcMallocTracker.fixupAfterMovingGC();
+  mallocTracker.fixupAfterMovingGC();
 #endif
 }

 void js::ZoneAllocator::updateMemoryCountersOnGCStart() {
-  zoneSize.updateOnGCStart();
-  gcMallocBytes.updateOnGCStart();
+  gcHeapSize.updateOnGCStart();
+  mallocHeapSize.updateOnGCStart();
 }

 void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
@@ -64,11 +64,11 @@ void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
                                            const js::AutoLockGC& lock) {
   // This is called repeatedly during a GC to update thresholds as memory is
   // freed.
-  threshold.updateAfterGC(zoneSize.retainedBytes(), invocationKind, gc.tunables,
-                          gc.schedulingState, lock);
-  gcMallocThreshold.updateAfterGC(gcMallocBytes.retainedBytes(),
-                                  gc.tunables.mallocThresholdBase(),
-                                  gc.tunables.mallocGrowthFactor(), lock);
+  gcHeapThreshold.updateAfterGC(gcHeapSize.retainedBytes(), invocationKind,
+                                gc.tunables, gc.schedulingState, lock);
+  mallocHeapThreshold.updateAfterGC(mallocHeapSize.retainedBytes(),
+                                    gc.tunables.mallocThresholdBase(),
+                                    gc.tunables.mallocGrowthFactor(), lock);
 }

 void ZoneAllocPolicy::decMemory(size_t nbytes) {
@@ -30,7 +30,7 @@ bool CurrentThreadIsGCSweeping();
 namespace gc {
 void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
                               const HeapSize& heap,
-                              const ZoneThreshold& threshold,
+                              const HeapThreshold& threshold,
                               JS::GCReason reason);
 }

@@ -54,10 +54,10 @@ class ZoneAllocator : public JS::shadow::Zone,
   void reportAllocationOverflow() const;

   void adoptMallocBytes(ZoneAllocator* other) {
-    gcMallocBytes.adopt(other->gcMallocBytes);
-    gcJitBytes.adopt(other->gcJitBytes);
+    mallocHeapSize.adopt(other->mallocHeapSize);
+    jitHeapSize.adopt(other->jitHeapSize);
 #ifdef DEBUG
-    gcMallocTracker.adopt(other->gcMallocTracker);
+    mallocTracker.adopt(other->mallocTracker);
 #endif
   }

@@ -70,12 +70,12 @@ class ZoneAllocator : public JS::shadow::Zone,
   void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
     MOZ_ASSERT(cell);
     MOZ_ASSERT(nbytes);
-    gcMallocBytes.addBytes(nbytes);
+    mallocHeapSize.addBytes(nbytes);

     // We don't currently check GC triggers here.

 #ifdef DEBUG
-    gcMallocTracker.trackMemory(cell, nbytes, use);
+    mallocTracker.trackMemory(cell, nbytes, use);
 #endif
   }

@@ -85,37 +85,37 @@ class ZoneAllocator : public JS::shadow::Zone,
     MOZ_ASSERT(nbytes);
     MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept);

-    gcMallocBytes.removeBytes(nbytes, wasSwept);
+    mallocHeapSize.removeBytes(nbytes, wasSwept);

 #ifdef DEBUG
-    gcMallocTracker.untrackMemory(cell, nbytes, use);
+    mallocTracker.untrackMemory(cell, nbytes, use);
 #endif
   }

   void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) {
 #ifdef DEBUG
-    gcMallocTracker.swapMemory(a, b, use);
+    mallocTracker.swapMemory(a, b, use);
 #endif
   }

 #ifdef DEBUG
   void registerPolicy(js::ZoneAllocPolicy* policy) {
-    return gcMallocTracker.registerPolicy(policy);
+    return mallocTracker.registerPolicy(policy);
   }
   void unregisterPolicy(js::ZoneAllocPolicy* policy) {
-    return gcMallocTracker.unregisterPolicy(policy);
+    return mallocTracker.unregisterPolicy(policy);
   }
   void movePolicy(js::ZoneAllocPolicy* dst, js::ZoneAllocPolicy* src) {
-    return gcMallocTracker.movePolicy(dst, src);
+    return mallocTracker.movePolicy(dst, src);
   }
 #endif

   void incPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) {
     MOZ_ASSERT(nbytes);
-    gcMallocBytes.addBytes(nbytes);
+    mallocHeapSize.addBytes(nbytes);

 #ifdef DEBUG
-    gcMallocTracker.incPolicyMemory(policy, nbytes);
+    mallocTracker.incPolicyMemory(policy, nbytes);
 #endif

     maybeMallocTriggerZoneGC();
@@ -125,69 +125,69 @@ class ZoneAllocator : public JS::shadow::Zone,
     MOZ_ASSERT(nbytes);
     MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept);

-    gcMallocBytes.removeBytes(nbytes, wasSwept);
+    mallocHeapSize.removeBytes(nbytes, wasSwept);

 #ifdef DEBUG
-    gcMallocTracker.decPolicyMemory(policy, nbytes);
+    mallocTracker.decPolicyMemory(policy, nbytes);
 #endif
   }

   void incJitMemory(size_t nbytes) {
     MOZ_ASSERT(nbytes);
-    gcJitBytes.addBytes(nbytes);
-    maybeTriggerZoneGC(gcJitBytes, gcJitThreshold,
+    jitHeapSize.addBytes(nbytes);
+    maybeTriggerZoneGC(jitHeapSize, jitHeapThreshold,
                        JS::GCReason::TOO_MUCH_JIT_CODE);
   }
   void decJitMemory(size_t nbytes) {
     MOZ_ASSERT(nbytes);
-    gcJitBytes.removeBytes(nbytes, true);
+    jitHeapSize.removeBytes(nbytes, true);
   }

   // Check malloc allocation threshold and trigger a zone GC if necessary.
   void maybeMallocTriggerZoneGC() {
-    maybeTriggerZoneGC(gcMallocBytes, gcMallocThreshold,
+    maybeTriggerZoneGC(mallocHeapSize, mallocHeapThreshold,
                        JS::GCReason::TOO_MUCH_MALLOC);
   }

  private:
   void maybeTriggerZoneGC(const js::gc::HeapSize& heap,
-                          const js::gc::ZoneThreshold& threshold,
+                          const js::gc::HeapThreshold& threshold,
                           JS::GCReason reason) {
-    if (heap.gcBytes() >= threshold.gcTriggerBytes()) {
+    if (heap.bytes() >= threshold.bytes()) {
       gc::MaybeMallocTriggerZoneGC(runtimeFromAnyThread(), this, heap,
                                    threshold, reason);
     }
   }

  public:
-  // Track GC heap size under this Zone.
-  js::gc::HeapSize zoneSize;
+  // The size of allocated GC arenas in this zone.
+  js::gc::HeapSize gcHeapSize;

-  // Thresholds used to trigger GC based on heap size.
-  js::gc::ZoneHeapThreshold threshold;
+  // Threshold used to trigger GC based on GC heap size.
+  js::gc::GCHeapThreshold gcHeapThreshold;

   // Amount of data to allocate before triggering a new incremental slice for
   // the current GC.
   js::MainThreadData<size_t> gcDelayBytes;

-  // Malloc counter used for allocations where size information is
-  // available. Used for some internal and all tracked external allocations.
-  js::gc::HeapSize gcMallocBytes;
+  // Amount of malloc data owned by GC things in this zone, including external
+  // allocations supplied by JS::AddAssociatedMemory.
+  js::gc::HeapSize mallocHeapSize;

-  // Thresholds used to trigger GC based on malloc allocations.
-  js::gc::ZoneMallocThreshold gcMallocThreshold;
+  // Threshold used to trigger GC based on malloc allocations.
+  js::gc::MallocHeapThreshold mallocHeapThreshold;

-  // Malloc counter used for JIT code allocation.
-  js::gc::HeapSize gcJitBytes;
+  // Amount of executable JIT code owned by GC things in this zone.
+  js::gc::HeapSize jitHeapSize;

-  // Thresholds used to trigger GC based on JIT allocations.
-  js::gc::ZoneFixedThreshold gcJitThreshold;
+  // Threshold used to trigger GC based on JIT allocations.
+  js::gc::JitHeapThreshold jitHeapThreshold;

  private:
 #ifdef DEBUG
   // In debug builds, malloc allocations can be tracked to make debugging easier
   // (possible?) if allocation and free sizes don't balance.
-  js::gc::MemoryTracker gcMallocTracker;
+  js::gc::MemoryTracker mallocTracker;
 #endif

   friend class js::gc::GCRuntime;
@@ -1401,7 +1401,7 @@ JS_FRIEND_API JS::Value js::MaybeGetScriptPrivate(JSObject* object) {
 }

 JS_FRIEND_API uint64_t js::GetGCHeapUsageForObjectZone(JSObject* obj) {
-  return obj->zone()->zoneSize.gcBytes();
+  return obj->zone()->gcHeapSize.bytes();
 }

 #ifdef DEBUG