Bug 1916758 - Part 6: Rename TenuredChunk to ArenaChunk since we will add another kind of tenured chunk r=sfink

To avoid confusion, TenuredChunk should end up as a base class, if anything.
This renames TenuredChunk and TenuredChunkBase to ArenaChunk and ArenaChunkBase,
and ChunkKind::TenuredHeap to ChunkKind::TenuredArenas.

Differential Revision: https://phabricator.services.mozilla.com/D221065
Author: Jon Coppeard 2024-09-05 09:52:49 +00:00
Parent: 3235af3a70
Commit: 0689a43d58
14 changed files with 149 additions and 154 deletions
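For orientation, here is a minimal sketch of the type hierarchy as it stands after this rename, with members abbreviated; the full definitions are in the hunks below, and this sketch adds nothing beyond them:

    // Abbreviated sketch only; see the diff for the real definitions.
    enum class ChunkKind : uint8_t {
      Invalid = 0,
      TenuredArenas,  // was TenuredHeap
      NurseryToSpace,
      NurseryFromSpace
    };

    class ChunkBase { /* runtime, storeBuffer, kind, nurseryChunkIndex */ };

    // Was TenuredChunkBase: bookkeeping (info, markBits, free/decommit
    // bitmaps) for a tenured chunk that holds fixed-size arenas.
    class ArenaChunkBase : public ChunkBase { /* ... */ };

    // Was TenuredChunk: the arenas themselves plus arena allocation methods.
    class ArenaChunk : public ArenaChunkBase { /* Arena arenas[ArenasPerChunk]; */ };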

View File

@@ -37,7 +37,7 @@ namespace gc {
 class Arena;
 struct Cell;
-class TenuredChunk;
+class ArenaChunk;
 class StoreBuffer;
 class TenuredCell;
@@ -84,7 +84,7 @@ const size_t ArenaBitmapWords = HowMany(ArenaBitmapBits, JS_BITS_PER_WORD);
 enum class ChunkKind : uint8_t {
   Invalid = 0,
-  TenuredHeap,
+  TenuredArenas,
   NurseryToSpace,
   NurseryFromSpace
 };
@@ -97,13 +97,13 @@ class ChunkBase {
   // Initialize a tenured heap chunk.
   explicit ChunkBase(JSRuntime* rt) {
     MOZ_ASSERT((uintptr_t(this) & ChunkMask) == 0);
-    initBaseForTenuredChunk(rt);
+    initBaseForArenaChunk(rt);
   }

-  void initBaseForTenuredChunk(JSRuntime* rt) {
+  void initBaseForArenaChunk(JSRuntime* rt) {
     runtime = rt;
     storeBuffer = nullptr;
-    kind = ChunkKind::TenuredHeap;
+    kind = ChunkKind::TenuredArenas;
     nurseryChunkIndex = UINT8_MAX;
   }
@@ -123,7 +123,7 @@ class ChunkBase {
   ChunkKind getKind() const {
     MOZ_ASSERT_IF(storeBuffer, kind == ChunkKind::NurseryToSpace ||
                                    kind == ChunkKind::NurseryFromSpace);
-    MOZ_ASSERT_IF(!storeBuffer, kind == ChunkKind::TenuredHeap);
+    MOZ_ASSERT_IF(!storeBuffer, kind == ChunkKind::TenuredArenas);
     return kind;
   }
@@ -139,12 +139,12 @@ class ChunkBase {
   uint8_t nurseryChunkIndex;
 };

-// Information about tenured heap chunks.
-struct TenuredChunkInfo {
+// Information about tenured heap chunks containing arenas.
+struct ArenaChunkInfo {
  private:
   friend class ChunkPool;
-  TenuredChunk* next = nullptr;
-  TenuredChunk* prev = nullptr;
+  ArenaChunk* next = nullptr;
+  ArenaChunk* prev = nullptr;

  public:
   /* Number of free arenas, either committed or decommitted. */
@@ -180,7 +180,7 @@ const size_t BitsPerPageWithHeaders =
     (ArenaSize + ArenaBitmapBytes) * ArenasPerPage * CHAR_BIT + ArenasPerPage +
     1;
 const size_t ChunkBitsAvailable =
-    (ChunkSize - sizeof(ChunkBase) - sizeof(TenuredChunkInfo)) * CHAR_BIT;
+    (ChunkSize - sizeof(ChunkBase) - sizeof(ArenaChunkInfo)) * CHAR_BIT;
 const size_t PagesPerChunk = ChunkBitsAvailable / BitsPerPageWithHeaders;
 const size_t ArenasPerChunk = PagesPerChunk * ArenasPerPage;
 const size_t FreeCommittedBits = ArenasPerChunk;
@@ -190,7 +190,7 @@ const size_t BitsPerArenaWithHeaders =
     (DecommitBits / ArenasPerChunk) + 1;

 const size_t CalculatedChunkSizeRequired =
-    sizeof(ChunkBase) + sizeof(TenuredChunkInfo) +
+    sizeof(ChunkBase) + sizeof(ArenaChunkInfo) +
     RoundUp(ArenasPerChunk * ArenaBitmapBytes, sizeof(uintptr_t)) +
     RoundUp(FreeCommittedBits, sizeof(uint32_t) * CHAR_BIT) / CHAR_BIT +
     RoundUp(DecommitBits, sizeof(uint32_t) * CHAR_BIT) / CHAR_BIT +
@@ -307,16 +307,16 @@ using ChunkPageBitmap = mozilla::BitSet<PagesPerChunk, uint32_t>;
 // Bitmap with one bit per arena used for free committed arena set.
 using ChunkArenaBitmap = mozilla::BitSet<ArenasPerChunk, uint32_t>;

-// Base class containing data members for a tenured heap chunk.
-class TenuredChunkBase : public ChunkBase {
+// Base class for a tenured heap chunk containing fixed size arenas.
+class ArenaChunkBase : public ChunkBase {
  public:
-  TenuredChunkInfo info;
+  ArenaChunkInfo info;
   ChunkMarkBitmap markBits;
   ChunkArenaBitmap freeCommittedArenas;
   ChunkPageBitmap decommittedPages;

  protected:
-  explicit TenuredChunkBase(JSRuntime* runtime) : ChunkBase(runtime) {
+  explicit ArenaChunkBase(JSRuntime* runtime) : ChunkBase(runtime) {
     static_assert(sizeof(markBits) == ArenaBitmapBytes * ArenasPerChunk,
                   "Ensure our MarkBitmap actually covers all arenas.");
     info.numArenasFree = ArenasPerChunk;
@@ -325,7 +325,7 @@ class TenuredChunkBase : public ChunkBase {
   void initAsDecommitted();
 };

 static_assert(FirstArenaOffset ==
-              RoundUp(sizeof(gc::TenuredChunkBase), ArenaSize));
+              RoundUp(sizeof(gc::ArenaChunkBase), ArenaSize));

 /*
  * We sometimes use an index to refer to a cell in an arena. The index for a
@@ -336,7 +336,7 @@ const size_t ArenaCellIndexBytes = CellAlignBytes;
 const size_t MaxArenaCellIndex = ArenaSize / CellAlignBytes;

 const size_t ChunkStoreBufferOffset = offsetof(ChunkBase, storeBuffer);
-const size_t ChunkMarkBitmapOffset = offsetof(TenuredChunkBase, markBits);
+const size_t ChunkMarkBitmapOffset = offsetof(ArenaChunkBase, markBits);

 // Hardcoded offsets into Arena class.
 const size_t ArenaZoneOffset = 2 * sizeof(uint32_t);
@@ -592,13 +592,12 @@ static MOZ_ALWAYS_INLINE ChunkBase* GetCellChunkBase(const Cell* cell) {
   return GetGCAddressChunkBase(cell);
 }

-static MOZ_ALWAYS_INLINE TenuredChunkBase* GetCellChunkBase(
+static MOZ_ALWAYS_INLINE ArenaChunkBase* GetCellChunkBase(
     const TenuredCell* cell) {
   MOZ_ASSERT(cell);
-  auto* chunk =
-      reinterpret_cast<TenuredChunkBase*>(uintptr_t(cell) & ~ChunkMask);
+  auto* chunk = reinterpret_cast<ArenaChunkBase*>(uintptr_t(cell) & ~ChunkMask);
   MOZ_ASSERT(chunk->runtime);
-  MOZ_ASSERT(chunk->kind == ChunkKind::TenuredHeap);
+  MOZ_ASSERT(chunk->kind == ChunkKind::TenuredArenas);
   return chunk;
 }
@@ -615,7 +614,7 @@ static MOZ_ALWAYS_INLINE bool TenuredCellIsMarkedBlack(
   MOZ_ASSERT(cell);
   MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
-  TenuredChunkBase* chunk = GetCellChunkBase(cell);
+  ArenaChunkBase* chunk = GetCellChunkBase(cell);
   return chunk->markBits.isMarkedBlack(cell);
 }
@@ -627,14 +626,14 @@ static MOZ_ALWAYS_INLINE bool NonBlackCellIsMarkedGray(
   MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
   MOZ_ASSERT(!TenuredCellIsMarkedBlack(cell));
-  TenuredChunkBase* chunk = GetCellChunkBase(cell);
+  ArenaChunkBase* chunk = GetCellChunkBase(cell);
   return chunk->markBits.markBit(cell, ColorBit::GrayOrBlackBit);
 }

 static MOZ_ALWAYS_INLINE bool TenuredCellIsMarkedGray(const TenuredCell* cell) {
   MOZ_ASSERT(cell);
   MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
-  TenuredChunkBase* chunk = GetCellChunkBase(cell);
+  ArenaChunkBase* chunk = GetCellChunkBase(cell);
   return chunk->markBits.isMarkedGray(cell);
 }

View File

@@ -305,7 +305,7 @@ AllocSite* CellAllocator::MaybeGenerateMissingAllocSite(JSContext* cx,
 void CellAllocator::CheckIncrementalZoneState(JS::Zone* zone, void* ptr) {
   MOZ_ASSERT(ptr);
   TenuredCell* cell = reinterpret_cast<TenuredCell*>(ptr);
-  TenuredChunkBase* chunk = detail::GetCellChunkBase(cell);
+  ArenaChunkBase* chunk = detail::GetCellChunkBase(cell);
   if (zone->isGCMarkingOrSweeping()) {
     MOZ_ASSERT(chunk->markBits.isMarkedBlack(cell));
   } else {
@@ -388,13 +388,13 @@ void* ArenaLists::refillFreeListAndAllocate(
     maybeLock.emplace(rt);
   }

-  TenuredChunk* chunk = rt->gc.pickChunk(maybeLock.ref());
+  ArenaChunk* chunk = rt->gc.pickChunk(maybeLock.ref());
   if (!chunk) {
     return nullptr;
   }

   // Although our chunk should definitely have enough space for another arena,
-  // there are other valid reasons why TenuredChunk::allocateArena() may fail.
+  // there are other valid reasons why ArenaChunk::allocateArena() may fail.
   arena = rt->gc.allocateArena(chunk, zone_, thingKind, checkThresholds,
                                maybeLock.ref());
   if (!arena) {
@@ -442,7 +442,7 @@ void Arena::arenaAllocatedDuringGC() {
   }
 }

-// /////////// TenuredChunk -> Arena Allocator ///////////////////////////////
+// /////////// ArenaChunk -> Arena Allocator /////////////////////////////////

 bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
   // To minimize memory waste, we do not want to run the background chunk
@@ -453,7 +453,7 @@ bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
          (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
 }

-Arena* GCRuntime::allocateArena(TenuredChunk* chunk, Zone* zone,
+Arena* GCRuntime::allocateArena(ArenaChunk* chunk, Zone* zone,
                                 AllocKind thingKind,
                                 ShouldCheckThresholds checkThresholds,
                                 const AutoLockGC& lock) {
@@ -476,9 +476,8 @@ Arena* GCRuntime::allocateArena(TenuredChunk* chunk, Zone* zone,
   return arena;
 }

-Arena* TenuredChunk::allocateArena(GCRuntime* gc, Zone* zone,
-                                   AllocKind thingKind,
-                                   const AutoLockGC& lock) {
+Arena* ArenaChunk::allocateArena(GCRuntime* gc, Zone* zone, AllocKind thingKind,
+                                 const AutoLockGC& lock) {
   if (info.numArenasFreeCommitted == 0) {
     commitOnePage(gc);
     MOZ_ASSERT(info.numArenasFreeCommitted == ArenasPerPage);
@@ -511,7 +510,7 @@ static inline size_t FindFirstBitSet(
   MOZ_CRASH("No bits found");
 }

-void TenuredChunk::commitOnePage(GCRuntime* gc) {
+void ArenaChunk::commitOnePage(GCRuntime* gc) {
   MOZ_ASSERT(info.numArenasFreeCommitted == 0);
   MOZ_ASSERT(info.numArenasFree >= ArenasPerPage);
@@ -535,7 +534,7 @@ void TenuredChunk::commitOnePage(GCRuntime* gc) {
   verify();
 }

-Arena* TenuredChunk::fetchNextFreeArena(GCRuntime* gc) {
+Arena* ArenaChunk::fetchNextFreeArena(GCRuntime* gc) {
   MOZ_ASSERT(info.numArenasFreeCommitted > 0);
   MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
@@ -549,23 +548,23 @@ Arena* TenuredChunk::fetchNextFreeArena(GCRuntime* gc) {
   return &arenas[index];
 }

-// /////////// System -> TenuredChunk Allocator //////////////////////////////
+// /////////// System -> ArenaChunk Allocator ////////////////////////////////

-TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
-  TenuredChunk* chunk = emptyChunks(lock).pop();
+ArenaChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
+  ArenaChunk* chunk = emptyChunks(lock).pop();
   if (chunk) {
     // Reinitialize ChunkBase; arenas are all free and may or may not be
     // committed.
     SetMemCheckKind(chunk, sizeof(ChunkBase), MemCheckKind::MakeUndefined);
-    chunk->initBaseForTenuredChunk(rt);
+    chunk->initBaseForArenaChunk(rt);
     MOZ_ASSERT(chunk->unused());
   } else {
-    void* ptr = TenuredChunk::allocate(this);
+    void* ptr = ArenaChunk::allocate(this);
     if (!ptr) {
       return nullptr;
     }
-    chunk = TenuredChunk::emplace(ptr, this, /* allMemoryCommitted = */ true);
+    chunk = ArenaChunk::emplace(ptr, this, /* allMemoryCommitted = */ true);
     MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
   }
@@ -576,7 +575,7 @@ TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
   return chunk;
 }

-void GCRuntime::recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock) {
+void GCRuntime::recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock) {
 #ifdef DEBUG
   MOZ_ASSERT(chunk->unused());
   chunk->verify();
@@ -589,12 +588,12 @@ void GCRuntime::recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock) {
   emptyChunks(lock).push(chunk);
 }

-TenuredChunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
+ArenaChunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
   if (availableChunks(lock).count()) {
     return availableChunks(lock).head();
   }

-  TenuredChunk* chunk = getOrAllocChunk(lock);
+  ArenaChunk* chunk = getOrAllocChunk(lock);
   if (!chunk) {
     return nullptr;
   }
@@ -623,21 +622,21 @@ void BackgroundAllocTask::run(AutoLockHelperThreadState& lock) {
   AutoLockGC gcLock(gc);
   while (!isCancelled() && gc->wantBackgroundAllocation(gcLock)) {
-    TenuredChunk* chunk;
+    ArenaChunk* chunk;
     {
       AutoUnlockGC unlock(gcLock);
-      void* ptr = TenuredChunk::allocate(gc);
+      void* ptr = ArenaChunk::allocate(gc);
       if (!ptr) {
         break;
       }
-      chunk = TenuredChunk::emplace(ptr, gc, /* allMemoryCommitted = */ true);
+      chunk = ArenaChunk::emplace(ptr, gc, /* allMemoryCommitted = */ true);
     }
     chunkPool_.ref().push(chunk);
   }
 }

 /* static */
-void* TenuredChunk::allocate(GCRuntime* gc) {
+void* ArenaChunk::allocate(GCRuntime* gc) {
   void* chunk = MapAlignedPages(ChunkSize, ChunkSize);
   if (!chunk) {
     return nullptr;
@@ -656,8 +655,8 @@ static inline bool ShouldDecommitNewChunk(bool allMemoryCommitted,
   return !allMemoryCommitted || !state.inHighFrequencyGCMode();
 }

-TenuredChunk* TenuredChunk::emplace(void* ptr, GCRuntime* gc,
-                                    bool allMemoryCommitted) {
+ArenaChunk* ArenaChunk::emplace(void* ptr, GCRuntime* gc,
+                                bool allMemoryCommitted) {
   /* The chunk may still have some regions marked as no-access. */
   MOZ_MAKE_MEM_UNDEFINED(ptr, ChunkSize);
@@ -667,7 +666,7 @@ TenuredChunk* TenuredChunk::emplace(void* ptr, GCRuntime* gc,
    */
   Poison(ptr, JS_FRESH_TENURED_PATTERN, ChunkSize, MemCheckKind::MakeUndefined);

-  TenuredChunk* chunk = new (mozilla::KnownNotNull, ptr) TenuredChunk(gc->rt);
+  ArenaChunk* chunk = new (mozilla::KnownNotNull, ptr) ArenaChunk(gc->rt);

   if (ShouldDecommitNewChunk(allMemoryCommitted, gc->schedulingState)) {
     // Decommit the arenas. We do this after poisoning so that if the OS does
@@ -684,13 +683,13 @@ TenuredChunk* TenuredChunk::emplace(void* ptr, GCRuntime* gc,
   return chunk;
 }

-void TenuredChunk::decommitAllArenas() {
+void ArenaChunk::decommitAllArenas() {
   MOZ_ASSERT(unused());
   MarkPagesUnusedSoft(&arenas[0], ArenasPerChunk * ArenaSize);
   initAsDecommitted();
 }

-void TenuredChunkBase::initAsDecommitted() {
+void ArenaChunkBase::initAsDecommitted() {
   // Set the state of all arenas to free and decommitted. They might not
   // actually be decommitted, but in that case the re-commit operation is a
   // no-op so it doesn't matter.

View File

@@ -239,9 +239,7 @@ class TenuredCell : public Cell {
     return true;
   }

-  TenuredChunk* chunk() const {
-    return static_cast<TenuredChunk*>(Cell::chunk());
-  }
+  ArenaChunk* chunk() const { return static_cast<ArenaChunk*>(Cell::chunk()); }

   // Mark bit management.
   MOZ_ALWAYS_INLINE bool isMarkedAny() const;
@@ -352,7 +350,7 @@ inline JSRuntime* Cell::runtimeFromAnyThread() const {
 inline uintptr_t Cell::address() const {
   uintptr_t addr = uintptr_t(this);
   MOZ_ASSERT(addr % CellAlignBytes == 0);
-  MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
+  MOZ_ASSERT(ArenaChunk::withinValidRange(addr));
   return addr;
 }

View File

@@ -341,7 +341,7 @@ ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
   ChunkPool expired;
   while (tooManyEmptyChunks(lock)) {
-    TenuredChunk* chunk = emptyChunks(lock).pop();
+    ArenaChunk* chunk = emptyChunks(lock).pop();
     prepareToFreeChunk(chunk->info);
     expired.push(chunk);
   }
@@ -354,7 +354,7 @@ ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
 static void FreeChunkPool(ChunkPool& pool) {
   for (ChunkPool::Iter iter(pool); !iter.done();) {
-    TenuredChunk* chunk = iter.get();
+    ArenaChunk* chunk = iter.get();
     iter.next();
     pool.remove(chunk);
     MOZ_ASSERT(chunk->unused());
@@ -367,7 +367,7 @@ void GCRuntime::freeEmptyChunks(const AutoLockGC& lock) {
   FreeChunkPool(emptyChunks(lock));
 }

-inline void GCRuntime::prepareToFreeChunk(TenuredChunkInfo& info) {
+inline void GCRuntime::prepareToFreeChunk(ArenaChunkInfo& info) {
   stats().count(gcstats::COUNT_DESTROY_CHUNK);
 #ifdef DEBUG
   /*
@@ -2126,14 +2126,14 @@ void js::gc::BackgroundDecommitTask::run(AutoLockHelperThreadState& lock) {
   gc->maybeRequestGCAfterBackgroundTask(lock);
 }

-static inline bool CanDecommitWholeChunk(TenuredChunk* chunk) {
+static inline bool CanDecommitWholeChunk(ArenaChunk* chunk) {
   return chunk->unused() && chunk->info.numArenasFreeCommitted != 0;
 }

 // Called from a background thread to decommit free arenas. Releases the GC
 // lock.
 void GCRuntime::decommitEmptyChunks(const bool& cancel, AutoLockGC& lock) {
-  Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
+  Vector<ArenaChunk*, 0, SystemAllocPolicy> chunksToDecommit;
   for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next()) {
     if (CanDecommitWholeChunk(chunk) && !chunksToDecommit.append(chunk)) {
       onOutOfMallocMemory(lock);
@@ -2141,7 +2141,7 @@ void GCRuntime::decommitEmptyChunks(const bool& cancel, AutoLockGC& lock) {
     }
   }

-  for (TenuredChunk* chunk : chunksToDecommit) {
+  for (ArenaChunk* chunk : chunksToDecommit) {
     if (cancel) {
       break;
     }
@@ -2174,7 +2174,7 @@ void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
   // it is dangerous to iterate the available list directly, as the active
   // thread could modify it concurrently. Instead, we build and pass an
   // explicit Vector containing the Chunks we want to visit.
-  Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
+  Vector<ArenaChunk*, 0, SystemAllocPolicy> chunksToDecommit;
   for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
        chunk.next()) {
     if (chunk->info.numArenasFreeCommitted != 0 &&
@@ -2184,7 +2184,7 @@ void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
     }
   }

-  for (TenuredChunk* chunk : chunksToDecommit) {
+  for (ArenaChunk* chunk : chunksToDecommit) {
     chunk->decommitFreeArenas(this, cancel, lock);
   }
 }

View File

@@ -31,7 +31,7 @@ class Nursery;
 namespace gc {

 class Arena;
-class TenuredChunk;
+class ArenaChunk;

 } /* namespace gc */
@@ -111,7 +111,7 @@ extern unsigned NotifyGCPreSwap(JSObject* a, JSObject* b);
 extern void NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags);

-using IterateChunkCallback = void (*)(JSRuntime*, void*, gc::TenuredChunk*,
+using IterateChunkCallback = void (*)(JSRuntime*, void*, gc::ArenaChunk*,
                                       const JS::AutoRequireNoGC&);
 using IterateZoneCallback = void (*)(JSRuntime*, void*, JS::Zone*,
                                      const JS::AutoRequireNoGC&);

View File

@@ -72,7 +72,7 @@ struct SweepAction {
 };

 class ChunkPool {
-  TenuredChunk* head_;
+  ArenaChunk* head_;
   size_t count_;

  public:
@@ -97,41 +97,41 @@ class ChunkPool {
   bool empty() const { return !head_; }
   size_t count() const { return count_; }

-  TenuredChunk* head() {
+  ArenaChunk* head() {
     MOZ_ASSERT(head_);
     return head_;
   }
-  TenuredChunk* pop();
-  void push(TenuredChunk* chunk);
-  TenuredChunk* remove(TenuredChunk* chunk);
+  ArenaChunk* pop();
+  void push(ArenaChunk* chunk);
+  ArenaChunk* remove(ArenaChunk* chunk);

   void sort();

  private:
-  TenuredChunk* mergeSort(TenuredChunk* list, size_t count);
+  ArenaChunk* mergeSort(ArenaChunk* list, size_t count);
   bool isSorted() const;

 #ifdef DEBUG
  public:
-  bool contains(TenuredChunk* chunk) const;
+  bool contains(ArenaChunk* chunk) const;
   bool verify() const;
   void verifyChunks() const;
 #endif

  public:
   // Pool mutation does not invalidate an Iter unless the mutation
-  // is of the TenuredChunk currently being visited by the Iter.
+  // is of the ArenaChunk currently being visited by the Iter.
   class Iter {
    public:
     explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
     bool done() const { return !current_; }
     void next();
-    TenuredChunk* get() const { return current_; }
-    operator TenuredChunk*() const { return get(); }
-    TenuredChunk* operator->() const { return get(); }
+    ArenaChunk* get() const { return current_; }
+    operator ArenaChunk*() const { return get(); }
+    ArenaChunk* operator->() const { return get(); }

    private:
-    TenuredChunk* current_;
+    ArenaChunk* current_;
   };
 };
@@ -568,8 +568,8 @@ class GCRuntime {
   void verifyAllChunks();
 #endif

-  TenuredChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
-  void recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock);
+  ArenaChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
+  void recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock);

 #ifdef JS_GC_ZEAL
   void startVerifyPreBarriers();
@@ -691,8 +691,8 @@ class GCRuntime {
   // For ArenaLists::allocateFromArena()
   friend class ArenaLists;
-  TenuredChunk* pickChunk(AutoLockGCBgAlloc& lock);
-  Arena* allocateArena(TenuredChunk* chunk, Zone* zone, AllocKind kind,
+  ArenaChunk* pickChunk(AutoLockGCBgAlloc& lock);
+  Arena* allocateArena(ArenaChunk* chunk, Zone* zone, AllocKind kind,
                        ShouldCheckThresholds checkThresholds,
                        const AutoLockGC& lock);
@@ -704,7 +704,7 @@ class GCRuntime {
   bool tooManyEmptyChunks(const AutoLockGC& lock);
   ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
   void freeEmptyChunks(const AutoLockGC& lock);
-  void prepareToFreeChunk(TenuredChunkInfo& info);
+  void prepareToFreeChunk(ArenaChunkInfo& info);
   void setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock);

   friend class BackgroundAllocTask;
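
The renamed ChunkPool::Iter keeps the same interface, so existing iteration patterns are unchanged. As a usage sketch only (mirroring FreeChunkPool() earlier in this patch; `pool` stands for any ChunkPool), a chunk may be removed during iteration as long as the iterator has already been advanced past it:

    // Sketch: drain a ChunkPool while iterating. Per the comment above,
    // mutation only invalidates an Iter when it affects the chunk currently
    // being visited, so call next() before removing the fetched chunk.
    for (ChunkPool::Iter iter(pool); !iter.done();) {
      ArenaChunk* chunk = iter.get();
      iter.next();
      pool.remove(chunk);
    }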

View File

@@ -159,7 +159,7 @@ template <size_t BytesPerMarkBit, size_t FirstThingOffset>
 MOZ_ALWAYS_INLINE void
 js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::copyMarkBit(
     TenuredCell* dst, const TenuredCell* src, ColorBit colorBit) {
-  TenuredChunkBase* srcChunk = detail::GetCellChunkBase(src);
+  ArenaChunkBase* srcChunk = detail::GetCellChunkBase(src);
   MarkBitmapWord* srcWord;
   uintptr_t srcMask;
   srcChunk->markBits.getMarkWordAndMask(src, colorBit, &srcWord, &srcMask);

View File

@@ -14,7 +14,7 @@
  *  - ArenaList
  *  - FreeLists
  *  - ArenaLists
- *  - TenuredChunk
+ *  - ArenaChunk
  *  - ChunkPool
  */
@@ -305,7 +305,7 @@ void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
 #endif
 }

-inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
+inline bool ArenaChunk::canDecommitPage(size_t pageIndex) const {
   if (decommittedPages[pageIndex]) {
     return false;
   }
@@ -320,8 +320,8 @@ inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
   return true;
 }

-void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
-                                      AutoLockGC& lock) {
+void ArenaChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
+                                    AutoLockGC& lock) {
   MOZ_ASSERT(DecommitEnabled());

   for (size_t i = 0; i < PagesPerChunk; i++) {
@@ -335,14 +335,14 @@ void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
   }
 }

-void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
-                                size_t thingsPerArena) {
+void ArenaChunk::recycleArena(Arena* arena, SortedArenaList& dest,
+                              size_t thingsPerArena) {
   arena->setAsFullyUnused();
   dest.insertAt(arena, thingsPerArena);
 }

-void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
-                                const AutoLockGC& lock) {
+void ArenaChunk::releaseArena(GCRuntime* gc, Arena* arena,
+                              const AutoLockGC& lock) {
   MOZ_ASSERT(!arena->allocated());
   MOZ_ASSERT(!freeCommittedArenas[arenaIndex(arena)]);
@@ -355,8 +355,8 @@ void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
   updateChunkListAfterFree(gc, 1, lock);
 }

-bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
-                                       AutoLockGC& lock) {
+bool ArenaChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
+                                     AutoLockGC& lock) {
   MOZ_ASSERT(DecommitEnabled());
   MOZ_ASSERT(canDecommitPage(pageIndex));
   MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);
@@ -401,7 +401,7 @@ bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
   return ok;
 }

-void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
+void ArenaChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
   MOZ_ASSERT(DecommitEnabled());

   for (size_t i = 0; i < PagesPerChunk; i++) {
@@ -429,16 +429,16 @@ void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
   verify();
 }

-void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
-                                             const AutoLockGC& lock) {
+void ArenaChunk::updateChunkListAfterAlloc(GCRuntime* gc,
+                                           const AutoLockGC& lock) {
   if (MOZ_UNLIKELY(!hasAvailableArenas())) {
     gc->availableChunks(lock).remove(this);
     gc->fullChunks(lock).push(this);
   }
 }

-void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
-                                            const AutoLockGC& lock) {
+void ArenaChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
+                                          const AutoLockGC& lock) {
   if (info.numArenasFree == numArenasFree) {
     gc->fullChunks(lock).remove(this);
     gc->availableChunks(lock).push(this);
@@ -451,7 +451,7 @@ void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
   }
 }

-TenuredChunk* ChunkPool::pop() {
+ArenaChunk* ChunkPool::pop() {
   MOZ_ASSERT(bool(head_) == bool(count_));
   if (!count_) {
     return nullptr;
@@ -459,7 +459,7 @@ TenuredChunk* ChunkPool::pop() {
   return remove(head_);
 }

-void ChunkPool::push(TenuredChunk* chunk) {
+void ChunkPool::push(ArenaChunk* chunk) {
   MOZ_ASSERT(!chunk->info.next);
   MOZ_ASSERT(!chunk->info.prev);
@@ -471,7 +471,7 @@ void ChunkPool::push(TenuredChunk* chunk) {
   ++count_;
 }

-TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
+ArenaChunk* ChunkPool::remove(ArenaChunk* chunk) {
   MOZ_ASSERT(count_ > 0);
   MOZ_ASSERT(contains(chunk));
@@ -499,8 +499,8 @@ void ChunkPool::sort() {
   head_ = mergeSort(head(), count());

   // Fixup prev pointers.
-  TenuredChunk* prev = nullptr;
-  for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
+  ArenaChunk* prev = nullptr;
+  for (ArenaChunk* cur = head_; cur; cur = cur->info.next) {
     cur->info.prev = prev;
     prev = cur;
   }
@@ -510,7 +510,7 @@ void ChunkPool::sort() {
   MOZ_ASSERT(isSorted());
 }

-TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
+ArenaChunk* ChunkPool::mergeSort(ArenaChunk* list, size_t count) {
   MOZ_ASSERT(bool(list) == bool(count));
   if (count < 2) {
@@ -520,10 +520,10 @@ TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
   size_t half = count / 2;

   // Split;
-  TenuredChunk* front = list;
-  TenuredChunk* back;
+  ArenaChunk* front = list;
+  ArenaChunk* back;
   {
-    TenuredChunk* cur = list;
+    ArenaChunk* cur = list;
     for (size_t i = 0; i < half - 1; i++) {
       MOZ_ASSERT(cur);
       cur = cur->info.next;
@@ -537,7 +537,7 @@ TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
   // Merge
   list = nullptr;
-  TenuredChunk** cur = &list;
+  ArenaChunk** cur = &list;
   while (front || back) {
     if (!front) {
       *cur = back;
@@ -566,7 +566,7 @@ TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {

 bool ChunkPool::isSorted() const {
   uint32_t last = 1;
-  for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
+  for (ArenaChunk* cursor = head_; cursor; cursor = cursor->info.next) {
     if (cursor->info.numArenasFree < last) {
       return false;
     }
@@ -577,9 +577,9 @@ bool ChunkPool::isSorted() const {

 #ifdef DEBUG

-bool ChunkPool::contains(TenuredChunk* chunk) const {
+bool ChunkPool::contains(ArenaChunk* chunk) const {
   verify();
-  for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
+  for (ArenaChunk* cursor = head_; cursor; cursor = cursor->info.next) {
     if (cursor == chunk) {
       return true;
     }
@@ -590,7 +590,7 @@ bool ChunkPool::contains(TenuredChunk* chunk) const {
 bool ChunkPool::verify() const {
   MOZ_ASSERT(bool(head_) == bool(count_));
   uint32_t count = 0;
-  for (TenuredChunk* cursor = head_; cursor;
+  for (ArenaChunk* cursor = head_; cursor;
        cursor = cursor->info.next, ++count) {
     MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
     MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
@@ -600,19 +600,19 @@ bool ChunkPool::verify() const {
 }

 void ChunkPool::verifyChunks() const {
-  for (TenuredChunk* chunk = head_; chunk; chunk = chunk->info.next) {
+  for (ArenaChunk* chunk = head_; chunk; chunk = chunk->info.next) {
     chunk->verify();
   }
 }

-void TenuredChunk::verify() const {
+void ArenaChunk::verify() const {
   // Check the mark bits for each arena are aligned to the cache line size.
-  static_assert((offsetof(TenuredChunk, arenas) % ArenaSize) == 0);
+  static_assert((offsetof(ArenaChunk, arenas) % ArenaSize) == 0);
   constexpr size_t CellBytesPerMarkByte = CellBytesPerMarkBit * 8;
   static_assert((ArenaSize % CellBytesPerMarkByte) == 0);
   constexpr size_t MarkBytesPerArena = ArenaSize / CellBytesPerMarkByte;
   static_assert((MarkBytesPerArena % TypicalCacheLineSize) == 0);
-  static_assert((offsetof(TenuredChunk, markBits) % TypicalCacheLineSize) == 0);
+  static_assert((offsetof(ArenaChunk, markBits) % TypicalCacheLineSize) == 0);

   MOZ_ASSERT(info.numArenasFree <= ArenasPerChunk);
   MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

View File

@@ -272,7 +272,7 @@ class alignas(ArenaSize) Arena {
   inline void checkAddress() const;

-  inline TenuredChunk* chunk() const;
+  inline ArenaChunk* chunk() const;

   bool allocated() const {
     MOZ_ASSERT(IsAllocKind(AllocKind(allocKind)));
@@ -478,38 +478,38 @@ inline void FreeSpan::checkRange(uintptr_t first, uintptr_t last,
 }

 /*
- * A chunk in the tenured heap. TenuredChunks contain arenas and associated data
+ * A chunk in the tenured heap. ArenaChunks contain arenas and associated data
  * structures (mark bitmap, delayed marking state).
  */
-class TenuredChunk : public TenuredChunkBase {
+class ArenaChunk : public ArenaChunkBase {
   Arena arenas[ArenasPerChunk];

   friend class GCRuntime;
   friend class MarkingValidator;

  public:
-  static TenuredChunk* fromAddress(uintptr_t addr) {
+  static ArenaChunk* fromAddress(uintptr_t addr) {
     addr &= ~ChunkMask;
-    return reinterpret_cast<TenuredChunk*>(addr);
+    return reinterpret_cast<ArenaChunk*>(addr);
   }

   static bool withinValidRange(uintptr_t addr) {
     uintptr_t offset = addr & ChunkMask;
-    if (TenuredChunk::fromAddress(addr)->isNurseryChunk()) {
+    if (ArenaChunk::fromAddress(addr)->isNurseryChunk()) {
       return offset >= sizeof(ChunkBase) && offset < ChunkSize;
     }
-    return offset >= offsetof(TenuredChunk, arenas) && offset < ChunkSize;
+    return offset >= offsetof(ArenaChunk, arenas) && offset < ChunkSize;
   }

   static size_t arenaIndex(const Arena* arena) {
     uintptr_t addr = arena->address();
-    MOZ_ASSERT(!TenuredChunk::fromAddress(addr)->isNurseryChunk());
+    MOZ_ASSERT(!ArenaChunk::fromAddress(addr)->isNurseryChunk());
     MOZ_ASSERT(withinValidRange(addr));
     uintptr_t offset = addr & ChunkMask;
-    return (offset - offsetof(TenuredChunk, arenas)) >> ArenaShift;
+    return (offset - offsetof(ArenaChunk, arenas)) >> ArenaShift;
   }

-  explicit TenuredChunk(JSRuntime* runtime) : TenuredChunkBase(runtime) {}
+  explicit ArenaChunk(JSRuntime* runtime) : ArenaChunkBase(runtime) {}

   uintptr_t address() const {
     uintptr_t addr = reinterpret_cast<uintptr_t>(this);
@@ -539,8 +539,7 @@ class TenuredChunk : public TenuredChunkBase {
   void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);

   static void* allocate(GCRuntime* gc);
-  static TenuredChunk* emplace(void* ptr, GCRuntime* gc,
-                               bool allMemoryCommitted);
+  static ArenaChunk* emplace(void* ptr, GCRuntime* gc, bool allMemoryCommitted);

   /* Unlink and return the freeArenasHead. */
   Arena* fetchNextFreeArena(GCRuntime* gc);
@@ -585,11 +584,11 @@ inline void Arena::checkAddress() const {
   mozilla::DebugOnly<uintptr_t> addr = uintptr_t(this);
   MOZ_ASSERT(addr);
   MOZ_ASSERT(!(addr & ArenaMask));
-  MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
+  MOZ_ASSERT(ArenaChunk::withinValidRange(addr));
 }

-inline TenuredChunk* Arena::chunk() const {
-  return TenuredChunk::fromAddress(address());
+inline ArenaChunk* Arena::chunk() const {
+  return ArenaChunk::fromAddress(address());
 }

 // Cell header stored before all nursery cells.

View File

@@ -2928,7 +2928,7 @@ uintptr_t* GetMarkWordAddress(Cell* cell) {
   MarkBitmapWord* wordp;
   uintptr_t mask;
-  TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
+  ArenaChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
   chunk->markBits.getMarkWordAndMask(&cell->asTenured(), ColorBit::BlackBit,
                                      &wordp, &mask);
   return reinterpret_cast<uintptr_t*>(wordp);
@@ -2944,7 +2944,7 @@ uintptr_t GetMarkMask(Cell* cell, uint32_t colorBit) {
   ColorBit bit = colorBit == 0 ? ColorBit::BlackBit : ColorBit::GrayOrBlackBit;
   MarkBitmapWord* wordp;
   uintptr_t mask;
-  TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
+  ArenaChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
   chunk->markBits.getMarkWordAndMask(&cell->asTenured(), bit, &wordp, &mask);
   return mask;
 }

View File

@@ -61,7 +61,7 @@ static constexpr size_t NurseryChunkUsableSize =
 struct NurseryChunk : public ChunkBase {
   alignas(CellAlignBytes) uint8_t data[NurseryChunkUsableSize];

-  static NurseryChunk* fromChunk(TenuredChunk* chunk, ChunkKind kind,
+  static NurseryChunk* fromChunk(ArenaChunk* chunk, ChunkKind kind,
                                  uint8_t index);

   explicit NurseryChunk(JSRuntime* runtime, ChunkKind kind, uint8_t chunkIndex)
@@ -166,7 +166,7 @@ inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
 }

 // static
-inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk,
+inline js::NurseryChunk* js::NurseryChunk::fromChunk(ArenaChunk* chunk,
                                                      ChunkKind kind,
                                                      uint8_t index) {
   return new (chunk) NurseryChunk(chunk->runtime, kind, index);
@@ -210,8 +210,8 @@ void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
     NurseryChunk* nurseryChunk = chunksToDecommit().popCopy();
     AutoUnlockHelperThreadState unlock(lock);
     nurseryChunk->~NurseryChunk();
-    TenuredChunk* tenuredChunk = TenuredChunk::emplace(
-        nurseryChunk, gc, /* allMemoryCommitted = */ false);
+    ArenaChunk* tenuredChunk =
+        ArenaChunk::emplace(nurseryChunk, gc, /* allMemoryCommitted = */ false);
     AutoLockGC lock(gc);
     gc->recycleChunk(tenuredChunk, lock);
   }
@@ -2113,12 +2113,12 @@ bool js::Nursery::allocateNextChunk(AutoLockGCBgAlloc& lock) {
     return false;
   }

-  TenuredChunk* toSpaceChunk = gc->getOrAllocChunk(lock);
+  ArenaChunk* toSpaceChunk = gc->getOrAllocChunk(lock);
   if (!toSpaceChunk) {
     return false;
   }

-  TenuredChunk* fromSpaceChunk = nullptr;
+  ArenaChunk* fromSpaceChunk = nullptr;
   if (semispaceEnabled_ && !(fromSpaceChunk = gc->getOrAllocChunk(lock))) {
     gc->recycleChunk(toSpaceChunk, lock);
     return false;

View File

@@ -443,18 +443,18 @@ void js::gc::GCRuntime::finishVerifier() {
 }

 struct GCChunkHasher {
-  using Lookup = gc::TenuredChunk*;
+  using Lookup = gc::ArenaChunk*;

   /*
    * Strip zeros for better distribution after multiplying by the golden
    * ratio.
    */
-  static HashNumber hash(gc::TenuredChunk* chunk) {
+  static HashNumber hash(gc::ArenaChunk* chunk) {
     MOZ_ASSERT(!(uintptr_t(chunk) & gc::ChunkMask));
     return HashNumber(uintptr_t(chunk) >> gc::ChunkShift);
   }

-  static bool match(gc::TenuredChunk* k, gc::TenuredChunk* l) {
+  static bool match(gc::ArenaChunk* k, gc::ArenaChunk* l) {
     MOZ_ASSERT(!(uintptr_t(k) & gc::ChunkMask));
     MOZ_ASSERT(!(uintptr_t(l) & gc::ChunkMask));
     return k == l;
@@ -471,7 +471,7 @@ class js::gc::MarkingValidator {
   GCRuntime* gc;
   bool initialized;

-  using BitmapMap = HashMap<TenuredChunk*, UniquePtr<ChunkMarkBitmap>,
+  using BitmapMap = HashMap<ArenaChunk*, UniquePtr<ChunkMarkBitmap>,
                             GCChunkHasher, SystemAllocPolicy>;
   BitmapMap map;
 };

View File

@@ -19,9 +19,9 @@ BEGIN_TEST(testGCChunkPool) {

   // Create.
   for (int i = 0; i < N; ++i) {
-    void* ptr = TenuredChunk::allocate(&cx->runtime()->gc);
+    void* ptr = ArenaChunk::allocate(&cx->runtime()->gc);
     CHECK(ptr);
-    TenuredChunk* chunk = TenuredChunk::emplace(ptr, &cx->runtime()->gc, true);
+    ArenaChunk* chunk = ArenaChunk::emplace(ptr, &cx->runtime()->gc, true);
     CHECK(chunk);
     pool.push(chunk);
   }
@@ -37,9 +37,9 @@ BEGIN_TEST(testGCChunkPool) {

   // Push/Pop.
   for (int i = 0; i < N; ++i) {
-    TenuredChunk* chunkA = pool.pop();
-    TenuredChunk* chunkB = pool.pop();
-    TenuredChunk* chunkC = pool.pop();
+    ArenaChunk* chunkA = pool.pop();
+    ArenaChunk* chunkB = pool.pop();
+    ArenaChunk* chunkC = pool.pop();
     pool.push(chunkA);
     pool.push(chunkB);
     pool.push(chunkC);
@@ -47,7 +47,7 @@ BEGIN_TEST(testGCChunkPool) {
   MOZ_ASSERT(pool.verify());

   // Remove.
-  TenuredChunk* chunk = nullptr;
+  ArenaChunk* chunk = nullptr;
   int offset = N / 2;
   for (ChunkPool::Iter iter(pool); !iter.done(); iter.next(), --offset) {
     if (offset == 0) {
@@ -63,7 +63,7 @@ BEGIN_TEST(testGCChunkPool) {
   // Destruct.
   js::AutoLockGC lock(cx->runtime());
   for (ChunkPool::Iter iter(pool); !iter.done();) {
-    TenuredChunk* chunk = iter.get();
+    ArenaChunk* chunk = iter.get();
     iter.next();
     pool.remove(chunk);
     UnmapPages(chunk, ChunkSize);

View File

@@ -188,7 +188,7 @@ struct StatsClosure {
 };

 static void DecommittedPagesChunkCallback(JSRuntime* rt, void* data,
-                                          gc::TenuredChunk* chunk,
+                                          gc::ArenaChunk* chunk,
                                           const JS::AutoRequireNoGC& nogc) {
   size_t n = 0;
   for (uint32_t word : chunk->decommittedPages.Storage()) {
@@ -731,7 +731,7 @@ static bool CollectRuntimeStatsHelper(JSContext* cx, RuntimeStats* rtStats,
   size_t numDirtyChunks =
       (rtStats->gcHeapChunkTotal - rtStats->gcHeapUnusedChunks) / gc::ChunkSize;
   size_t perChunkAdmin =
-      sizeof(gc::TenuredChunk) - (sizeof(gc::Arena) * gc::ArenasPerChunk);
+      sizeof(gc::ArenaChunk) - (sizeof(gc::Arena) * gc::ArenasPerChunk);
   rtStats->gcHeapChunkAdmin = numDirtyChunks * perChunkAdmin;

   // |gcHeapUnusedArenas| is the only thing left. Compute it in terms of