Bug 1916758 - Part 6: Rename TenuredChunk to ArenaChunk since we will add another kind of tenured chunk r=sfink

To avoid confusion, the TenuredChunk name should, if anything, end up on a base class.
This renames TenuredChunk and TenuredChunkBase to ArenaChunk and ArenaChunkBase,
and ChunkKind::TenuredHeap to ChunkKind::TenuredArenas.
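
A minimal sketch of the resulting naming, reconstructed from the declarations in this patch (members elided, nursery chunk classes omitted):

enum class ChunkKind : uint8_t {
  Invalid = 0,
  TenuredArenas,    // previously TenuredHeap
  NurseryToSpace,
  NurseryFromSpace
};

class ChunkBase { /* runtime, storeBuffer, kind, ... */ };

// Previously TenuredChunkBase: data members for a chunk holding fixed-size arenas.
class ArenaChunkBase : public ChunkBase { /* info, markBits, freeCommittedArenas, ... */ };

// Previously TenuredChunk: the full chunk, including the arena array itself.
class ArenaChunk : public ArenaChunkBase { /* Arena arenas[ArenasPerChunk]; */ };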

Differential Revision: https://phabricator.services.mozilla.com/D221065
Jon Coppeard 2024-09-05 09:52:49 +00:00
Parent 3235af3a70
Commit 0689a43d58
14 changed files with 149 additions and 154 deletions

View file

@@ -37,7 +37,7 @@ namespace gc {
class Arena;
struct Cell;
class TenuredChunk;
class ArenaChunk;
class StoreBuffer;
class TenuredCell;
@@ -84,7 +84,7 @@ const size_t ArenaBitmapWords = HowMany(ArenaBitmapBits, JS_BITS_PER_WORD);
enum class ChunkKind : uint8_t {
Invalid = 0,
TenuredHeap,
TenuredArenas,
NurseryToSpace,
NurseryFromSpace
};
@@ -97,13 +97,13 @@ class ChunkBase {
// Initialize a tenured heap chunk.
explicit ChunkBase(JSRuntime* rt) {
MOZ_ASSERT((uintptr_t(this) & ChunkMask) == 0);
initBaseForTenuredChunk(rt);
initBaseForArenaChunk(rt);
}
void initBaseForTenuredChunk(JSRuntime* rt) {
void initBaseForArenaChunk(JSRuntime* rt) {
runtime = rt;
storeBuffer = nullptr;
kind = ChunkKind::TenuredHeap;
kind = ChunkKind::TenuredArenas;
nurseryChunkIndex = UINT8_MAX;
}
@@ -123,7 +123,7 @@ class ChunkBase {
ChunkKind getKind() const {
MOZ_ASSERT_IF(storeBuffer, kind == ChunkKind::NurseryToSpace ||
kind == ChunkKind::NurseryFromSpace);
MOZ_ASSERT_IF(!storeBuffer, kind == ChunkKind::TenuredHeap);
MOZ_ASSERT_IF(!storeBuffer, kind == ChunkKind::TenuredArenas);
return kind;
}
@@ -139,12 +139,12 @@ class ChunkBase {
uint8_t nurseryChunkIndex;
};
// Information about tenured heap chunks.
struct TenuredChunkInfo {
// Information about tenured heap chunks containing arenas.
struct ArenaChunkInfo {
private:
friend class ChunkPool;
TenuredChunk* next = nullptr;
TenuredChunk* prev = nullptr;
ArenaChunk* next = nullptr;
ArenaChunk* prev = nullptr;
public:
/* Number of free arenas, either committed or decommitted. */
@@ -180,7 +180,7 @@ const size_t BitsPerPageWithHeaders =
(ArenaSize + ArenaBitmapBytes) * ArenasPerPage * CHAR_BIT + ArenasPerPage +
1;
const size_t ChunkBitsAvailable =
(ChunkSize - sizeof(ChunkBase) - sizeof(TenuredChunkInfo)) * CHAR_BIT;
(ChunkSize - sizeof(ChunkBase) - sizeof(ArenaChunkInfo)) * CHAR_BIT;
const size_t PagesPerChunk = ChunkBitsAvailable / BitsPerPageWithHeaders;
const size_t ArenasPerChunk = PagesPerChunk * ArenasPerPage;
const size_t FreeCommittedBits = ArenasPerChunk;
@@ -190,7 +190,7 @@ const size_t BitsPerArenaWithHeaders =
(DecommitBits / ArenasPerChunk) + 1;
const size_t CalculatedChunkSizeRequired =
sizeof(ChunkBase) + sizeof(TenuredChunkInfo) +
sizeof(ChunkBase) + sizeof(ArenaChunkInfo) +
RoundUp(ArenasPerChunk * ArenaBitmapBytes, sizeof(uintptr_t)) +
RoundUp(FreeCommittedBits, sizeof(uint32_t) * CHAR_BIT) / CHAR_BIT +
RoundUp(DecommitBits, sizeof(uint32_t) * CHAR_BIT) / CHAR_BIT +
@@ -307,16 +307,16 @@ using ChunkPageBitmap = mozilla::BitSet<PagesPerChunk, uint32_t>;
// Bitmap with one bit per arena used for free committed arena set.
using ChunkArenaBitmap = mozilla::BitSet<ArenasPerChunk, uint32_t>;
// Base class containing data members for a tenured heap chunk.
class TenuredChunkBase : public ChunkBase {
// Base class for a tenured heap chunk containing fixed size arenas.
class ArenaChunkBase : public ChunkBase {
public:
TenuredChunkInfo info;
ArenaChunkInfo info;
ChunkMarkBitmap markBits;
ChunkArenaBitmap freeCommittedArenas;
ChunkPageBitmap decommittedPages;
protected:
explicit TenuredChunkBase(JSRuntime* runtime) : ChunkBase(runtime) {
explicit ArenaChunkBase(JSRuntime* runtime) : ChunkBase(runtime) {
static_assert(sizeof(markBits) == ArenaBitmapBytes * ArenasPerChunk,
"Ensure our MarkBitmap actually covers all arenas.");
info.numArenasFree = ArenasPerChunk;
@@ -325,7 +325,7 @@ class TenuredChunkBase : public ChunkBase {
void initAsDecommitted();
};
static_assert(FirstArenaOffset ==
RoundUp(sizeof(gc::TenuredChunkBase), ArenaSize));
RoundUp(sizeof(gc::ArenaChunkBase), ArenaSize));
/*
* We sometimes use an index to refer to a cell in an arena. The index for a
@@ -336,7 +336,7 @@ const size_t ArenaCellIndexBytes = CellAlignBytes;
const size_t MaxArenaCellIndex = ArenaSize / CellAlignBytes;
const size_t ChunkStoreBufferOffset = offsetof(ChunkBase, storeBuffer);
const size_t ChunkMarkBitmapOffset = offsetof(TenuredChunkBase, markBits);
const size_t ChunkMarkBitmapOffset = offsetof(ArenaChunkBase, markBits);
// Hardcoded offsets into Arena class.
const size_t ArenaZoneOffset = 2 * sizeof(uint32_t);
@@ -592,13 +592,12 @@ static MOZ_ALWAYS_INLINE ChunkBase* GetCellChunkBase(const Cell* cell) {
return GetGCAddressChunkBase(cell);
}
static MOZ_ALWAYS_INLINE TenuredChunkBase* GetCellChunkBase(
static MOZ_ALWAYS_INLINE ArenaChunkBase* GetCellChunkBase(
const TenuredCell* cell) {
MOZ_ASSERT(cell);
auto* chunk =
reinterpret_cast<TenuredChunkBase*>(uintptr_t(cell) & ~ChunkMask);
auto* chunk = reinterpret_cast<ArenaChunkBase*>(uintptr_t(cell) & ~ChunkMask);
MOZ_ASSERT(chunk->runtime);
MOZ_ASSERT(chunk->kind == ChunkKind::TenuredHeap);
MOZ_ASSERT(chunk->kind == ChunkKind::TenuredArenas);
return chunk;
}
@@ -615,7 +614,7 @@ static MOZ_ALWAYS_INLINE bool TenuredCellIsMarkedBlack(
MOZ_ASSERT(cell);
MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
TenuredChunkBase* chunk = GetCellChunkBase(cell);
ArenaChunkBase* chunk = GetCellChunkBase(cell);
return chunk->markBits.isMarkedBlack(cell);
}
@@ -627,14 +626,14 @@ static MOZ_ALWAYS_INLINE bool NonBlackCellIsMarkedGray(
MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
MOZ_ASSERT(!TenuredCellIsMarkedBlack(cell));
TenuredChunkBase* chunk = GetCellChunkBase(cell);
ArenaChunkBase* chunk = GetCellChunkBase(cell);
return chunk->markBits.markBit(cell, ColorBit::GrayOrBlackBit);
}
static MOZ_ALWAYS_INLINE bool TenuredCellIsMarkedGray(const TenuredCell* cell) {
MOZ_ASSERT(cell);
MOZ_ASSERT(!js::gc::IsInsideNursery(cell));
TenuredChunkBase* chunk = GetCellChunkBase(cell);
ArenaChunkBase* chunk = GetCellChunkBase(cell);
return chunk->markBits.isMarkedGray(cell);
}

View file

@@ -305,7 +305,7 @@ AllocSite* CellAllocator::MaybeGenerateMissingAllocSite(JSContext* cx,
void CellAllocator::CheckIncrementalZoneState(JS::Zone* zone, void* ptr) {
MOZ_ASSERT(ptr);
TenuredCell* cell = reinterpret_cast<TenuredCell*>(ptr);
TenuredChunkBase* chunk = detail::GetCellChunkBase(cell);
ArenaChunkBase* chunk = detail::GetCellChunkBase(cell);
if (zone->isGCMarkingOrSweeping()) {
MOZ_ASSERT(chunk->markBits.isMarkedBlack(cell));
} else {
@@ -388,13 +388,13 @@ void* ArenaLists::refillFreeListAndAllocate(
maybeLock.emplace(rt);
}
TenuredChunk* chunk = rt->gc.pickChunk(maybeLock.ref());
ArenaChunk* chunk = rt->gc.pickChunk(maybeLock.ref());
if (!chunk) {
return nullptr;
}
// Although our chunk should definitely have enough space for another arena,
// there are other valid reasons why TenuredChunk::allocateArena() may fail.
// there are other valid reasons why ArenaChunk::allocateArena() may fail.
arena = rt->gc.allocateArena(chunk, zone_, thingKind, checkThresholds,
maybeLock.ref());
if (!arena) {
@@ -442,7 +442,7 @@ void Arena::arenaAllocatedDuringGC() {
}
}
// /////////// TenuredChunk -> Arena Allocator ///////////////////////////////
// /////////// ArenaChunk -> Arena Allocator /////////////////////////////////
bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
// To minimize memory waste, we do not want to run the background chunk
@@ -453,7 +453,7 @@ bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
(fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
}
Arena* GCRuntime::allocateArena(TenuredChunk* chunk, Zone* zone,
Arena* GCRuntime::allocateArena(ArenaChunk* chunk, Zone* zone,
AllocKind thingKind,
ShouldCheckThresholds checkThresholds,
const AutoLockGC& lock) {
@@ -476,9 +476,8 @@ Arena* GCRuntime::allocateArena(TenuredChunk* chunk, Zone* zone,
return arena;
}
Arena* TenuredChunk::allocateArena(GCRuntime* gc, Zone* zone,
AllocKind thingKind,
const AutoLockGC& lock) {
Arena* ArenaChunk::allocateArena(GCRuntime* gc, Zone* zone, AllocKind thingKind,
const AutoLockGC& lock) {
if (info.numArenasFreeCommitted == 0) {
commitOnePage(gc);
MOZ_ASSERT(info.numArenasFreeCommitted == ArenasPerPage);
@@ -511,7 +510,7 @@ static inline size_t FindFirstBitSet(
MOZ_CRASH("No bits found");
}
void TenuredChunk::commitOnePage(GCRuntime* gc) {
void ArenaChunk::commitOnePage(GCRuntime* gc) {
MOZ_ASSERT(info.numArenasFreeCommitted == 0);
MOZ_ASSERT(info.numArenasFree >= ArenasPerPage);
@@ -535,7 +534,7 @@ void TenuredChunk::commitOnePage(GCRuntime* gc) {
verify();
}
Arena* TenuredChunk::fetchNextFreeArena(GCRuntime* gc) {
Arena* ArenaChunk::fetchNextFreeArena(GCRuntime* gc) {
MOZ_ASSERT(info.numArenasFreeCommitted > 0);
MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
@@ -549,23 +548,23 @@ Arena* TenuredChunk::fetchNextFreeArena(GCRuntime* gc) {
return &arenas[index];
}
// /////////// System -> TenuredChunk Allocator //////////////////////////////
// /////////// System -> ArenaChunk Allocator ////////////////////////////////
TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
TenuredChunk* chunk = emptyChunks(lock).pop();
ArenaChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
ArenaChunk* chunk = emptyChunks(lock).pop();
if (chunk) {
// Reinitialize ChunkBase; arenas are all free and may or may not be
// committed.
SetMemCheckKind(chunk, sizeof(ChunkBase), MemCheckKind::MakeUndefined);
chunk->initBaseForTenuredChunk(rt);
chunk->initBaseForArenaChunk(rt);
MOZ_ASSERT(chunk->unused());
} else {
void* ptr = TenuredChunk::allocate(this);
void* ptr = ArenaChunk::allocate(this);
if (!ptr) {
return nullptr;
}
chunk = TenuredChunk::emplace(ptr, this, /* allMemoryCommitted = */ true);
chunk = ArenaChunk::emplace(ptr, this, /* allMemoryCommitted = */ true);
MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
}
@@ -576,7 +575,7 @@ TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
return chunk;
}
void GCRuntime::recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock) {
void GCRuntime::recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock) {
#ifdef DEBUG
MOZ_ASSERT(chunk->unused());
chunk->verify();
@@ -589,12 +588,12 @@ void GCRuntime::recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock) {
emptyChunks(lock).push(chunk);
}
TenuredChunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
ArenaChunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
if (availableChunks(lock).count()) {
return availableChunks(lock).head();
}
TenuredChunk* chunk = getOrAllocChunk(lock);
ArenaChunk* chunk = getOrAllocChunk(lock);
if (!chunk) {
return nullptr;
}
@@ -623,21 +622,21 @@ void BackgroundAllocTask::run(AutoLockHelperThreadState& lock) {
AutoLockGC gcLock(gc);
while (!isCancelled() && gc->wantBackgroundAllocation(gcLock)) {
TenuredChunk* chunk;
ArenaChunk* chunk;
{
AutoUnlockGC unlock(gcLock);
void* ptr = TenuredChunk::allocate(gc);
void* ptr = ArenaChunk::allocate(gc);
if (!ptr) {
break;
}
chunk = TenuredChunk::emplace(ptr, gc, /* allMemoryCommitted = */ true);
chunk = ArenaChunk::emplace(ptr, gc, /* allMemoryCommitted = */ true);
}
chunkPool_.ref().push(chunk);
}
}
/* static */
void* TenuredChunk::allocate(GCRuntime* gc) {
void* ArenaChunk::allocate(GCRuntime* gc) {
void* chunk = MapAlignedPages(ChunkSize, ChunkSize);
if (!chunk) {
return nullptr;
@@ -656,8 +655,8 @@ static inline bool ShouldDecommitNewChunk(bool allMemoryCommitted,
return !allMemoryCommitted || !state.inHighFrequencyGCMode();
}
TenuredChunk* TenuredChunk::emplace(void* ptr, GCRuntime* gc,
bool allMemoryCommitted) {
ArenaChunk* ArenaChunk::emplace(void* ptr, GCRuntime* gc,
bool allMemoryCommitted) {
/* The chunk may still have some regions marked as no-access. */
MOZ_MAKE_MEM_UNDEFINED(ptr, ChunkSize);
@@ -667,7 +666,7 @@ TenuredChunk* TenuredChunk::emplace(void* ptr, GCRuntime* gc,
*/
Poison(ptr, JS_FRESH_TENURED_PATTERN, ChunkSize, MemCheckKind::MakeUndefined);
TenuredChunk* chunk = new (mozilla::KnownNotNull, ptr) TenuredChunk(gc->rt);
ArenaChunk* chunk = new (mozilla::KnownNotNull, ptr) ArenaChunk(gc->rt);
if (ShouldDecommitNewChunk(allMemoryCommitted, gc->schedulingState)) {
// Decommit the arenas. We do this after poisoning so that if the OS does
@@ -684,13 +683,13 @@ TenuredChunk* TenuredChunk::emplace(void* ptr, GCRuntime* gc,
return chunk;
}
void TenuredChunk::decommitAllArenas() {
void ArenaChunk::decommitAllArenas() {
MOZ_ASSERT(unused());
MarkPagesUnusedSoft(&arenas[0], ArenasPerChunk * ArenaSize);
initAsDecommitted();
}
void TenuredChunkBase::initAsDecommitted() {
void ArenaChunkBase::initAsDecommitted() {
// Set the state of all arenas to free and decommitted. They might not
// actually be decommitted, but in that case the re-commit operation is a
// no-op so it doesn't matter.

View file

@@ -239,9 +239,7 @@ class TenuredCell : public Cell {
return true;
}
TenuredChunk* chunk() const {
return static_cast<TenuredChunk*>(Cell::chunk());
}
ArenaChunk* chunk() const { return static_cast<ArenaChunk*>(Cell::chunk()); }
// Mark bit management.
MOZ_ALWAYS_INLINE bool isMarkedAny() const;
@@ -352,7 +350,7 @@ inline JSRuntime* Cell::runtimeFromAnyThread() const {
inline uintptr_t Cell::address() const {
uintptr_t addr = uintptr_t(this);
MOZ_ASSERT(addr % CellAlignBytes == 0);
MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
MOZ_ASSERT(ArenaChunk::withinValidRange(addr));
return addr;
}

View file

@@ -341,7 +341,7 @@ ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
ChunkPool expired;
while (tooManyEmptyChunks(lock)) {
TenuredChunk* chunk = emptyChunks(lock).pop();
ArenaChunk* chunk = emptyChunks(lock).pop();
prepareToFreeChunk(chunk->info);
expired.push(chunk);
}
@@ -354,7 +354,7 @@ ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
static void FreeChunkPool(ChunkPool& pool) {
for (ChunkPool::Iter iter(pool); !iter.done();) {
TenuredChunk* chunk = iter.get();
ArenaChunk* chunk = iter.get();
iter.next();
pool.remove(chunk);
MOZ_ASSERT(chunk->unused());
@@ -367,7 +367,7 @@ void GCRuntime::freeEmptyChunks(const AutoLockGC& lock) {
FreeChunkPool(emptyChunks(lock));
}
inline void GCRuntime::prepareToFreeChunk(TenuredChunkInfo& info) {
inline void GCRuntime::prepareToFreeChunk(ArenaChunkInfo& info) {
stats().count(gcstats::COUNT_DESTROY_CHUNK);
#ifdef DEBUG
/*
@@ -2126,14 +2126,14 @@ void js::gc::BackgroundDecommitTask::run(AutoLockHelperThreadState& lock) {
gc->maybeRequestGCAfterBackgroundTask(lock);
}
static inline bool CanDecommitWholeChunk(TenuredChunk* chunk) {
static inline bool CanDecommitWholeChunk(ArenaChunk* chunk) {
return chunk->unused() && chunk->info.numArenasFreeCommitted != 0;
}
// Called from a background thread to decommit free arenas. Releases the GC
// lock.
void GCRuntime::decommitEmptyChunks(const bool& cancel, AutoLockGC& lock) {
Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
Vector<ArenaChunk*, 0, SystemAllocPolicy> chunksToDecommit;
for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next()) {
if (CanDecommitWholeChunk(chunk) && !chunksToDecommit.append(chunk)) {
onOutOfMallocMemory(lock);
@@ -2141,7 +2141,7 @@ void GCRuntime::decommitEmptyChunks(const bool& cancel, AutoLockGC& lock) {
}
}
for (TenuredChunk* chunk : chunksToDecommit) {
for (ArenaChunk* chunk : chunksToDecommit) {
if (cancel) {
break;
}
@@ -2174,7 +2174,7 @@ void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
// it is dangerous to iterate the available list directly, as the active
// thread could modify it concurrently. Instead, we build and pass an
// explicit Vector containing the Chunks we want to visit.
Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
Vector<ArenaChunk*, 0, SystemAllocPolicy> chunksToDecommit;
for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
chunk.next()) {
if (chunk->info.numArenasFreeCommitted != 0 &&
@@ -2184,7 +2184,7 @@ void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
}
}
for (TenuredChunk* chunk : chunksToDecommit) {
for (ArenaChunk* chunk : chunksToDecommit) {
chunk->decommitFreeArenas(this, cancel, lock);
}
}

View file

@@ -31,7 +31,7 @@ class Nursery;
namespace gc {
class Arena;
class TenuredChunk;
class ArenaChunk;
} /* namespace gc */
@@ -111,7 +111,7 @@ extern unsigned NotifyGCPreSwap(JSObject* a, JSObject* b);
extern void NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags);
using IterateChunkCallback = void (*)(JSRuntime*, void*, gc::TenuredChunk*,
using IterateChunkCallback = void (*)(JSRuntime*, void*, gc::ArenaChunk*,
const JS::AutoRequireNoGC&);
using IterateZoneCallback = void (*)(JSRuntime*, void*, JS::Zone*,
const JS::AutoRequireNoGC&);

View file

@@ -72,7 +72,7 @@ struct SweepAction {
};
class ChunkPool {
TenuredChunk* head_;
ArenaChunk* head_;
size_t count_;
public:
@@ -97,41 +97,41 @@ class ChunkPool {
bool empty() const { return !head_; }
size_t count() const { return count_; }
TenuredChunk* head() {
ArenaChunk* head() {
MOZ_ASSERT(head_);
return head_;
}
TenuredChunk* pop();
void push(TenuredChunk* chunk);
TenuredChunk* remove(TenuredChunk* chunk);
ArenaChunk* pop();
void push(ArenaChunk* chunk);
ArenaChunk* remove(ArenaChunk* chunk);
void sort();
private:
TenuredChunk* mergeSort(TenuredChunk* list, size_t count);
ArenaChunk* mergeSort(ArenaChunk* list, size_t count);
bool isSorted() const;
#ifdef DEBUG
public:
bool contains(TenuredChunk* chunk) const;
bool contains(ArenaChunk* chunk) const;
bool verify() const;
void verifyChunks() const;
#endif
public:
// Pool mutation does not invalidate an Iter unless the mutation
// is of the TenuredChunk currently being visited by the Iter.
// is of the ArenaChunk currently being visited by the Iter.
class Iter {
public:
explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
bool done() const { return !current_; }
void next();
TenuredChunk* get() const { return current_; }
operator TenuredChunk*() const { return get(); }
TenuredChunk* operator->() const { return get(); }
ArenaChunk* get() const { return current_; }
operator ArenaChunk*() const { return get(); }
ArenaChunk* operator->() const { return get(); }
private:
TenuredChunk* current_;
ArenaChunk* current_;
};
};
@@ -568,8 +568,8 @@ class GCRuntime {
void verifyAllChunks();
#endif
TenuredChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
void recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock);
ArenaChunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
void recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock);
#ifdef JS_GC_ZEAL
void startVerifyPreBarriers();
@@ -691,8 +691,8 @@ class GCRuntime {
// For ArenaLists::allocateFromArena()
friend class ArenaLists;
TenuredChunk* pickChunk(AutoLockGCBgAlloc& lock);
Arena* allocateArena(TenuredChunk* chunk, Zone* zone, AllocKind kind,
ArenaChunk* pickChunk(AutoLockGCBgAlloc& lock);
Arena* allocateArena(ArenaChunk* chunk, Zone* zone, AllocKind kind,
ShouldCheckThresholds checkThresholds,
const AutoLockGC& lock);
@@ -704,7 +704,7 @@ class GCRuntime {
bool tooManyEmptyChunks(const AutoLockGC& lock);
ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
void freeEmptyChunks(const AutoLockGC& lock);
void prepareToFreeChunk(TenuredChunkInfo& info);
void prepareToFreeChunk(ArenaChunkInfo& info);
void setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock);
friend class BackgroundAllocTask;

View file

@@ -159,7 +159,7 @@ template <size_t BytesPerMarkBit, size_t FirstThingOffset>
MOZ_ALWAYS_INLINE void
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::copyMarkBit(
TenuredCell* dst, const TenuredCell* src, ColorBit colorBit) {
TenuredChunkBase* srcChunk = detail::GetCellChunkBase(src);
ArenaChunkBase* srcChunk = detail::GetCellChunkBase(src);
MarkBitmapWord* srcWord;
uintptr_t srcMask;
srcChunk->markBits.getMarkWordAndMask(src, colorBit, &srcWord, &srcMask);

View file

@@ -14,7 +14,7 @@
* - ArenaList
* - FreeLists
* - ArenaLists
* - TenuredChunk
* - ArenaChunk
* - ChunkPool
*/
@@ -305,7 +305,7 @@ void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
#endif
}
inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
inline bool ArenaChunk::canDecommitPage(size_t pageIndex) const {
if (decommittedPages[pageIndex]) {
return false;
}
@@ -320,8 +320,8 @@ inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
return true;
}
void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
AutoLockGC& lock) {
void ArenaChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
AutoLockGC& lock) {
MOZ_ASSERT(DecommitEnabled());
for (size_t i = 0; i < PagesPerChunk; i++) {
@@ -335,14 +335,14 @@ void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
}
}
void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
size_t thingsPerArena) {
void ArenaChunk::recycleArena(Arena* arena, SortedArenaList& dest,
size_t thingsPerArena) {
arena->setAsFullyUnused();
dest.insertAt(arena, thingsPerArena);
}
void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
const AutoLockGC& lock) {
void ArenaChunk::releaseArena(GCRuntime* gc, Arena* arena,
const AutoLockGC& lock) {
MOZ_ASSERT(!arena->allocated());
MOZ_ASSERT(!freeCommittedArenas[arenaIndex(arena)]);
@@ -355,8 +355,8 @@ void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
updateChunkListAfterFree(gc, 1, lock);
}
bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
AutoLockGC& lock) {
bool ArenaChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
AutoLockGC& lock) {
MOZ_ASSERT(DecommitEnabled());
MOZ_ASSERT(canDecommitPage(pageIndex));
MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);
@@ -401,7 +401,7 @@ bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
return ok;
}
void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
void ArenaChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
MOZ_ASSERT(DecommitEnabled());
for (size_t i = 0; i < PagesPerChunk; i++) {
@@ -429,16 +429,16 @@ void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
verify();
}
void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
const AutoLockGC& lock) {
void ArenaChunk::updateChunkListAfterAlloc(GCRuntime* gc,
const AutoLockGC& lock) {
if (MOZ_UNLIKELY(!hasAvailableArenas())) {
gc->availableChunks(lock).remove(this);
gc->fullChunks(lock).push(this);
}
}
void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
const AutoLockGC& lock) {
void ArenaChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
const AutoLockGC& lock) {
if (info.numArenasFree == numArenasFree) {
gc->fullChunks(lock).remove(this);
gc->availableChunks(lock).push(this);
@@ -451,7 +451,7 @@ void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
}
}
TenuredChunk* ChunkPool::pop() {
ArenaChunk* ChunkPool::pop() {
MOZ_ASSERT(bool(head_) == bool(count_));
if (!count_) {
return nullptr;
@@ -459,7 +459,7 @@ TenuredChunk* ChunkPool::pop() {
return remove(head_);
}
void ChunkPool::push(TenuredChunk* chunk) {
void ChunkPool::push(ArenaChunk* chunk) {
MOZ_ASSERT(!chunk->info.next);
MOZ_ASSERT(!chunk->info.prev);
@@ -471,7 +471,7 @@ void ChunkPool::push(TenuredChunk* chunk) {
++count_;
}
TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
ArenaChunk* ChunkPool::remove(ArenaChunk* chunk) {
MOZ_ASSERT(count_ > 0);
MOZ_ASSERT(contains(chunk));
@@ -499,8 +499,8 @@ void ChunkPool::sort() {
head_ = mergeSort(head(), count());
// Fixup prev pointers.
TenuredChunk* prev = nullptr;
for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
ArenaChunk* prev = nullptr;
for (ArenaChunk* cur = head_; cur; cur = cur->info.next) {
cur->info.prev = prev;
prev = cur;
}
@@ -510,7 +510,7 @@ void ChunkPool::sort() {
MOZ_ASSERT(isSorted());
}
TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
ArenaChunk* ChunkPool::mergeSort(ArenaChunk* list, size_t count) {
MOZ_ASSERT(bool(list) == bool(count));
if (count < 2) {
@@ -520,10 +520,10 @@ TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
size_t half = count / 2;
// Split;
TenuredChunk* front = list;
TenuredChunk* back;
ArenaChunk* front = list;
ArenaChunk* back;
{
TenuredChunk* cur = list;
ArenaChunk* cur = list;
for (size_t i = 0; i < half - 1; i++) {
MOZ_ASSERT(cur);
cur = cur->info.next;
@@ -537,7 +537,7 @@ TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
// Merge
list = nullptr;
TenuredChunk** cur = &list;
ArenaChunk** cur = &list;
while (front || back) {
if (!front) {
*cur = back;
@@ -566,7 +566,7 @@ TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
bool ChunkPool::isSorted() const {
uint32_t last = 1;
for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
for (ArenaChunk* cursor = head_; cursor; cursor = cursor->info.next) {
if (cursor->info.numArenasFree < last) {
return false;
}
@@ -577,9 +577,9 @@ bool ChunkPool::isSorted() const {
#ifdef DEBUG
bool ChunkPool::contains(TenuredChunk* chunk) const {
bool ChunkPool::contains(ArenaChunk* chunk) const {
verify();
for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
for (ArenaChunk* cursor = head_; cursor; cursor = cursor->info.next) {
if (cursor == chunk) {
return true;
}
@@ -590,7 +590,7 @@ bool ChunkPool::contains(TenuredChunk* chunk) const {
bool ChunkPool::verify() const {
MOZ_ASSERT(bool(head_) == bool(count_));
uint32_t count = 0;
for (TenuredChunk* cursor = head_; cursor;
for (ArenaChunk* cursor = head_; cursor;
cursor = cursor->info.next, ++count) {
MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
@@ -600,19 +600,19 @@ bool ChunkPool::verify() const {
}
void ChunkPool::verifyChunks() const {
for (TenuredChunk* chunk = head_; chunk; chunk = chunk->info.next) {
for (ArenaChunk* chunk = head_; chunk; chunk = chunk->info.next) {
chunk->verify();
}
}
void TenuredChunk::verify() const {
void ArenaChunk::verify() const {
// Check the mark bits for each arena are aligned to the cache line size.
static_assert((offsetof(TenuredChunk, arenas) % ArenaSize) == 0);
static_assert((offsetof(ArenaChunk, arenas) % ArenaSize) == 0);
constexpr size_t CellBytesPerMarkByte = CellBytesPerMarkBit * 8;
static_assert((ArenaSize % CellBytesPerMarkByte) == 0);
constexpr size_t MarkBytesPerArena = ArenaSize / CellBytesPerMarkByte;
static_assert((MarkBytesPerArena % TypicalCacheLineSize) == 0);
static_assert((offsetof(TenuredChunk, markBits) % TypicalCacheLineSize) == 0);
static_assert((offsetof(ArenaChunk, markBits) % TypicalCacheLineSize) == 0);
MOZ_ASSERT(info.numArenasFree <= ArenasPerChunk);
MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

View file

@@ -272,7 +272,7 @@ class alignas(ArenaSize) Arena {
inline void checkAddress() const;
inline TenuredChunk* chunk() const;
inline ArenaChunk* chunk() const;
bool allocated() const {
MOZ_ASSERT(IsAllocKind(AllocKind(allocKind)));
@@ -478,38 +478,38 @@ inline void FreeSpan::checkRange(uintptr_t first, uintptr_t last,
}
/*
* A chunk in the tenured heap. TenuredChunks contain arenas and associated data
* A chunk in the tenured heap. ArenaChunks contain arenas and associated data
* structures (mark bitmap, delayed marking state).
*/
class TenuredChunk : public TenuredChunkBase {
class ArenaChunk : public ArenaChunkBase {
Arena arenas[ArenasPerChunk];
friend class GCRuntime;
friend class MarkingValidator;
public:
static TenuredChunk* fromAddress(uintptr_t addr) {
static ArenaChunk* fromAddress(uintptr_t addr) {
addr &= ~ChunkMask;
return reinterpret_cast<TenuredChunk*>(addr);
return reinterpret_cast<ArenaChunk*>(addr);
}
static bool withinValidRange(uintptr_t addr) {
uintptr_t offset = addr & ChunkMask;
if (TenuredChunk::fromAddress(addr)->isNurseryChunk()) {
if (ArenaChunk::fromAddress(addr)->isNurseryChunk()) {
return offset >= sizeof(ChunkBase) && offset < ChunkSize;
}
return offset >= offsetof(TenuredChunk, arenas) && offset < ChunkSize;
return offset >= offsetof(ArenaChunk, arenas) && offset < ChunkSize;
}
static size_t arenaIndex(const Arena* arena) {
uintptr_t addr = arena->address();
MOZ_ASSERT(!TenuredChunk::fromAddress(addr)->isNurseryChunk());
MOZ_ASSERT(!ArenaChunk::fromAddress(addr)->isNurseryChunk());
MOZ_ASSERT(withinValidRange(addr));
uintptr_t offset = addr & ChunkMask;
return (offset - offsetof(TenuredChunk, arenas)) >> ArenaShift;
return (offset - offsetof(ArenaChunk, arenas)) >> ArenaShift;
}
explicit TenuredChunk(JSRuntime* runtime) : TenuredChunkBase(runtime) {}
explicit ArenaChunk(JSRuntime* runtime) : ArenaChunkBase(runtime) {}
uintptr_t address() const {
uintptr_t addr = reinterpret_cast<uintptr_t>(this);
@@ -539,8 +539,7 @@ class TenuredChunk : public TenuredChunkBase {
void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);
static void* allocate(GCRuntime* gc);
static TenuredChunk* emplace(void* ptr, GCRuntime* gc,
bool allMemoryCommitted);
static ArenaChunk* emplace(void* ptr, GCRuntime* gc, bool allMemoryCommitted);
/* Unlink and return the freeArenasHead. */
Arena* fetchNextFreeArena(GCRuntime* gc);
@@ -585,11 +584,11 @@ inline void Arena::checkAddress() const {
mozilla::DebugOnly<uintptr_t> addr = uintptr_t(this);
MOZ_ASSERT(addr);
MOZ_ASSERT(!(addr & ArenaMask));
MOZ_ASSERT(TenuredChunk::withinValidRange(addr));
MOZ_ASSERT(ArenaChunk::withinValidRange(addr));
}
inline TenuredChunk* Arena::chunk() const {
return TenuredChunk::fromAddress(address());
inline ArenaChunk* Arena::chunk() const {
return ArenaChunk::fromAddress(address());
}
// Cell header stored before all nursery cells.

View file

@@ -2928,7 +2928,7 @@ uintptr_t* GetMarkWordAddress(Cell* cell) {
MarkBitmapWord* wordp;
uintptr_t mask;
TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
ArenaChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
chunk->markBits.getMarkWordAndMask(&cell->asTenured(), ColorBit::BlackBit,
&wordp, &mask);
return reinterpret_cast<uintptr_t*>(wordp);
@@ -2944,7 +2944,7 @@ uintptr_t GetMarkMask(Cell* cell, uint32_t colorBit) {
ColorBit bit = colorBit == 0 ? ColorBit::BlackBit : ColorBit::GrayOrBlackBit;
MarkBitmapWord* wordp;
uintptr_t mask;
TenuredChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
ArenaChunkBase* chunk = gc::detail::GetCellChunkBase(&cell->asTenured());
chunk->markBits.getMarkWordAndMask(&cell->asTenured(), bit, &wordp, &mask);
return mask;
}

View file

@@ -61,7 +61,7 @@ static constexpr size_t NurseryChunkUsableSize =
struct NurseryChunk : public ChunkBase {
alignas(CellAlignBytes) uint8_t data[NurseryChunkUsableSize];
static NurseryChunk* fromChunk(TenuredChunk* chunk, ChunkKind kind,
static NurseryChunk* fromChunk(ArenaChunk* chunk, ChunkKind kind,
uint8_t index);
explicit NurseryChunk(JSRuntime* runtime, ChunkKind kind, uint8_t chunkIndex)
@@ -166,7 +166,7 @@ inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
}
// static
inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk,
inline js::NurseryChunk* js::NurseryChunk::fromChunk(ArenaChunk* chunk,
ChunkKind kind,
uint8_t index) {
return new (chunk) NurseryChunk(chunk->runtime, kind, index);
@@ -210,8 +210,8 @@ void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
NurseryChunk* nurseryChunk = chunksToDecommit().popCopy();
AutoUnlockHelperThreadState unlock(lock);
nurseryChunk->~NurseryChunk();
TenuredChunk* tenuredChunk = TenuredChunk::emplace(
nurseryChunk, gc, /* allMemoryCommitted = */ false);
ArenaChunk* tenuredChunk =
ArenaChunk::emplace(nurseryChunk, gc, /* allMemoryCommitted = */ false);
AutoLockGC lock(gc);
gc->recycleChunk(tenuredChunk, lock);
}
@@ -2113,12 +2113,12 @@ bool js::Nursery::allocateNextChunk(AutoLockGCBgAlloc& lock) {
return false;
}
TenuredChunk* toSpaceChunk = gc->getOrAllocChunk(lock);
ArenaChunk* toSpaceChunk = gc->getOrAllocChunk(lock);
if (!toSpaceChunk) {
return false;
}
TenuredChunk* fromSpaceChunk = nullptr;
ArenaChunk* fromSpaceChunk = nullptr;
if (semispaceEnabled_ && !(fromSpaceChunk = gc->getOrAllocChunk(lock))) {
gc->recycleChunk(toSpaceChunk, lock);
return false;

View file

@@ -443,18 +443,18 @@ void js::gc::GCRuntime::finishVerifier() {
}
struct GCChunkHasher {
using Lookup = gc::TenuredChunk*;
using Lookup = gc::ArenaChunk*;
/*
* Strip zeros for better distribution after multiplying by the golden
* ratio.
*/
static HashNumber hash(gc::TenuredChunk* chunk) {
static HashNumber hash(gc::ArenaChunk* chunk) {
MOZ_ASSERT(!(uintptr_t(chunk) & gc::ChunkMask));
return HashNumber(uintptr_t(chunk) >> gc::ChunkShift);
}
static bool match(gc::TenuredChunk* k, gc::TenuredChunk* l) {
static bool match(gc::ArenaChunk* k, gc::ArenaChunk* l) {
MOZ_ASSERT(!(uintptr_t(k) & gc::ChunkMask));
MOZ_ASSERT(!(uintptr_t(l) & gc::ChunkMask));
return k == l;
@@ -471,7 +471,7 @@ class js::gc::MarkingValidator {
GCRuntime* gc;
bool initialized;
using BitmapMap = HashMap<TenuredChunk*, UniquePtr<ChunkMarkBitmap>,
using BitmapMap = HashMap<ArenaChunk*, UniquePtr<ChunkMarkBitmap>,
GCChunkHasher, SystemAllocPolicy>;
BitmapMap map;
};

View file

@@ -19,9 +19,9 @@ BEGIN_TEST(testGCChunkPool) {
// Create.
for (int i = 0; i < N; ++i) {
void* ptr = TenuredChunk::allocate(&cx->runtime()->gc);
void* ptr = ArenaChunk::allocate(&cx->runtime()->gc);
CHECK(ptr);
TenuredChunk* chunk = TenuredChunk::emplace(ptr, &cx->runtime()->gc, true);
ArenaChunk* chunk = ArenaChunk::emplace(ptr, &cx->runtime()->gc, true);
CHECK(chunk);
pool.push(chunk);
}
@@ -37,9 +37,9 @@ BEGIN_TEST(testGCChunkPool) {
// Push/Pop.
for (int i = 0; i < N; ++i) {
TenuredChunk* chunkA = pool.pop();
TenuredChunk* chunkB = pool.pop();
TenuredChunk* chunkC = pool.pop();
ArenaChunk* chunkA = pool.pop();
ArenaChunk* chunkB = pool.pop();
ArenaChunk* chunkC = pool.pop();
pool.push(chunkA);
pool.push(chunkB);
pool.push(chunkC);
@@ -47,7 +47,7 @@ BEGIN_TEST(testGCChunkPool) {
MOZ_ASSERT(pool.verify());
// Remove.
TenuredChunk* chunk = nullptr;
ArenaChunk* chunk = nullptr;
int offset = N / 2;
for (ChunkPool::Iter iter(pool); !iter.done(); iter.next(), --offset) {
if (offset == 0) {
@@ -63,7 +63,7 @@ BEGIN_TEST(testGCChunkPool) {
// Destruct.
js::AutoLockGC lock(cx->runtime());
for (ChunkPool::Iter iter(pool); !iter.done();) {
TenuredChunk* chunk = iter.get();
ArenaChunk* chunk = iter.get();
iter.next();
pool.remove(chunk);
UnmapPages(chunk, ChunkSize);

View file

@@ -188,7 +188,7 @@ struct StatsClosure {
};
static void DecommittedPagesChunkCallback(JSRuntime* rt, void* data,
gc::TenuredChunk* chunk,
gc::ArenaChunk* chunk,
const JS::AutoRequireNoGC& nogc) {
size_t n = 0;
for (uint32_t word : chunk->decommittedPages.Storage()) {
@@ -731,7 +731,7 @@ static bool CollectRuntimeStatsHelper(JSContext* cx, RuntimeStats* rtStats,
size_t numDirtyChunks =
(rtStats->gcHeapChunkTotal - rtStats->gcHeapUnusedChunks) / gc::ChunkSize;
size_t perChunkAdmin =
sizeof(gc::TenuredChunk) - (sizeof(gc::Arena) * gc::ArenasPerChunk);
sizeof(gc::ArenaChunk) - (sizeof(gc::Arena) * gc::ArenasPerChunk);
rtStats->gcHeapChunkAdmin = numDirtyChunks * perChunkAdmin;
// |gcHeapUnusedArenas| is the only thing left. Compute it in terms of