Bug 1916758 - Part 5: Allow MarkBitmap to work with different allocation densities r=sfink

This templates MarkBitmap on the number of bytes per mark bit, allowing its
reuse with a different allocation density.

Differential Revision: https://phabricator.services.mozilla.com/D221064
Jon Coppeard 2024-09-05 09:52:49 +00:00
Parent aa999e3bf4
Commit 3235af3a70
5 changed files with 102 additions and 84 deletions
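To illustrate the shape of the change: the bitmap's storage size and bit-index arithmetic are now derived from template parameters, so one class definition can serve layouts with different allocation densities. A minimal standalone sketch of the idea (the constants, the plain uintptr_t words, and the byte-offset interface are simplifications for illustration, not the real SpiderMonkey definitions):

#include <climits>
#include <cstddef>
#include <cstdint>

// Simplified stand-in for js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>:
// one mark bit per BytesPerMarkBit bytes of chunk memory, starting at
// FirstThingOffset.
template <size_t BytesPerMarkBit, size_t FirstThingOffset, size_t ChunkSize>
class SimpleMarkBitmap {
  static constexpr size_t WordBits = sizeof(uintptr_t) * CHAR_BIT;
  static constexpr size_t BitCount =
      (ChunkSize - FirstThingOffset) / BytesPerMarkBit;
  static constexpr size_t WordCount = (BitCount + WordBits - 1) / WordBits;
  uintptr_t bitmap[WordCount] = {};

 public:
  // `offset` is a cell's byte offset within the chunk.
  void mark(size_t offset) {
    size_t bit = (offset - FirstThingOffset) / BytesPerMarkBit;
    bitmap[bit / WordBits] |= uintptr_t(1) << (bit % WordBits);
  }
  bool isMarked(size_t offset) const {
    size_t bit = (offset - FirstThingOffset) / BytesPerMarkBit;
    return bitmap[bit / WordBits] & (uintptr_t(1) << (bit % WordBits));
  }
};

// Two instantiations with different densities share all of the logic; the real
// patch does the same with
// `using ChunkMarkBitmap = MarkBitmap<CellBytesPerMarkBit, FirstArenaOffset>`.
using DenseBitmap = SimpleMarkBitmap<16, 4096, 1 << 20>;
using SparseBitmap = SimpleMarkBitmap<256, 4096, 1 << 20>;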

View file

@@ -205,10 +205,13 @@ static_assert(CalculatedChunkPadSize * CHAR_BIT < BitsPerArenaWithHeaders,
static_assert(ArenasPerChunk == 252,
"Do not accidentally change our heap's density.");
const size_t FirstArenaOffset = ChunkSize - ArenasPerChunk * ArenaSize;
// Mark bitmaps are atomic because they can be written by gray unmarking on the
// main thread while read by sweeping on a background thread. The former does
// not affect the result of the latter.
using MarkBitmapWord = mozilla::Atomic<uintptr_t, mozilla::Relaxed>;
static constexpr size_t MarkBitmapWordBits = sizeof(MarkBitmapWord) * CHAR_BIT;
/*
* Live objects are marked black or gray. Everything reachable from a JS root is
@@ -227,19 +230,63 @@ enum class ColorBit : uint32_t { BlackBit = 0, GrayOrBlackBit = 1 };
enum class MarkColor : uint8_t { Gray = 1, Black = 2 };
// Mark bitmap for a tenured heap chunk.
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
class alignas(TypicalCacheLineSize) MarkBitmap {
static constexpr size_t WordCount = ArenaBitmapWords * ArenasPerChunk;
static constexpr size_t ByteCount =
(ChunkSize - FirstThingOffset) / BytesPerMarkBit;
static constexpr size_t WordCount = HowMany(ByteCount, MarkBitmapWordBits);
MarkBitmapWord bitmap[WordCount];
public:
inline void getMarkWordAndMask(const TenuredCell* cell, ColorBit colorBit,
MarkBitmapWord** wordp, uintptr_t* maskp);
static constexpr size_t FirstThingAdjustmentBits =
FirstThingOffset / BytesPerMarkBit;
static constexpr size_t FirstThingAdjustmentWords =
FirstThingAdjustmentBits / MarkBitmapWordBits;
MOZ_ALWAYS_INLINE void getMarkWordAndMask(const TenuredCell* cell,
ColorBit colorBit,
MarkBitmapWord** wordp,
uintptr_t* maskp) {
// Note: the JIT pre-barrier trampolines inline this code. Update
// MacroAssembler::emitPreBarrierFastPath code too when making changes here!
MOZ_ASSERT(size_t(colorBit) < MarkBitsPerCell);
size_t offset = uintptr_t(cell) & ChunkMask;
MOZ_ASSERT(offset >= FirstThingOffset);
const size_t bit = offset / BytesPerMarkBit + size_t(colorBit);
size_t word = bit / MarkBitmapWordBits - FirstThingAdjustmentWords;
MOZ_ASSERT(word < WordCount);
*wordp = &bitmap[word];
*maskp = uintptr_t(1) << (bit % MarkBitmapWordBits);
}
// The following are not exported and are defined in gc/Heap.h:
inline bool markBit(const TenuredCell* cell, ColorBit colorBit);
inline bool isMarkedAny(const TenuredCell* cell);
inline bool isMarkedBlack(const TenuredCell* cell);
inline bool isMarkedGray(const TenuredCell* cell);
MOZ_ALWAYS_INLINE bool markBit(const TenuredCell* cell, ColorBit colorBit) {
MarkBitmapWord* word;
uintptr_t mask;
getMarkWordAndMask(cell, colorBit, &word, &mask);
return *word & mask;
}
MOZ_ALWAYS_INLINE bool isMarkedAny(const TenuredCell* cell) {
return markBit(cell, ColorBit::BlackBit) ||
markBit(cell, ColorBit::GrayOrBlackBit);
}
MOZ_ALWAYS_INLINE bool isMarkedBlack(const TenuredCell* cell) {
// Return true if BlackBit is set.
return markBit(cell, ColorBit::BlackBit);
}
MOZ_ALWAYS_INLINE bool isMarkedGray(const TenuredCell* cell) {
// Return true if GrayOrBlackBit is set and BlackBit is not set.
return !markBit(cell, ColorBit::BlackBit) &&
markBit(cell, ColorBit::GrayOrBlackBit);
}
inline bool markIfUnmarked(const TenuredCell* cell, MarkColor color);
inline bool markIfUnmarkedAtomic(const TenuredCell* cell, MarkColor color);
inline void markBlack(const TenuredCell* cell);
@@ -252,8 +299,7 @@ class alignas(TypicalCacheLineSize) MarkBitmap {
inline void copyFrom(const MarkBitmap& other);
};
static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(MarkBitmap),
"Ensure our MarkBitmap actually covers all arenas.");
using ChunkMarkBitmap = MarkBitmap<CellBytesPerMarkBit, FirstArenaOffset>;
// Bitmap with one bit per page used for decommitted page set.
using ChunkPageBitmap = mozilla::BitSet<PagesPerChunk, uint32_t>;
@@ -265,17 +311,21 @@ using ChunkArenaBitmap = mozilla::BitSet<ArenasPerChunk, uint32_t>;
class TenuredChunkBase : public ChunkBase {
public:
TenuredChunkInfo info;
MarkBitmap markBits;
ChunkMarkBitmap markBits;
ChunkArenaBitmap freeCommittedArenas;
ChunkPageBitmap decommittedPages;
protected:
explicit TenuredChunkBase(JSRuntime* runtime) : ChunkBase(runtime) {
static_assert(sizeof(markBits) == ArenaBitmapBytes * ArenasPerChunk,
"Ensure our MarkBitmap actually covers all arenas.");
info.numArenasFree = ArenasPerChunk;
}
void initAsDecommitted();
};
static_assert(FirstArenaOffset ==
RoundUp(sizeof(gc::TenuredChunkBase), ArenaSize));
/*
* We sometimes use an index to refer to a cell in an arena. The index for a
@@ -285,15 +335,6 @@ class TenuredChunkBase : public ChunkBase {
const size_t ArenaCellIndexBytes = CellAlignBytes;
const size_t MaxArenaCellIndex = ArenaSize / CellAlignBytes;
const size_t MarkBitmapWordBits = sizeof(MarkBitmapWord) * CHAR_BIT;
constexpr size_t FirstArenaAdjustmentBits =
RoundUp(sizeof(gc::TenuredChunkBase), ArenaSize) / gc::CellBytesPerMarkBit;
static_assert((FirstArenaAdjustmentBits % MarkBitmapWordBits) == 0);
constexpr size_t FirstArenaAdjustmentWords =
FirstArenaAdjustmentBits / MarkBitmapWordBits;
const size_t ChunkStoreBufferOffset = offsetof(ChunkBase, storeBuffer);
const size_t ChunkMarkBitmapOffset = offsetof(TenuredChunkBase, markBits);
@@ -535,48 +576,6 @@ inline bool operator!=(JS::GCCellPtr ptr1, JS::GCCellPtr ptr2) {
namespace js {
namespace gc {
/* static */
MOZ_ALWAYS_INLINE void MarkBitmap::getMarkWordAndMask(const TenuredCell* cell,
ColorBit colorBit,
MarkBitmapWord** wordp,
uintptr_t* maskp) {
// Note: the JIT pre-barrier trampolines inline this code. Update
// MacroAssembler::emitPreBarrierFastPath code too when making changes here!
MOZ_ASSERT(size_t(colorBit) < MarkBitsPerCell);
size_t offset = uintptr_t(cell) & ChunkMask;
const size_t bit = offset / CellBytesPerMarkBit + size_t(colorBit);
size_t word = bit / MarkBitmapWordBits - FirstArenaAdjustmentWords;
MOZ_ASSERT(word < WordCount);
*wordp = &bitmap[word];
*maskp = uintptr_t(1) << (bit % MarkBitmapWordBits);
}
MOZ_ALWAYS_INLINE bool MarkBitmap::markBit(const TenuredCell* cell,
ColorBit colorBit) {
MarkBitmapWord* word;
uintptr_t mask;
getMarkWordAndMask(cell, colorBit, &word, &mask);
return *word & mask;
}
MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedAny(const TenuredCell* cell) {
return markBit(cell, ColorBit::BlackBit) ||
markBit(cell, ColorBit::GrayOrBlackBit);
}
MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedBlack(const TenuredCell* cell) {
// Return true if BlackBit is set.
return markBit(cell, ColorBit::BlackBit);
}
MOZ_ALWAYS_INLINE bool MarkBitmap::isMarkedGray(const TenuredCell* cell) {
// Return true if GrayOrBlackBit is set and BlackBit is not set.
return !markBit(cell, ColorBit::BlackBit) &&
markBit(cell, ColorBit::GrayOrBlackBit);
}
namespace detail {
// `addr` must be an address within GC-controlled memory. Note that it cannot

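The index arithmetic in getMarkWordAndMask above can be sanity-checked with a small constexpr sketch. The constants below are illustrative assumptions, not the real chunk layout, and the colour bit (which the real code adds to the bit index) is ignored:

#include <climits>
#include <cstddef>
#include <cstdint>

constexpr size_t ChunkSize = 1 << 20;           // assumed 1 MiB chunk
constexpr size_t FirstThingOffset = 16 * 1024;  // assumed first markable byte
constexpr size_t BytesPerMarkBit = 16;          // assumed bytes per mark bit
constexpr size_t WordBits = sizeof(uintptr_t) * CHAR_BIT;

constexpr size_t FirstThingAdjustmentBits = FirstThingOffset / BytesPerMarkBit;
constexpr size_t FirstThingAdjustmentWords = FirstThingAdjustmentBits / WordBits;
constexpr size_t WordCount =
    ((ChunkSize - FirstThingOffset) / BytesPerMarkBit + WordBits - 1) / WordBits;

// Word and bit position for a cell at byte offset `offset` within the chunk,
// mirroring getMarkWordAndMask: index the bit over the whole chunk, then drop
// the words that would cover the header area before FirstThingOffset.
constexpr size_t wordFor(size_t offset) {
  return (offset / BytesPerMarkBit) / WordBits - FirstThingAdjustmentWords;
}
constexpr size_t bitFor(size_t offset) {
  return (offset / BytesPerMarkBit) % WordBits;
}

// The first markable byte maps to word 0, bit 0, and the last byte of the
// chunk still lands inside the bitmap (cf. MOZ_ASSERT(word < WordCount)).
static_assert(wordFor(FirstThingOffset) == 0);
static_assert(bitFor(FirstThingOffset) == 0);
static_assert(wordFor(ChunkSize - 1) == WordCount - 1);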
View file

@@ -297,7 +297,8 @@ class TenuredCell : public Cell {
// Default implementation for kinds that don't require fixup.
void fixupAfterMovingGC() {}
static inline CellColor getColor(MarkBitmap* bitmap, const TenuredCell* cell);
static inline CellColor getColor(ChunkMarkBitmap* bitmap,
const TenuredCell* cell);
#ifdef DEBUG
inline bool isAligned() const;
@@ -428,7 +429,7 @@ MOZ_ALWAYS_INLINE CellColor TenuredCell::color() const {
}
/* static */
inline CellColor TenuredCell::getColor(MarkBitmap* bitmap,
inline CellColor TenuredCell::getColor(ChunkMarkBitmap* bitmap,
const TenuredCell* cell) {
// Note that this method isn't synchronised so may give surprising results if
// the mark bitmap is being modified concurrently.

View file

@@ -80,7 +80,9 @@ inline size_t& js::gc::Arena::atomBitmapStart() {
// unmarking occurs in parallel with background sweeping.
// The return value indicates if the cell went from unmarked to marked.
MOZ_ALWAYS_INLINE bool js::gc::MarkBitmap::markIfUnmarked(
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
MOZ_ALWAYS_INLINE bool
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::markIfUnmarked(
const TenuredCell* cell, MarkColor color) {
MarkBitmapWord* word;
uintptr_t mask;
@@ -104,7 +106,9 @@ MOZ_ALWAYS_INLINE bool js::gc::MarkBitmap::markIfUnmarked(
return true;
}
MOZ_ALWAYS_INLINE bool js::gc::MarkBitmap::markIfUnmarkedAtomic(
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
MOZ_ALWAYS_INLINE bool
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::markIfUnmarkedAtomic(
const TenuredCell* cell, MarkColor color) {
// This version of the method is safe in the face of concurrent writes to the
// mark bitmap but may return false positives. The extra synchronisation
@@ -130,7 +134,10 @@ MOZ_ALWAYS_INLINE bool js::gc::MarkBitmap::markIfUnmarkedAtomic(
return true;
}
MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::markBlack(const TenuredCell* cell) {
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
MOZ_ALWAYS_INLINE void
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::markBlack(
const TenuredCell* cell) {
MarkBitmapWord* word;
uintptr_t mask;
getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
@@ -138,7 +145,9 @@ MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::markBlack(const TenuredCell* cell) {
*word = bits | mask;
}
MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::markBlackAtomic(
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
MOZ_ALWAYS_INLINE void
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::markBlackAtomic(
const TenuredCell* cell) {
MarkBitmapWord* word;
uintptr_t mask;
@@ -146,9 +155,10 @@ MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::markBlackAtomic(
*word |= mask;
}
MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::copyMarkBit(TenuredCell* dst,
const TenuredCell* src,
ColorBit colorBit) {
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
MOZ_ALWAYS_INLINE void
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::copyMarkBit(
TenuredCell* dst, const TenuredCell* src, ColorBit colorBit) {
TenuredChunkBase* srcChunk = detail::GetCellChunkBase(src);
MarkBitmapWord* srcWord;
uintptr_t srcMask;
@@ -166,7 +176,10 @@ MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::copyMarkBit(TenuredCell* dst,
*dstWord = bits;
}
MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::unmark(const TenuredCell* cell) {
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
MOZ_ALWAYS_INLINE void
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::unmark(
const TenuredCell* cell) {
MarkBitmapWord* word;
uintptr_t mask;
uintptr_t bits;
@@ -178,7 +191,9 @@ MOZ_ALWAYS_INLINE void js::gc::MarkBitmap::unmark(const TenuredCell* cell) {
*word = bits & ~mask;
}
inline js::gc::MarkBitmapWord* js::gc::MarkBitmap::arenaBits(Arena* arena) {
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
inline js::gc::MarkBitmapWord*
js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::arenaBits(Arena* arena) {
static_assert(
ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
"We assume that the part of the bitmap corresponding to the arena "
@@ -192,7 +207,9 @@ inline js::gc::MarkBitmapWord* js::gc::MarkBitmap::arenaBits(Arena* arena) {
return word;
}
void js::gc::MarkBitmap::copyFrom(const MarkBitmap& other) {
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
void js::gc::MarkBitmap<BytesPerMarkBit, FirstThingOffset>::copyFrom(
const MarkBitmap& other) {
for (size_t i = 0; i < WordCount; i++) {
bitmap[i] = uintptr_t(other.bitmap[i]);
}
@@ -215,7 +232,7 @@ void js::gc::TenuredCell::markBlackAtomic() const {
}
void js::gc::TenuredCell::copyMarkBitsFrom(const TenuredCell* src) {
MarkBitmap& markBits = chunk()->markBits;
ChunkMarkBitmap& markBits = chunk()->markBits;
markBits.copyMarkBit(this, src, ColorBit::BlackBit);
markBits.copyMarkBit(this, src, ColorBit::GrayOrBlackBit);
}

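Most of the churn in this file is the standard C++ boilerplate for defining member functions of a class template outside the class body: each definition repeats the template parameter list and qualifies the class name with those parameters. A toy example of the same pattern (names here are placeholders):

#include <cstddef>

template <size_t BytesPerMarkBit, size_t FirstThingOffset>
class BitmapLike {
 public:
  bool markIfUnmarked(size_t offset);
};

// Out-of-line definition: the template header and the <...> qualification on
// the class name must both be repeated.
template <size_t BytesPerMarkBit, size_t FirstThingOffset>
bool BitmapLike<BytesPerMarkBit, FirstThingOffset>::markIfUnmarked(
    size_t offset) {
  // Real logic elided; this only demonstrates the required syntax.
  return offset >= FirstThingOffset;
}

Because the definitions stay in a header that users include, each instantiation (such as ChunkMarkBitmap) is generated implicitly at its points of use; no explicit instantiation is required.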
View file

@@ -471,8 +471,8 @@ class js::gc::MarkingValidator {
GCRuntime* gc;
bool initialized;
using BitmapMap = HashMap<TenuredChunk*, UniquePtr<MarkBitmap>, GCChunkHasher,
SystemAllocPolicy>;
using BitmapMap = HashMap<TenuredChunk*, UniquePtr<ChunkMarkBitmap>,
GCChunkHasher, SystemAllocPolicy>;
BitmapMap map;
};
@@ -506,11 +506,11 @@ void js::gc::MarkingValidator::nonIncrementalMark(AutoGCSession& session) {
chunk.next()) {
// Bug 1842582: Allocate mark bit buffer in two stages to avoid alignment
// restriction which we currently can't support.
void* buffer = js_malloc(sizeof(MarkBitmap));
void* buffer = js_malloc(sizeof(ChunkMarkBitmap));
if (!buffer) {
return;
}
UniquePtr<MarkBitmap> entry(new (buffer) MarkBitmap);
UniquePtr<ChunkMarkBitmap> entry(new (buffer) ChunkMarkBitmap);
entry->copyFrom(chunk->markBits);
if (!map.putNew(chunk, std::move(entry))) {
return;
@@ -628,11 +628,11 @@ void js::gc::MarkingValidator::nonIncrementalMark(AutoGCSession& session) {
AutoLockGC lock(gc);
for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
chunk.next()) {
MarkBitmap* bitmap = &chunk->markBits;
ChunkMarkBitmap* bitmap = &chunk->markBits;
auto ptr = map.lookup(chunk);
MOZ_RELEASE_ASSERT(ptr, "Chunk not found in map");
MarkBitmap* entry = ptr->value().get();
MarkBitmap temp;
ChunkMarkBitmap* entry = ptr->value().get();
ChunkMarkBitmap temp;
temp.copyFrom(*entry);
entry->copyFrom(*bitmap);
bitmap->copyFrom(temp);
@@ -688,8 +688,8 @@ void js::gc::MarkingValidator::validate() {
continue; /* Allocated after we did the non-incremental mark. */
}
MarkBitmap* bitmap = ptr->value().get();
MarkBitmap* incBitmap = &chunk->markBits;
ChunkMarkBitmap* bitmap = ptr->value().get();
ChunkMarkBitmap* incBitmap = &chunk->markBits;
for (size_t i = 0; i < ArenasPerChunk; i++) {
if (chunk->decommittedPages[chunk->pageIndex(i)]) {

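The "two stages" in the MarkingValidator code above are a raw allocation followed by placement new, which avoids asking the allocator for ChunkMarkBitmap's extended (cache-line) alignment, something the comment notes is not currently supported. A generic sketch of the pattern with standard library calls (the real code uses js_malloc and JS's UniquePtr):

#include <cstdlib>
#include <new>

// Placeholder for a type with an extended alignment requirement, like the
// alignas(TypicalCacheLineSize) mark bitmap.
struct alignas(64) OverAligned {
  unsigned char bytes[256];
};

OverAligned* allocateInTwoStages() {
  // Stage 1: raw storage. malloc only guarantees the default (fundamental)
  // alignment; the extended alignment is not enforced here, which matches the
  // restriction mentioned in the comment above.
  void* buffer = std::malloc(sizeof(OverAligned));
  if (!buffer) {
    return nullptr;
  }
  // Stage 2: construct the object in place; no aligned operator new involved.
  return new (buffer) OverAligned();
}

void freeTwoStages(OverAligned* obj) {
  // Tear-down mirrors construction: explicit destructor call, then free.
  obj->~OverAligned();
  std::free(obj);
}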
View file

@@ -7642,7 +7642,8 @@ void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
// Fold the adjustment for the fact that arenas don't start at the beginning
// of the chunk into the offset to the chunk bitmap.
const size_t firstArenaAdjustment = gc::FirstArenaAdjustmentBits / CHAR_BIT;
const size_t firstArenaAdjustment =
gc::ChunkMarkBitmap::FirstThingAdjustmentBits / CHAR_BIT;
const intptr_t offset =
intptr_t(gc::ChunkMarkBitmapOffset) - intptr_t(firstArenaAdjustment);
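The folding in emitPreBarrierFastPath works because subtracting FirstThingAdjustmentWords from the word index is equivalent to subtracting FirstThingAdjustmentBits / CHAR_BIT bytes from the bitmap's base address, provided the adjustment is a whole number of bitmap words. A small constexpr check of that identity, with an illustrative value:

#include <climits>
#include <cstddef>
#include <cstdint>

constexpr size_t WordBits = sizeof(uintptr_t) * CHAR_BIT;
constexpr size_t WordBytes = sizeof(uintptr_t);

// Illustrative assumption: the adjustment covers a whole number of words.
constexpr size_t FirstThingAdjustmentBits = 1024;
constexpr size_t FirstThingAdjustmentWords = FirstThingAdjustmentBits / WordBits;

// wordAddress = bitmapBase + (bitIndex / WordBits - AdjustmentWords) * WordBytes
//             = (bitmapBase - AdjustmentBits / CHAR_BIT)
//               + (bitIndex / WordBits) * WordBytes
// so the JIT can bake the adjustment into the bitmap offset it loads from.
static_assert(FirstThingAdjustmentWords * WordBytes ==
              FirstThingAdjustmentBits / CHAR_BIT);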