Bug 1291292 - Use dynamic chunk allocation for the nursery r=terrence

This commit is contained in:
Jon Coppeard 2016-08-11 17:14:56 +01:00
Родитель 7acfb399ba
Коммит 17304689a2
12 изменённых файлов: 261 добавление и 196 удалений

Просмотреть файл

@ -283,7 +283,6 @@ struct GCSizes
#define FOR_EACH_SIZE(macro) \
macro(_, MallocHeap, marker) \
macro(_, NonHeap, nurseryCommitted) \
macro(_, NonHeap, nurseryDecommitted) \
macro(_, MallocHeap, nurseryMallocedBuffers) \
macro(_, MallocHeap, storeBufferVals) \
macro(_, MallocHeap, storeBufferCells) \

Просмотреть файл

@ -260,30 +260,6 @@ GCRuntime::checkIncrementalZoneState(ExclusiveContext* cx, T* t)
// /////////// Arena -> Thing Allocator //////////////////////////////////////
// After pulling a Chunk out of the empty chunks pool, we want to run the
// background allocator to refill it. The code that takes Chunks does so under
// the GC lock. We need to start the background allocation under the helper
// threads lock. To avoid lock inversion we have to delay the start until after
// we are outside the GC lock. This class handles that delay automatically.
class MOZ_RAII js::gc::AutoMaybeStartBackgroundAllocation
{
// Non-owning; remains null unless tryToStartBackgroundAllocation() is called.
JSRuntime* runtime;
public:
AutoMaybeStartBackgroundAllocation()
: runtime(nullptr)
{}
// Record the runtime; the actual task start is deferred to the destructor,
// which runs after the GC lock has been released (see the class comment).
void tryToStartBackgroundAllocation(JSRuntime* rt) {
runtime = rt;
}
~AutoMaybeStartBackgroundAllocation() {
if (runtime)
runtime->gc.startBackgroundAllocTaskIfIdle();
}
};
void
GCRuntime::startBackgroundAllocTaskIfIdle()
{
@ -547,7 +523,7 @@ GCRuntime::getOrAllocChunk(const AutoLockGC& lock,
}
if (wantBackgroundAllocation(lock))
maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt);
maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt->gc);
return chunk;
}

Просмотреть файл

@ -1387,6 +1387,30 @@ class MOZ_RAII AutoEnterIteration {
}
};
// After pulling a Chunk out of the empty chunks pool, we want to run the
// background allocator to refill it. The code that takes Chunks does so under
// the GC lock. We need to start the background allocation under the helper
// threads lock. To avoid lock inversion we have to delay the start until after
// we are outside the GC lock. This class handles that delay automatically.
class MOZ_RAII AutoMaybeStartBackgroundAllocation
{
// Non-owning; remains null unless tryToStartBackgroundAllocation() is called.
GCRuntime* gc;
public:
AutoMaybeStartBackgroundAllocation()
: gc(nullptr)
{}
// Record the GCRuntime; the actual task start is deferred to the destructor,
// which runs after the GC lock has been released.
void tryToStartBackgroundAllocation(GCRuntime& gc) {
this->gc = &gc;
}
~AutoMaybeStartBackgroundAllocation() {
if (gc)
gc->startBackgroundAllocTaskIfIdle();
}
};
#ifdef JS_GC_ZEAL
inline bool

Просмотреть файл

@ -1002,7 +1002,7 @@ struct Chunk
void decommitAllArenasWithoutUnlocking(const AutoLockGC& lock);
static Chunk* allocate(JSRuntime* rt);
inline void init(JSRuntime* rt);
void init(JSRuntime* rt);
private:
void decommitAllArenas(JSRuntime* rt);

Просмотреть файл

@ -82,16 +82,41 @@ struct js::Nursery::Canary
};
#endif
// Fill the entire chunk with the given poison byte, then rebuild the trailer
// via init() so the chunk is usable by the nursery again.
inline void
js::Nursery::NurseryChunk::poisonAndInit(JSRuntime* rt, uint8_t poison)
{
JS_POISON(this, poison, ChunkSize);
init(rt);
}
// Placement-construct the chunk trailer, associating this nursery chunk with
// the runtime and its GC store buffer.
inline void
js::Nursery::NurseryChunk::init(JSRuntime* rt)
{
new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer);
}
// Reinterpret a GC Chunk as a NurseryChunk. The two layouts have the same
// size (enforced by a static_assert on sizeof(NurseryChunk) == gc::ChunkSize).
/* static */ inline js::Nursery::NurseryChunk*
js::Nursery::NurseryChunk::fromChunk(Chunk* chunk)
{
return reinterpret_cast<NurseryChunk*>(chunk);
}
// Convert this nursery chunk back into a gc::Chunk, re-running Chunk::init()
// so it can be handed back to the GC (e.g. via recycleChunk).
inline Chunk*
js::Nursery::NurseryChunk::toChunk(JSRuntime* rt)
{
auto chunk = reinterpret_cast<Chunk*>(this);
chunk->init(rt);
return chunk;
}
js::Nursery::Nursery(JSRuntime* rt)
: runtime_(rt)
, position_(0)
, currentStart_(0)
, currentStartChunk_(0)
, currentStartPosition_(0)
, currentEnd_(0)
, heapStart_(0)
, heapEnd_(0)
, currentChunk_(0)
, numActiveChunks_(0)
, numNurseryChunks_(0)
, maxNurseryChunks_(0)
, previousPromotionRate_(0)
, profileThreshold_(0)
, enableProfiling_(false)
@ -104,13 +129,13 @@ js::Nursery::Nursery(JSRuntime* rt)
{}
bool
js::Nursery::init(uint32_t maxNurseryBytes)
js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
{
/* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
numNurseryChunks_ = maxNurseryBytes >> ChunkShift;
maxNurseryChunks_ = maxNurseryBytes >> ChunkShift;
/* If no chunks are specified then the nursery is permanently disabled. */
if (numNurseryChunks_ == 0)
if (maxNurseryChunks_ == 0)
return true;
if (!mallocedBuffers.init())
@ -119,21 +144,16 @@ js::Nursery::init(uint32_t maxNurseryBytes)
if (!cellsWithUid_.init())
return false;
void* heap = MapAlignedPages(nurserySize(), Alignment);
if (!heap)
return false;
freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
return false;
heapStart_ = uintptr_t(heap);
heapEnd_ = heapStart_ + nurserySize();
currentStart_ = start();
numActiveChunks_ = numNurseryChunks_;
JS_POISON(heap, JS_FRESH_NURSERY_PATTERN, nurserySize());
updateNumActiveChunks(1);
updateNumChunksLocked(1, lock);
if (numChunks() == 0)
return false;
setCurrentChunk(0);
setStartPosition();
char* env = getenv("JS_GC_PROFILE_NURSERY");
if (env) {
@ -150,15 +170,16 @@ js::Nursery::init(uint32_t maxNurseryBytes)
PodZero(&profileTimes_);
PodZero(&totalTimes_);
if (!runtime()->gc.storeBuffer.enable())
return false;
MOZ_ASSERT(isEnabled());
return true;
}
js::Nursery::~Nursery()
{
if (start())
UnmapPages((void*)start(), nurserySize());
disable();
js_delete(freeMallocedBuffersTask);
}
@ -169,13 +190,20 @@ js::Nursery::enable()
MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
if (isEnabled())
return;
updateNumActiveChunks(1);
updateNumChunks(1);
if (numChunks() == 0)
return;
setCurrentChunk(0);
currentStart_ = position();
setStartPosition();
#ifdef JS_GC_ZEAL
if (runtime()->hasZealMode(ZealMode::GenerationalGC))
enterZealMode();
#endif
MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer.enable());
return;
}
void
@ -184,8 +212,9 @@ js::Nursery::disable()
MOZ_ASSERT(isEmpty());
if (!isEnabled())
return;
updateNumActiveChunks(0);
updateNumChunks(0);
currentEnd_ = 0;
runtime()->gc.storeBuffer.disable();
}
bool
@ -194,15 +223,19 @@ js::Nursery::isEmpty() const
MOZ_ASSERT(runtime_);
if (!isEnabled())
return true;
MOZ_ASSERT_IF(!runtime_->hasZealMode(ZealMode::GenerationalGC), currentStart_ == start());
return position() == currentStart_;
if (!runtime_->hasZealMode(ZealMode::GenerationalGC)) {
MOZ_ASSERT(currentStartChunk_ == 0);
MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
}
return position() == currentStartPosition_;
}
#ifdef JS_GC_ZEAL
void
js::Nursery::enterZealMode() {
if (isEnabled())
numActiveChunks_ = numNurseryChunks_;
updateNumChunks(maxNurseryChunks_);
}
void
@ -210,7 +243,7 @@ js::Nursery::leaveZealMode() {
if (isEnabled()) {
MOZ_ASSERT(isEmpty());
setCurrentChunk(0);
currentStart_ = start();
setStartPosition();
}
}
#endif // JS_GC_ZEAL
@ -260,7 +293,7 @@ js::Nursery::allocate(size_t size)
{
MOZ_ASSERT(isEnabled());
MOZ_ASSERT(!runtime()->isHeapBusy());
MOZ_ASSERT(position() >= currentStart_);
MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
MOZ_ASSERT(position() % gc::CellSize == 0);
MOZ_ASSERT(size % gc::CellSize == 0);
@ -271,7 +304,7 @@ js::Nursery::allocate(size_t size)
#endif
if (currentEnd() < position() + size) {
if (currentChunk_ + 1 == numActiveChunks_)
if (currentChunk_ + 1 == numChunks())
return nullptr;
setCurrentChunk(currentChunk_ + 1);
}
@ -366,10 +399,10 @@ Nursery::setForwardingPointer(void* oldData, void* newData, bool direct)
{
MOZ_ASSERT(isInside(oldData));
// Bug 1196210: If a zero-capacity header lands in the last 2 words of the
// jemalloc chunk abutting the start of the nursery, the (invalid) newData
// pointer will appear to be "inside" the nursery.
MOZ_ASSERT(!isInside(newData) || uintptr_t(newData) == heapStart_);
// Bug 1196210: If a zero-capacity header lands in the last 2 words of a
// jemalloc chunk abutting the start of a nursery chunk, the (invalid)
// newData pointer will appear to be "inside" the nursery.
MOZ_ASSERT(!isInside(newData) || (uintptr_t(newData) & ChunkMask) == 0);
if (direct) {
*reinterpret_cast<void**>(oldData) = newData;
@ -539,7 +572,7 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
AutoDisableProxyCheck disableStrictProxyChecking(rt);
mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
size_t initialUsedSpace = position() - start();
size_t initialUsedSpace = usedSpace();
// Move objects pointed to by roots from the nursery to the major heap.
TenuringTracer mover(rt, this);
@ -697,10 +730,10 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
printProfileHeader();
}
fprintf(stderr, "MinorGC: %20s %5.1f%% %4d ",
fprintf(stderr, "MinorGC: %20s %5.1f%% %4u ",
JS::gcreason::ExplainReason(reason),
promotionRate * 100,
numActiveChunks_);
numChunks());
printProfileTimes(profileTimes_);
}
}
@ -768,40 +801,69 @@ js::Nursery::sweep()
#ifdef JS_GC_ZEAL
/* Poison the nursery contents so touching a freed object will crash. */
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize());
for (int i = 0; i < numNurseryChunks_; ++i)
initChunk(i);
for (unsigned i = 0; i < numChunks(); i++)
chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
MOZ_ASSERT(numActiveChunks_ == numNurseryChunks_);
/* Only reset the alloc point when we are close to the end. */
if (currentChunk_ + 1 == numNurseryChunks_)
if (currentChunk_ + 1 == numChunks())
setCurrentChunk(0);
} else
#endif
{
#ifdef JS_CRASH_DIAGNOSTICS
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
for (int i = 0; i < numActiveChunks_; ++i)
initChunk(i);
for (unsigned i = 0; i < numChunks(); ++i)
chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
#endif
setCurrentChunk(0);
}
/* Set current start position for isEmpty checks. */
currentStart_ = position();
setStartPosition();
MemProfiler::SweepNursery(runtime());
}
// Number of bytes allocated between the recorded start position
// (currentStartChunk_/currentStartPosition_) and the current allocation
// position, excluding chunk trailers.
size_t
js::Nursery::usedSpace() const
{
MOZ_ASSERT(currentChunk_ >= currentStartChunk_);
MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <= NurseryChunkUsableSize);
MOZ_ASSERT(position_ - chunk(currentChunk_).start() <= NurseryChunkUsableSize);
// Fast path: allocation has not advanced past the starting chunk.
if (currentChunk_ == currentStartChunk_)
return position_ - currentStartPosition_;
// Otherwise sum: the tail of the start chunk, the usable size of every
// whole chunk in between, and the used prefix of the current chunk.
size_t bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
((currentChunk_ - currentStartChunk_ - 1) * NurseryChunkUsableSize) +
position_ - chunk(currentChunk_).start();
MOZ_ASSERT(bytes <= numChunks() * NurseryChunkUsableSize);
return bytes;
}
// Switch bump allocation to the given chunk: reset position_/currentEnd_ to
// the chunk's bounds and poison its contents with the fresh-nursery pattern.
MOZ_ALWAYS_INLINE void
js::Nursery::setCurrentChunk(unsigned chunkno)
{
MOZ_ASSERT(chunkno < maxChunks());
MOZ_ASSERT(chunkno < numChunks());
currentChunk_ = chunkno;
position_ = chunk(chunkno).start();
currentEnd_ = chunk(chunkno).end();
chunk(chunkno).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
}
// Record the current allocation point as the logical start of the nursery;
// consumed by isEmpty() and usedSpace().
MOZ_ALWAYS_INLINE void
js::Nursery::setStartPosition()
{
currentStartChunk_ = currentChunk_;
currentStartPosition_ = position();
}
void
js::Nursery::growAllocableSpace()
{
#ifdef JS_GC_ZEAL
MOZ_ASSERT_IF(runtime()->hasZealMode(ZealMode::GenerationalGC),
numActiveChunks_ == numNurseryChunks_);
#endif
updateNumActiveChunks(Min(numActiveChunks_ * 2, numNurseryChunks_));
updateNumChunks(Min(numChunks() * 2, maxNurseryChunks_));
}
void
@ -811,30 +873,51 @@ js::Nursery::shrinkAllocableSpace()
if (runtime()->hasZealMode(ZealMode::GenerationalGC))
return;
#endif
updateNumActiveChunks(Max(numActiveChunks_ - 1, 1));
updateNumChunks(Max(numChunks() - 1, 1u));
}
void
js::Nursery::updateNumActiveChunks(int newCount)
js::Nursery::updateNumChunks(unsigned newCount)
{
#ifndef JS_GC_ZEAL
int priorChunks = numActiveChunks_;
#endif
numActiveChunks_ = newCount;
// In zeal mode, we want to keep the unused memory poisoned so that we
// will crash sooner. Avoid decommit in that case to avoid having the
// system zero the pages.
#ifndef JS_GC_ZEAL
if (numActiveChunks_ < priorChunks) {
uintptr_t decommitStart = chunk(numActiveChunks_).start();
uintptr_t decommitSize = chunk(priorChunks - 1).start() + ChunkSize - decommitStart;
MOZ_ASSERT(decommitSize != 0);
MOZ_ASSERT(decommitStart == AlignBytes(decommitStart, Alignment));
MOZ_ASSERT(decommitSize == AlignBytes(decommitSize, Alignment));
MarkPagesUnused((void*)decommitStart, decommitSize);
if (numChunks() != newCount) {
AutoLockGC lock(runtime());
updateNumChunksLocked(newCount, lock);
}
}
void
js::Nursery::updateNumChunksLocked(unsigned newCount, AutoLockGC& lock)
{
// The GC nursery is an optimization and so if we fail to allocate nursery
// chunks we do not report an error.
unsigned priorCount = numChunks();
MOZ_ASSERT(priorCount != newCount);
AutoMaybeStartBackgroundAllocation maybeBgAlloc;
if (newCount < priorCount) {
// Shrink the nursery and free unused chunks.
for (unsigned i = newCount; i < priorCount; i++)
runtime()->gc.recycleChunk(chunk(i).toChunk(runtime()), lock);
chunks_.shrinkTo(newCount);
return;
}
// Grow the nursery and allocate new chunks.
if (!chunks_.resize(newCount))
return;
for (unsigned i = priorCount; i < newCount; i++) {
auto newChunk = runtime()->gc.getOrAllocChunk(lock, maybeBgAlloc);
if (!newChunk) {
chunks_.shrinkTo(i);
return;
}
chunks_[i] = NurseryChunk::fromChunk(newChunk);
chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
}
#endif // !defined(JS_GC_ZEAL)
}
void

Просмотреть файл

@ -121,15 +121,17 @@ class Nursery
explicit Nursery(JSRuntime* rt);
~Nursery();
MOZ_MUST_USE bool init(uint32_t maxNurseryBytes);
MOZ_MUST_USE bool init(uint32_t maxNurseryBytes, AutoLockGC& lock);
bool exists() const { return numNurseryChunks_ != 0; }
size_t numChunks() const { return numNurseryChunks_; }
size_t nurserySize() const { return numNurseryChunks_ << ChunkShift; }
unsigned maxChunks() const { return maxNurseryChunks_; }
unsigned numChunks() const { return chunks_.length(); }
bool exists() const { return maxChunks() != 0; }
size_t nurserySize() const { return maxChunks() << ChunkShift; }
void enable();
void disable();
bool isEnabled() const { return numActiveChunks_ != 0; }
bool isEnabled() const { return numChunks() != 0; }
/* Return true if no allocations have been made since the last collection. */
bool isEmpty() const;
@ -140,7 +142,11 @@ class Nursery
*/
MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
return uintptr_t(p) >= heapStart_ && uintptr_t(p) < heapEnd_;
for (auto chunk : chunks_) {
if (uintptr_t(p) - chunk->start() < gc::ChunkSize)
return true;
}
return false;
}
template<typename T>
bool isInside(const SharedMem<T>& p) const {
@ -212,10 +218,7 @@ class Nursery
void queueSweepAction(SweepThunk thunk, void* data);
size_t sizeOfHeapCommitted() const {
return numActiveChunks_ * gc::ChunkSize;
}
size_t sizeOfHeapDecommitted() const {
return (numNurseryChunks_ - numActiveChunks_) * gc::ChunkSize;
return numChunks() * gc::ChunkSize;
}
size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
size_t total = 0;
@ -225,17 +228,13 @@ class Nursery
return total;
}
MOZ_ALWAYS_INLINE uintptr_t start() const {
return heapStart_;
}
MOZ_ALWAYS_INLINE uintptr_t heapEnd() const {
return heapEnd_;
}
size_t usedSpace() const;
// Free space remaining, not counting chunk trailers.
MOZ_ALWAYS_INLINE size_t approxFreeSpace() const {
return heapEnd_ - position_;
MOZ_ALWAYS_INLINE size_t freeSpace() const {
MOZ_ASSERT(currentEnd_ - position_ <= NurseryChunkUsableSize);
return (currentEnd_ - position_) +
(numChunks() - currentChunk_ - 1) * NurseryChunkUsableSize;
}
#ifdef JS_GC_ZEAL
@ -247,6 +246,22 @@ class Nursery
void printTotalProfileTimes();
private:
/* The amount of space in the mapped nursery available to allocations. */
static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
struct NurseryChunk {
char data[NurseryChunkUsableSize];
gc::ChunkTrailer trailer;
static NurseryChunk* fromChunk(gc::Chunk* chunk);
void init(JSRuntime* rt);
void poisonAndInit(JSRuntime* rt, uint8_t poison);
uintptr_t start() const { return uintptr_t(&data); }
uintptr_t end() const { return uintptr_t(&trailer); }
gc::Chunk* toChunk(JSRuntime* rt);
};
static_assert(sizeof(NurseryChunk) == gc::ChunkSize,
"Nursery chunk size must match gc::Chunk size.");
/*
* The start and end pointers are stored under the runtime so that we can
* inline the isInsideNursery check into embedder code. Use the start()
@ -254,27 +269,24 @@ class Nursery
*/
JSRuntime* runtime_;
/* Vector of allocated chunks to allocate from. */
Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
/* Pointer to the first unallocated byte in the nursery. */
uintptr_t position_;
/* Pointer to the logical start of the Nursery. */
uintptr_t currentStart_;
unsigned currentStartChunk_;
uintptr_t currentStartPosition_;
/* Pointer to the last byte of space in the current chunk. */
uintptr_t currentEnd_;
/* Pointer to first and last address of the total nursery allocation. */
uintptr_t heapStart_;
uintptr_t heapEnd_;
/* The index of the chunk that is currently being allocated from. */
int currentChunk_;
unsigned currentChunk_;
/* The index after the last chunk that we will allocate from. */
int numActiveChunks_;
/* Number of chunks allocated for the nursery. */
int numNurseryChunks_;
/* Maximum number of chunks to allocate for the nursery. */
unsigned maxNurseryChunks_;
/* Promotion rate for the previous minor collection. */
double previousPromotionRate_;
@ -346,42 +358,21 @@ class Nursery
Canary* lastCanary_;
#endif
/* The amount of space in the mapped nursery available to allocations. */
static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
NurseryChunk* allocChunk();
struct NurseryChunkLayout {
char data[NurseryChunkUsableSize];
gc::ChunkTrailer trailer;
uintptr_t start() const { return uintptr_t(&data); }
uintptr_t end() const { return uintptr_t(&trailer); }
};
static_assert(sizeof(NurseryChunkLayout) == gc::ChunkSize,
"Nursery chunk size must match gc::Chunk size.");
NurseryChunkLayout& chunk(int index) const {
MOZ_ASSERT(index < numNurseryChunks_);
MOZ_ASSERT(start());
return reinterpret_cast<NurseryChunkLayout*>(start())[index];
NurseryChunk& chunk(unsigned index) const {
return *chunks_[index];
}
MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
}
void setCurrentChunk(unsigned chunkno);
void setStartPosition();
MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {
MOZ_ASSERT(chunkno < numNurseryChunks_);
MOZ_ASSERT(chunkno < numActiveChunks_);
currentChunk_ = chunkno;
position_ = chunk(chunkno).start();
currentEnd_ = chunk(chunkno).end();
initChunk(chunkno);
}
void updateNumActiveChunks(int newCount);
void updateNumChunks(unsigned newCount);
void updateNumChunksLocked(unsigned newCount, AutoLockGC& lock);
MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const {
MOZ_ASSERT(numActiveChunks_ > 0);
return chunk(numActiveChunks_ - 1).end();
MOZ_ASSERT(numChunks() > 0);
return chunks_.back()->end();
}
MOZ_ALWAYS_INLINE uintptr_t currentEnd() const {

Просмотреть файл

@ -138,7 +138,7 @@ js::gc::AllocateWholeCellSet(Arena* arena)
return nullptr;
}
if (nursery.approxFreeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
if (nursery.freeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
rt->gc.storeBuffer.setAboutToOverflow();
auto cells = static_cast<ArenaCellSet*>(data);

Просмотреть файл

@ -384,7 +384,7 @@ class StoreBuffer
{
}
bool enable();
MOZ_MUST_USE bool enable();
void disable();
bool isEnabled() const { return enabled_; }

Просмотреть файл

@ -1031,30 +1031,31 @@ GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
if (!rootsHash.init(256))
return false;
/*
* Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
* for default backward API compatibility.
*/
AutoLockGC lock(rt);
MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
setMaxMallocBytes(maxbytes);
{
AutoLockGC lock(rt);
const char* size = getenv("JSGC_MARK_STACK_LIMIT");
if (size)
setMarkStackLimit(atoi(size), lock);
/*
* Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
* for default backward API compatibility.
*/
MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
setMaxMallocBytes(maxbytes);
jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
const char* size = getenv("JSGC_MARK_STACK_LIMIT");
if (size)
setMarkStackLimit(atoi(size), lock);
if (!nursery.init(maxNurseryBytes))
return false;
jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
if (!nursery.isEnabled()) {
MOZ_ASSERT(nursery.nurserySize() == 0);
++rt->gc.generationalDisabled;
} else {
MOZ_ASSERT(nursery.nurserySize() > 0);
if (!storeBuffer.enable())
if (!nursery.init(maxNurseryBytes, lock))
return false;
if (!nursery.isEnabled()) {
MOZ_ASSERT(nursery.nurserySize() == 0);
++rt->gc.generationalDisabled;
} else {
MOZ_ASSERT(nursery.nurserySize() > 0);
}
}
#ifdef JS_GC_ZEAL
@ -6494,7 +6495,6 @@ GCRuntime::disableGenerationalGC()
if (isGenerationalGCEnabled()) {
evictNursery(JS::gcreason::API);
nursery.disable();
storeBuffer.disable();
}
++rt->gc.generationalDisabled;
}
@ -6504,10 +6504,8 @@ GCRuntime::enableGenerationalGC()
{
MOZ_ASSERT(generationalDisabled > 0);
--generationalDisabled;
if (generationalDisabled == 0) {
if (generationalDisabled == 0)
nursery.enable();
storeBuffer.enable();
}
}
bool

Просмотреть файл

@ -532,7 +532,6 @@ JSRuntime::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::Runtim
rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf);
rtSizes->gc.nurseryCommitted += gc.nursery.sizeOfHeapCommitted();
rtSizes->gc.nurseryDecommitted += gc.nursery.sizeOfHeapDecommitted();
rtSizes->gc.nurseryMallocedBuffers += gc.nursery.sizeOfMallocedBuffers(mallocSizeOf);
gc.storeBuffer.addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc);
}

Просмотреть файл

@ -491,16 +491,16 @@ class TypedArrayObjectTemplate : public TypedArrayObject
// If the buffer is for an inline typed object, the data pointer
// may be in the nursery, so include a barrier to make sure this
// object is updated if that typed object moves.
if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(buffer->dataPointerEither())) {
// Shared buffer data should never be nursery-allocated, so
// we need to fail here if isSharedMemory. However, mmap()
// can place a SharedArrayRawBuffer up against the bottom end
// of the nursery, and a zero-length buffer will erroneously be
auto ptr = buffer->dataPointerEither();
if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(ptr)) {
// Shared buffer data should never be nursery-allocated, so we
// need to fail here if isSharedMemory. However, mmap() can
// place a SharedArrayRawBuffer up against the bottom end of a
// nursery chunk, and a zero-length buffer will erroneously be
// perceived as being inside the nursery; sidestep that.
if (isSharedMemory) {
MOZ_ASSERT(buffer->byteLength() == 0 &&
cx->runtime()->gc.nursery.start() ==
buffer->dataPointerEither().unwrapValue());
(uintptr_t(ptr.unwrapValue()) & gc::ChunkMask) == 0);
} else {
cx->runtime()->gc.storeBuffer.putWholeCell(obj);
}

Просмотреть файл

@ -2631,11 +2631,6 @@ ReportJSRuntimeExplicitTreeStats(const JS::RuntimeStats& rtStats,
"GC arenas in non-empty chunks that is decommitted, i.e. it takes up "
"address space but no physical memory or swap space.");
REPORT_BYTES(rtPath2 + NS_LITERAL_CSTRING("runtime/gc/nursery-decommitted"),
KIND_NONHEAP, rtStats.runtime.gc.nurseryDecommitted,
"Memory allocated to the GC's nursery that is decommitted, i.e. it takes up "
"address space but no physical memory or swap space.");
REPORT_GC_BYTES(rtPath + NS_LITERAL_CSTRING("gc-heap/unused-chunks"),
rtStats.gcHeapUnusedChunks,
"Empty GC chunks which will soon be released unless claimed for new "