Mirror of https://github.com/mozilla/gecko-dev.git
Bug 670596 - Mark unused arenas as decommitted; r=?,njn
This patch tells the OS which pages we are not using after we do a GC_SHRINK. The result is that the OS can do a better job managing memory, send fewer pages to swap, and stop counting these pages against us in RSS.
Parent: ead08943a9
Commit: e6363db3f2
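
Conceptually, a decommit tells the OS it may reclaim the physical pages behind an address range while the range itself stays reserved; the patch's DecommitMemory (in the jsgchunk.cpp hunk below) does this with madvise(MADV_DONTNEED) on Unix-like systems and VirtualAlloc(MEM_RESET) on Windows. A minimal standalone POSIX sketch of the same idea, assuming a 1 MiB region to match ChunkSize; this is an illustration, not code from the patch:

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
    const size_t ChunkSize = size_t(1) << 20;   // 1 MiB, matching the GC chunk size
    void *p = mmap(NULL, ChunkSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    // Touch the pages so they become resident (counted in RSS).
    for (size_t i = 0; i < ChunkSize; i += 4096)
        static_cast<char *>(p)[i] = 1;

    // Tell the OS we no longer need the contents: the address range stays
    // reserved, but the pages can be dropped from RSS and need not be swapped.
    if (madvise(p, ChunkSize, MADV_DONTNEED) != 0)
        perror("madvise");

    munmap(p, ChunkSize);
    return 0;
}

Because the address range stays reserved, pointers into the chunk remain valid; the patch only has to re-commit (see CommitMemory below) before handing a decommitted arena out again.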
js/src/Makefile.in

@@ -245,6 +245,7 @@ EXPORTS_vm = \
 EXPORTS_ds = \
 	LifoAlloc.h \
+	BitArray.h \
 	$(NULL)
 
 EXPORTS_gc = \
js/src/ds/BitArray.h (new file)

@@ -0,0 +1,91 @@
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99 ft=cpp:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is SpiderMonkey JavaScript engine.
 *
 * The Initial Developer of the Original Code is
 * Mozilla Corporation.
 * Portions created by the Initial Developer are Copyright (C) 2011
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *  Terrence Cole <terrence@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef BitArray_h__
#define BitArray_h__

#include "jstypes.h"

#include "js/TemplateLib.h"

namespace js {

template <size_t nbits>
class BitArray {
  private:
    uintptr_t map[nbits / JS_BITS_PER_WORD + (nbits % JS_BITS_PER_WORD == 0 ? 0 : 1)];

  public:
    void clear(bool value) {
        if (value)
            memset(map, 0xFF, sizeof(map));
        else
            memset(map, 0, sizeof(map));
    }

    inline bool get(size_t offset) const {
        uintptr_t index, mask;
        getMarkWordAndMask(offset, &index, &mask);
        return map[index] & mask;
    }

    inline void set(size_t offset) {
        uintptr_t index, mask;
        getMarkWordAndMask(offset, &index, &mask);
        map[index] |= mask;
    }

    inline void unset(size_t offset) {
        uintptr_t index, mask;
        getMarkWordAndMask(offset, &index, &mask);
        map[index] &= ~mask;
    }

  private:
    inline void getMarkWordAndMask(size_t offset,
                                   uintptr_t *indexp, uintptr_t *maskp) const {
        *indexp = offset >> tl::FloorLog2<JS_BITS_PER_WORD>::result;
        *maskp = uintptr_t(1) << (offset & (JS_BITS_PER_WORD - 1));
    }
};

} /* namespace js */

#endif
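
For illustration (not part of the patch), a self-contained analogue of the word-and-mask arithmetic BitArray uses above; 64-bit words stand in for JS_BITS_PER_WORD, and the 256-bit size is an arbitrary choice:

#include <cstdint>
#include <cstdio>

int main() {
    const size_t nbits = 256;                     // arbitrary size for the demo
    const size_t bitsPerWord = 64;                // stand-in for JS_BITS_PER_WORD
    uint64_t map[nbits / bitsPerWord] = { 0 };

    size_t offset = 130;                          // bit to manipulate
    size_t index = offset >> 6;                   // word index; 6 == FloorLog2(64)
    uint64_t mask = uint64_t(1) << (offset & 63); // bit position within the word

    map[index] |= mask;                           // BitArray::set(offset)
    printf("bit %zu: %s\n", offset, (map[index] & mask) ? "set" : "clear");
    map[index] &= ~mask;                          // BitArray::unset(offset)
    return 0;
}

The patch instantiates the template as BitArray<ArenasPerChunk> (the PerArenaBitmap typedef in the jsgc.h hunks below), one bit per possible arena in a chunk.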

js/src/jsgc.cpp
@@ -525,10 +525,26 @@ ChunkPool::expire(JSRuntime *rt, bool releaseAll)
     JS_ASSERT_IF(releaseAll, !emptyCount);
 }
 
+JS_FRIEND_API(int64)
+ChunkPool::countDecommittedArenas(JSRuntime *rt)
+{
+    JS_ASSERT(this == &rt->gcChunkPool);
+
+    int64 numDecommitted = 0;
+    Chunk *chunk = emptyChunkListHead;
+    while (chunk) {
+        for (uint32 i = 0; i < ArenasPerChunk; ++i)
+            if (chunk->decommittedArenas.get(i))
+                ++numDecommitted;
+        chunk = chunk->info.next;
+    }
+    return numDecommitted;
+}
+
 /* static */ Chunk *
 Chunk::allocate(JSRuntime *rt)
 {
-    Chunk *chunk = static_cast<Chunk *>(AllocGCChunk());
+    Chunk *chunk = static_cast<Chunk *>(AllocChunk());
     if (!chunk)
         return NULL;
     chunk->init();
@@ -541,30 +557,37 @@ Chunk::release(JSRuntime *rt, Chunk *chunk)
 {
     JS_ASSERT(chunk);
     rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);
-    FreeGCChunk(chunk);
+    FreeChunk(chunk);
 }
 
 void
 Chunk::init()
 {
-    JS_POISON(this, JS_FREE_PATTERN, GC_CHUNK_SIZE);
+    JS_POISON(this, JS_FREE_PATTERN, ChunkSize);
 
-    /* Assemble all arenas into a linked list and mark them as not allocated. */
-    ArenaHeader **prevp = &info.emptyArenaListHead;
-    Arena *end = &arenas[ArrayLength(arenas)];
-    for (Arena *a = &arenas[0]; a != end; ++a) {
-        *prevp = &a->aheader;
-        a->aheader.setAsNotAllocated();
-        prevp = &a->aheader.next;
-    }
-    *prevp = NULL;
-
-    /* We clear the bitmap to guard against xpc_IsGrayGCThing being called on
-       uninitialized data, which would happen before the first GC cycle. */
+    /*
+     * We clear the bitmap to guard against xpc_IsGrayGCThing being called on
+     * uninitialized data, which would happen before the first GC cycle.
+     */
     bitmap.clear();
 
+    /* Initialize the arena tracking bitmap. */
+    decommittedArenas.clear(false);
+
     /* Initialize the chunk info. */
+    info.freeArenasHead = &arenas[0].aheader;
+    info.lastDecommittedArenaOffset = 0;
+    info.numArenasFree = ArenasPerChunk;
+    info.numArenasFreeCommitted = ArenasPerChunk;
     info.age = 0;
-    info.numFree = ArenasPerChunk;
+
+    /* Initialize the arena header state. */
+    for (jsuint i = 0; i < ArenasPerChunk; i++) {
+        arenas[i].aheader.setAsNotAllocated();
+        arenas[i].aheader.next = (i + 1 < ArenasPerChunk)
+                                 ? &arenas[i + 1].aheader
+                                 : NULL;
+    }
 
     /* The rest of info fields are initialized in PickChunk. */
 }
@@ -607,16 +630,66 @@ Chunk::removeFromAvailableList()
     info.next = NULL;
 }
 
+/*
+ * Search for and return the next decommitted Arena. Our goal is to keep
+ * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
+ * it to the most recently freed arena when we free, and forcing it to
+ * the last alloc + 1 when we allocate.
+ */
+jsuint
+Chunk::findDecommittedArenaOffset()
+{
+    /* Note: lastFreeArenaOffset can be past the end of the list. */
+    for (jsuint i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++)
+        if (decommittedArenas.get(i))
+            return i;
+    for (jsuint i = 0; i < info.lastDecommittedArenaOffset; i++)
+        if (decommittedArenas.get(i))
+            return i;
+    JS_NOT_REACHED("No decommitted arenas found.");
+    return -1;
+}
+
+ArenaHeader *
+Chunk::fetchNextDecommittedArena()
+{
+    JS_ASSERT(info.numArenasFreeCommitted < info.numArenasFree);
+
+    jsuint offset = findDecommittedArenaOffset();
+    info.lastDecommittedArenaOffset = offset + 1;
+    --info.numArenasFree;
+    decommittedArenas.unset(offset);
+
+    Arena *arena = &arenas[offset];
+    CommitMemory(arena, ArenaSize);
+    arena->aheader.setAsNotAllocated();
+
+    return &arena->aheader;
+}
+
+inline ArenaHeader *
+Chunk::fetchNextFreeArena()
+{
+    JS_ASSERT(info.numArenasFreeCommitted > 0);
+
+    ArenaHeader *aheader = info.freeArenasHead;
+    info.freeArenasHead = aheader->next;
+    --info.numArenasFreeCommitted;
+    --info.numArenasFree;
+
+    return aheader;
+}
+
 ArenaHeader *
 Chunk::allocateArena(JSCompartment *comp, AllocKind thingKind)
 {
-    JS_ASSERT(hasAvailableArenas());
-    ArenaHeader *aheader = info.emptyArenaListHead;
-    info.emptyArenaListHead = aheader->next;
-    aheader->init(comp, thingKind);
-    --info.numFree;
+    JS_ASSERT(!noAvailableArenas());
 
-    if (!hasAvailableArenas())
+    ArenaHeader *aheader = JS_LIKELY(info.numArenasFreeCommitted > 0)
+                           ? fetchNextFreeArena()
+                           : fetchNextDecommittedArena();
+    aheader->init(comp, thingKind);
+    if (JS_UNLIKELY(noAvailableArenas()))
         removeFromAvailableList();
 
     JSRuntime *rt = comp->rt;
@@ -655,13 +728,15 @@ Chunk::releaseArena(ArenaHeader *aheader)
     JS_ATOMIC_ADD(&comp->gcBytes, -int32(ArenaSize));
 
     aheader->setAsNotAllocated();
-    aheader->next = info.emptyArenaListHead;
-    info.emptyArenaListHead = aheader;
-    ++info.numFree;
-    if (info.numFree == 1) {
+    aheader->next = info.freeArenasHead;
+    info.freeArenasHead = aheader;
+    ++info.numArenasFreeCommitted;
+    ++info.numArenasFree;
+
+    if (info.numArenasFree == 1) {
         JS_ASSERT(!info.prevp);
         JS_ASSERT(!info.next);
-        addToAvailableList(aheader->compartment);
+        addToAvailableList(comp);
     } else if (!unused()) {
         JS_ASSERT(info.prevp);
     } else {
@@ -850,7 +925,12 @@ MarkIfGCThingWord(JSTracer *trc, jsuword w)
     if (!Chunk::withinArenasRange(addr))
         return CGCT_NOTARENA;
 
-    ArenaHeader *aheader = &chunk->arenas[Chunk::arenaIndex(addr)].aheader;
+    /* If the arena is not currently allocated, don't access the header. */
+    size_t arenaOffset = Chunk::arenaIndex(addr);
+    if (chunk->decommittedArenas.get(arenaOffset))
+        return CGCT_FREEARENA;
+
+    ArenaHeader *aheader = &chunk->arenas[arenaOffset].aheader;
 
     if (!aheader->allocated())
         return CGCT_FREEARENA;
@@ -2292,6 +2372,47 @@ ReleaseObservedTypes(JSContext *cx)
     return releaseTypes;
 }
 
+static void
+DecommitFreePages(JSContext *cx)
+{
+    JSRuntime *rt = cx->runtime;
+
+    for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront()) {
+        Chunk *chunk = r.front();
+        while (chunk) {
+            ArenaHeader *aheader = static_cast<ArenaHeader*>(chunk->info.freeArenasHead);
+
+            /*
+             * In the non-failure case, the list will be gone at the end of
+             * the loop. In the case where we fail, we relink all failed
+             * decommits into a new list on freeArenasHead.
+             */
+            chunk->info.freeArenasHead = NULL;
+
+            while (aheader) {
+                /* Store aside everything we will need after decommit. */
+                ArenaHeader *next = aheader->next;
+
+                bool success = DecommitMemory(aheader, ArenaSize);
+                if (!success) {
+                    aheader->next = chunk->info.freeArenasHead;
+                    chunk->info.freeArenasHead = aheader;
+                    aheader = next;
+                    continue;
+                }
+
+                size_t arenaOffset = Chunk::arenaIndex(reinterpret_cast<uintptr_t>(aheader));
+                chunk->decommittedArenas.set(arenaOffset);
+                --chunk->info.numArenasFreeCommitted;
+
+                aheader = next;
+            }
+
+            chunk = chunk->info.next;
+        }
+    }
+}
+
 static void
 SweepCompartments(JSContext *cx, JSGCInvocationKind gckind)
 {
@@ -2504,6 +2624,9 @@ SweepPhase(JSContext *cx, GCMarker *gcmarker, JSGCInvocationKind gckind)
          */
         rt->gcChunkPool.expire(rt, gckind == GC_SHRINK);
 #endif
+
+        if (gckind == GC_SHRINK)
+            DecommitFreePages(cx);
     }
 
     if (rt->gcCallback)
@@ -2978,6 +3101,27 @@ IterateCompartmentsArenasCells(JSContext *cx, void *data,
     }
 }
 
+void
+IterateChunks(JSContext *cx, void *data, IterateChunkCallback chunkCallback)
+{
+    /* :XXX: Any way to common this preamble with IterateCompartmentsArenasCells? */
+    CHECK_REQUEST(cx);
+    LeaveTrace(cx);
+
+    JSRuntime *rt = cx->runtime;
+    JS_ASSERT(!rt->gcRunning);
+
+    AutoLockGC lock(rt);
+    AutoGCSession gcsession(cx);
+#ifdef JS_THREADSAFE
+    rt->gcHelperThread.waitBackgroundSweepEnd();
+#endif
+    AutoUnlockGC unlock(rt);
+
+    for (js::GCChunkSet::Range r = rt->gcChunkSet.all(); !r.empty(); r.popFront())
+        chunkCallback(cx, data, r.front());
+}
+
 void
 IterateCells(JSContext *cx, JSCompartment *compartment, AllocKind thingKind,
              void *data, IterateCellCallback cellCallback)

js/src/jsgc.h
@@ -59,6 +59,7 @@
 #include "jsgcstats.h"
 #include "jscell.h"
 
+#include "ds/BitArray.h"
 #include "gc/Statistics.h"
 #include "js/HashTable.h"
 #include "js/Vector.h"
@@ -363,6 +364,12 @@ struct ArenaHeader {
     friend struct FreeLists;
 
     JSCompartment *compartment;
+
+    /*
+     * ArenaHeader::next has two purposes: when unallocated, it points to the
+     * next available Arena's header. When allocated, it points to the next
+     * arena of the same size class and compartment.
+     */
     ArenaHeader *next;
 
   private:
@@ -413,18 +420,28 @@ struct ArenaHeader {
     inline uintptr_t address() const;
     inline Chunk *chunk() const;
 
-    void setAsNotAllocated() {
-        allocKind = size_t(FINALIZE_LIMIT);
-        hasDelayedMarking = 0;
-        nextDelayedMarking = 0;
-    }
-
     bool allocated() const {
         JS_ASSERT(allocKind <= size_t(FINALIZE_LIMIT));
         return allocKind < size_t(FINALIZE_LIMIT);
     }
 
-    inline void init(JSCompartment *comp, AllocKind kind);
+    void init(JSCompartment *comp, AllocKind kind) {
+        JS_ASSERT(!allocated());
+        JS_ASSERT(!hasDelayedMarking);
+        compartment = comp;
+
+        JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
+        allocKind = size_t(kind);
+
+        /* See comments in FreeSpan::allocateFromNewArena. */
+        firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
+    }
+
+    void setAsNotAllocated() {
+        allocKind = size_t(FINALIZE_LIMIT);
+        hasDelayedMarking = 0;
+        nextDelayedMarking = 0;
+    }
 
     uintptr_t arenaAddress() const {
         return address();
@@ -543,13 +560,60 @@ struct Arena {
 struct ChunkInfo {
     Chunk *next;
     Chunk **prevp;
-    ArenaHeader *emptyArenaListHead;
-    size_t age;
-    size_t numFree;
+
+    /* Free arenas are linked together with aheader.next. */
+    ArenaHeader *freeArenasHead;
+
+    /*
+     * Decommitted arenas are tracked by a bitmap in the chunk header. We use
+     * this offset to start our search iteration close to a decommitted arena
+     * that we can allocate.
+     */
+    uint32 lastDecommittedArenaOffset;
+
+    /* Number of free arenas, either committed or decommitted. */
+    uint32 numArenasFree;
+
+    /* Number of free, committed arenas. */
+    uint32 numArenasFreeCommitted;
+
+    /* Number of GC cycles this chunk has survived. */
+    uint32 age;
 };
 
-const size_t BytesPerArena = ArenaSize + ArenaBitmapBytes;
-const size_t ArenasPerChunk = (GC_CHUNK_SIZE - sizeof(ChunkInfo)) / BytesPerArena;
+/*
+ * Calculating ArenasPerChunk:
+ *
+ * In order to figure out how many Arenas will fit in a chunk, we need to know
+ * how much extra space is available after we allocate the header data. This
+ * is a problem because the header size depends on the number of arenas in the
+ * chunk. The two dependent fields are bitmap and decommittedArenas.
+ *
+ * For the mark bitmap, we know that each arena will use a fixed number of full
+ * bytes: ArenaBitmapBytes. The full size of the header data is this number
+ * multiplied by the eventual number of arenas we have in the header. We,
+ * conceptually, distribute this header data among the individual arenas and do
+ * not include it in the header. This way we do not have to worry about its
+ * variable size: it gets attached to the variable number we are computing.
+ *
+ * For the decommitted arena bitmap, we only have 1 bit per arena, so this
+ * technique will not work. Instead, we observe that we do not have enough
+ * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
+ * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
+ * This will not become 63 bytes unless we double the data required in the
+ * header. Therefore, we just compute the number of bytes required to track
+ * every possible arena and do not worry about slop bits, since there are too
+ * few to usefully allocate.
+ *
+ * To actually compute the number of arenas we can allocate in a chunk, we
+ * divide the amount of available space less the header info (not including
+ * the mark bitmap which is distributed into the arena size) by the size of
+ * the arena (with the mark bitmap bytes it uses).
+ */
+const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
+const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
+const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
+const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
 
 /* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
 struct ChunkBitmap {
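
To make the arithmetic in the hunk above concrete, a standalone sketch (not from the tree) with plugged-in numbers: ChunkSize = 1 MiB follows from ChunkShift = 20 in this patch, while ArenaSize = 4096, ArenaBitmapBytes = 64, and sizeof(ChunkInfo) = 40 are assumed values for a 64-bit build, used here for illustration only:

#include <cstddef>
#include <cstdio>

int main() {
    const size_t ChunkSize = size_t(1) << 20;   // 1 MiB, from ChunkShift = 20
    const size_t ArenaSize = 4096;              // assumed: one arena per 4 KiB page
    const size_t ArenaBitmapBytes = 64;         // assumed mark-bitmap bytes per arena
    const size_t ChunkInfoSize = 40;            // assumed stand-in for sizeof(ChunkInfo)

    // Each arena conceptually carries its own slice of the mark bitmap.
    const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
    // One decommit bit per *possible* arena; slop bits are ignored.
    const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / 8;
    const size_t ChunkBytesAvailable =
        ChunkSize - ChunkInfoSize - ChunkDecommitBitmapBytes;
    const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;

    // With these assumed numbers this prints a 32-byte decommit bitmap
    // and 252 arenas per chunk.
    printf("decommit bitmap = %zu bytes, ArenasPerChunk = %zu\n",
           ChunkDecommitBitmapBytes, ArenasPerChunk);
    return 0;
}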
@@ -615,11 +679,14 @@ struct ChunkBitmap {
 
 JS_STATIC_ASSERT(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap));
 
-const size_t ChunkPadSize = GC_CHUNK_SIZE
+typedef BitArray<ArenasPerChunk> PerArenaBitmap;
+
+const size_t ChunkPadSize = ChunkSize
                             - (sizeof(Arena) * ArenasPerChunk)
                             - sizeof(ChunkBitmap)
+                            - sizeof(PerArenaBitmap)
                             - sizeof(ChunkInfo);
-JS_STATIC_ASSERT(ChunkPadSize < BytesPerArena);
+JS_STATIC_ASSERT(ChunkPadSize < BytesPerArenaWithHeader);
 
 /*
  * Chunks contain arenas and associated data structures (mark bitmap, delayed
@@ -632,35 +699,36 @@ struct Chunk {
     uint8 padding[ChunkPadSize];
 
     ChunkBitmap bitmap;
+    PerArenaBitmap decommittedArenas;
     ChunkInfo info;
 
     static Chunk *fromAddress(uintptr_t addr) {
-        addr &= ~GC_CHUNK_MASK;
+        addr &= ~ChunkMask;
         return reinterpret_cast<Chunk *>(addr);
     }
 
     static bool withinArenasRange(uintptr_t addr) {
-        uintptr_t offset = addr & GC_CHUNK_MASK;
+        uintptr_t offset = addr & ChunkMask;
         return offset < ArenasPerChunk * ArenaSize;
     }
 
     static size_t arenaIndex(uintptr_t addr) {
         JS_ASSERT(withinArenasRange(addr));
-        return (addr & GC_CHUNK_MASK) >> ArenaShift;
+        return (addr & ChunkMask) >> ArenaShift;
     }
 
     uintptr_t address() const {
         uintptr_t addr = reinterpret_cast<uintptr_t>(this);
-        JS_ASSERT(!(addr & GC_CHUNK_MASK));
+        JS_ASSERT(!(addr & ChunkMask));
         return addr;
     }
 
     bool unused() const {
-        return info.numFree == ArenasPerChunk;
+        return info.numArenasFree == ArenasPerChunk;
     }
 
-    bool hasAvailableArenas() const {
-        return info.numFree > 0;
+    bool noAvailableArenas() const {
+        return info.numArenasFree == 0;
     }
 
     inline void addToAvailableList(JSCompartment *compartment);
@@ -675,9 +743,16 @@ struct Chunk {
   private:
     inline void init();
 
+    /* Search for a decommitted arena to allocate. */
+    jsuint findDecommittedArenaOffset();
+    ArenaHeader* fetchNextDecommittedArena();
+
+    /* Unlink and return the freeArenasHead. */
+    inline ArenaHeader* fetchNextFreeArena();
 };
 
-JS_STATIC_ASSERT(sizeof(Chunk) == GC_CHUNK_SIZE);
+JS_STATIC_ASSERT(sizeof(Chunk) == ChunkSize);
 
 class ChunkPool {
     Chunk *emptyChunkListHead;
@@ -702,6 +777,9 @@ class ChunkPool {
 
     /* Must be called either during the GC or with the GC lock taken. */
     void expire(JSRuntime *rt, bool releaseAll);
+
+    /* Must be called either during the GC or with the GC lock taken. */
+    JS_FRIEND_API(int64) countDecommittedArenas(JSRuntime *rt);
 };
 
 inline uintptr_t
@@ -726,7 +804,7 @@ Cell::chunk() const
 {
     uintptr_t addr = uintptr_t(this);
     JS_ASSERT(addr % Cell::CellSize == 0);
-    addr &= ~(GC_CHUNK_SIZE - 1);
+    addr &= ~(ChunkSize - 1);
     return reinterpret_cast<Chunk *>(addr);
 }
 
@@ -744,20 +822,6 @@ Cell::isAligned() const
 }
 #endif
 
-inline void
-ArenaHeader::init(JSCompartment *comp, AllocKind kind)
-{
-    JS_ASSERT(!allocated());
-    JS_ASSERT(!hasDelayedMarking);
-    compartment = comp;
-
-    JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
-    allocKind = size_t(kind);
-
-    /* See comments in FreeSpan::allocateFromNewArena. */
-    firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
-}
-
 inline uintptr_t
 ArenaHeader::address() const
 {
@@ -808,7 +872,7 @@ ChunkBitmap::getMarkWordAndMask(const Cell *cell, uint32 color,
                                 uintptr_t **wordp, uintptr_t *maskp)
 {
     JS_ASSERT(cell->chunk() == Chunk::fromAddress(reinterpret_cast<uintptr_t>(this)));
-    size_t bit = (cell->address() & GC_CHUNK_MASK) / Cell::CellSize + color;
+    size_t bit = (cell->address() & ChunkMask) / Cell::CellSize + color;
     JS_ASSERT(bit < ArenaBitmapBits * ArenasPerChunk);
     *maskp = uintptr_t(1) << (bit % JS_BITS_PER_WORD);
     *wordp = &bitmap[bit / JS_BITS_PER_WORD];
@@ -1152,7 +1216,7 @@ struct ArenaLists {
  * chunks with total capacity of 16MB to avoid buffer resizes during browser
  * startup.
  */
-const size_t INITIAL_CHUNK_CAPACITY = 16 * 1024 * 1024 / GC_CHUNK_SIZE;
+const size_t INITIAL_CHUNK_CAPACITY = 16 * 1024 * 1024 / ChunkSize;
 
 /* The number of GC cycles an empty chunk can survive before being released. */
 const size_t MAX_EMPTY_CHUNK_AGE = 4;
@@ -1457,13 +1521,13 @@ struct GCChunkHasher {
      * ratio.
      */
     static HashNumber hash(gc::Chunk *chunk) {
-        JS_ASSERT(!(jsuword(chunk) & GC_CHUNK_MASK));
-        return HashNumber(jsuword(chunk) >> GC_CHUNK_SHIFT);
+        JS_ASSERT(!(jsuword(chunk) & gc::ChunkMask));
+        return HashNumber(jsuword(chunk) >> gc::ChunkShift);
     }
 
     static bool match(gc::Chunk *k, gc::Chunk *l) {
-        JS_ASSERT(!(jsuword(k) & GC_CHUNK_MASK));
-        JS_ASSERT(!(jsuword(l) & GC_CHUNK_MASK));
+        JS_ASSERT(!(jsuword(k) & gc::ChunkMask));
+        JS_ASSERT(!(jsuword(l) & gc::ChunkMask));
         return k == l;
     }
 };
@@ -1653,6 +1717,7 @@ void
 MarkStackRangeConservatively(JSTracer *trc, Value *begin, Value *end);
 
 typedef void (*IterateCompartmentCallback)(JSContext *cx, void *data, JSCompartment *compartment);
+typedef void (*IterateChunkCallback)(JSContext *cx, void *data, gc::Chunk *chunk);
 typedef void (*IterateArenaCallback)(JSContext *cx, void *data, gc::Arena *arena,
                                      JSGCTraceKind traceKind, size_t thingSize);
 typedef void (*IterateCellCallback)(JSContext *cx, void *data, void *thing,
@@ -1669,6 +1734,12 @@ IterateCompartmentsArenasCells(JSContext *cx, void *data,
                                IterateArenaCallback arenaCallback,
                                IterateCellCallback cellCallback);
 
+/*
+ * Invoke chunkCallback on every in-use chunk.
+ */
+extern JS_FRIEND_API(void)
+IterateChunks(JSContext *cx, void *data, IterateChunkCallback chunkCallback);
+
 /*
  * Invoke cellCallback on every in-use object of the specified thing kind for
  * the given compartment or for all compartments if it is null.

js/src/jsgchunk.cpp
@@ -55,6 +55,7 @@
 # include <mach/mach_init.h>
 # include <mach/vm_map.h>
 # include <malloc/malloc.h>
+# include <sys/mman.h>
 
 #elif defined(XP_UNIX)
 
@@ -271,22 +272,23 @@ UnmapPages(void *addr, size_t size)
 #endif
 
 namespace js {
+namespace gc {
 
-inline void *
+static inline void *
 FindChunkStart(void *p)
 {
     jsuword addr = reinterpret_cast<jsuword>(p);
-    addr = (addr + GC_CHUNK_MASK) & ~GC_CHUNK_MASK;
+    addr = (addr + ChunkMask) & ~ChunkMask;
     return reinterpret_cast<void *>(addr);
 }
 
 void *
-AllocGCChunk()
+AllocChunk()
 {
     void *p;
 
 #ifdef JS_GC_HAS_MAP_ALIGN
-    p = MapAlignedPages(GC_CHUNK_SIZE, GC_CHUNK_SIZE);
+    p = MapAlignedPages(ChunkSize, ChunkSize);
     if (!p)
         return NULL;
 #else
@@ -296,24 +298,24 @@ AllocGCChunk()
      * final result via one mapping operation. This means unmapping any
      * preliminary result that is not correctly aligned.
      */
-    p = MapPages(NULL, GC_CHUNK_SIZE);
+    p = MapPages(NULL, ChunkSize);
     if (!p)
         return NULL;
 
-    if (reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK) {
-        UnmapPages(p, GC_CHUNK_SIZE);
-        p = MapPages(FindChunkStart(p), GC_CHUNK_SIZE);
+    if (reinterpret_cast<jsuword>(p) & ChunkMask) {
+        UnmapPages(p, ChunkSize);
+        p = MapPages(FindChunkStart(p), ChunkSize);
         while (!p) {
             /*
              * Over-allocate in order to map a memory region that is
             * definitely large enough then deallocate and allocate again the
              * correct size, within the over-sized mapping.
              */
-            p = MapPages(NULL, GC_CHUNK_SIZE * 2);
+            p = MapPages(NULL, ChunkSize * 2);
             if (!p)
                 return 0;
-            UnmapPages(p, GC_CHUNK_SIZE * 2);
-            p = MapPages(FindChunkStart(p), GC_CHUNK_SIZE);
+            UnmapPages(p, ChunkSize * 2);
+            p = MapPages(FindChunkStart(p), ChunkSize);
 
             /*
              * Failure here indicates a race with another thread, so
@@ -323,17 +325,55 @@ AllocGCChunk()
     }
 #endif /* !JS_GC_HAS_MAP_ALIGN */
 
-    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK));
+    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & ChunkMask));
     return p;
 }
 
 void
-FreeGCChunk(void *p)
+FreeChunk(void *p)
 {
     JS_ASSERT(p);
-    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK));
-    UnmapPages(p, GC_CHUNK_SIZE);
+    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & ChunkMask));
+    UnmapPages(p, ChunkSize);
 }
 
+#ifdef XP_WIN
+bool
+CommitMemory(void *addr, size_t size)
+{
+    JS_ASSERT(uintptr_t(addr) % 4096UL == 0);
+    return true;
+}
+
+bool
+DecommitMemory(void *addr, size_t size)
+{
+    JS_ASSERT(uintptr_t(addr) % 4096UL == 0);
+    LPVOID p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+    return p == addr;
+}
+#elif defined XP_OSX || defined XP_UNIX
+# ifndef MADV_DONTNEED
+#  define MADV_DONTNEED MADV_FREE
+# endif
+bool
+CommitMemory(void *addr, size_t size)
+{
+    JS_ASSERT(uintptr_t(addr) % 4096UL == 0);
+    return true;
+}
+
+bool
+DecommitMemory(void *addr, size_t size)
+{
+    JS_ASSERT(uintptr_t(addr) % 4096UL == 0);
+    int result = madvise(addr, size, MADV_DONTNEED);
+    return result != -1;
+}
+#else
+# error "No CommitMemory defined on this platform."
+#endif
+
+} /* namespace gc */
 } /* namespace js */

js/src/jsgchunk.h
@@ -44,17 +44,25 @@
 #include "jsutil.h"
 
 namespace js {
+namespace gc {
 
-const size_t GC_CHUNK_SHIFT = 20;
-const size_t GC_CHUNK_SIZE = size_t(1) << GC_CHUNK_SHIFT;
-const size_t GC_CHUNK_MASK = GC_CHUNK_SIZE - 1;
+const size_t ChunkShift = 20;
+const size_t ChunkSize = size_t(1) << ChunkShift;
+const size_t ChunkMask = ChunkSize - 1;
 
 void *
-AllocGCChunk();
+AllocChunk();
 
 void
-FreeGCChunk(void *p);
+FreeChunk(void *p);
 
-}
+bool
+CommitMemory(void *addr, size_t size);
+
+bool
+DecommitMemory(void *addr, size_t size);
+
+} /* namespace gc */
+} /* namespace js */
 
 #endif /* jsgchunk_h__ */
@@ -1284,11 +1284,21 @@ CompartmentCallback(JSContext *cx, void *vdata, JSCompartment *compartment)
                            moz_malloc_usable_size);
 }
 
+void
+ChunkCallback(JSContext *cx, void *vdata, js::gc::Chunk *chunk)
+{
+    IterateData *data = static_cast<IterateData *>(vdata);
+    for (uint32 i = 0; i < js::gc::ArenasPerChunk; i++)
+        if (chunk->decommittedArenas.get(i))
+            data->gcHeapChunkDirtyDecommitted += js::gc::ArenaSize;
+}
+
 void
 ArenaCallback(JSContext *cx, void *vdata, js::gc::Arena *arena,
               JSGCTraceKind traceKind, size_t thingSize)
 {
     IterateData *data = static_cast<IterateData *>(vdata);
 
     data->currCompartmentStats->gcHeapArenaHeaders +=
         sizeof(js::gc::ArenaHeader);
     size_t allocationSpace = arena->thingsSpan(thingSize);
@@ -1431,7 +1441,7 @@ static PRInt64
 GetGCChunkTotalBytes()
 {
     JSRuntime *rt = nsXPConnect::GetRuntimeInstance()->GetJSRuntime();
-    return PRInt64(JS_GetGCParameter(rt, JSGC_TOTAL_CHUNKS)) * js::GC_CHUNK_SIZE;
+    return PRInt64(JS_GetGCParameter(rt, JSGC_TOTAL_CHUNKS)) * js::gc::ChunkSize;
 }
 
 NS_MEMORY_REPORTER_IMPLEMENT(XPConnectJSGCHeap,
@@ -1550,15 +1560,20 @@ CollectCompartmentStatsForRuntime(JSRuntime *rt, IterateData *data)
 
         data->compartmentStatsVector.SetCapacity(rt->compartments.length());
 
+        data->gcHeapChunkCleanDecommitted =
+            rt->gcChunkPool.countDecommittedArenas(rt) *
+            js::gc::ArenaSize;
         data->gcHeapChunkCleanUnused =
             PRInt64(JS_GetGCParameter(rt, JSGC_UNUSED_CHUNKS)) *
-            js::GC_CHUNK_SIZE;
+            js::gc::ChunkSize -
+            data->gcHeapChunkCleanDecommitted;
         data->gcHeapChunkTotal =
             PRInt64(JS_GetGCParameter(rt, JSGC_TOTAL_CHUNKS)) *
-            js::GC_CHUNK_SIZE;
+            js::gc::ChunkSize;
 
         js::IterateCompartmentsArenasCells(cx, data, CompartmentCallback,
                                            ArenaCallback, CellCallback);
+        js::IterateChunks(cx, data, ChunkCallback);
 
         for (js::ThreadDataIter i(rt); !i.empty(); i.popFront())
             data->stackSize += i.threadData()->stackSpace.committedSize();
@@ -1577,7 +1592,9 @@ CollectCompartmentStatsForRuntime(JSRuntime *rt, IterateData *data)
     // This is initialized to all bytes stored in used chunks, and then we
     // subtract used space from it each time around the loop.
    data->gcHeapChunkDirtyUnused = data->gcHeapChunkTotal -
-                                   data->gcHeapChunkCleanUnused;
+                                   data->gcHeapChunkCleanUnused -
+                                   data->gcHeapChunkCleanDecommitted -
+                                   data->gcHeapChunkDirtyDecommitted;
 
     for (PRUint32 index = 0;
          index < data->compartmentStatsVector.Length();
@@ -1625,7 +1642,7 @@ CollectCompartmentStatsForRuntime(JSRuntime *rt, IterateData *data)
 
     size_t numDirtyChunks = (data->gcHeapChunkTotal -
                              data->gcHeapChunkCleanUnused) /
-                            js::GC_CHUNK_SIZE;
+                            js::gc::ChunkSize;
     PRInt64 perChunkAdmin =
         sizeof(js::gc::Chunk) - (sizeof(js::gc::Arena) * js::gc::ArenasPerChunk);
     data->gcHeapChunkAdmin = numDirtyChunks * perChunkAdmin;
@@ -1636,6 +1653,8 @@ CollectCompartmentStatsForRuntime(JSRuntime *rt, IterateData *data)
     // they can be fractional.
     data->gcHeapUnusedPercentage = (data->gcHeapChunkCleanUnused +
                                     data->gcHeapChunkDirtyUnused +
+                                    data->gcHeapChunkCleanDecommitted +
+                                    data->gcHeapChunkDirtyDecommitted +
                                     data->gcHeapArenaUnused) * 10000 /
                                    data->gcHeapChunkTotal;
 
@@ -1915,14 +1934,25 @@ ReportJSRuntimeStats(const IterateData &data, const nsACString &pathPrefix,
                       JS_GC_HEAP_KIND, data.gcHeapChunkDirtyUnused,
                       "Memory on the garbage-collected JavaScript heap, within chunks with at "
                       "least one allocated GC thing, that could be holding useful data but "
-                      "currently isn't.",
+                      "currently isn't. Memory here is mutually exclusive with memory reported "
+                      "under gc-heap-decommitted.",
                       callback, closure);
 
     ReportMemoryBytes(pathPrefix +
                       NS_LITERAL_CSTRING("gc-heap-chunk-clean-unused"),
                       JS_GC_HEAP_KIND, data.gcHeapChunkCleanUnused,
                       "Memory on the garbage-collected JavaScript heap taken by completely empty "
-                      "chunks, that soon will be released unless claimed for new allocations.",
+                      "chunks, that soon will be released unless claimed for new allocations. "
+                      "Memory here is mutually exclusive with memory reported under "
+                      "gc-heap-decommitted.",
                       callback, closure);
 
+    ReportMemoryBytes(pathPrefix +
+                      NS_LITERAL_CSTRING("gc-heap-decommitted"),
+                      JS_GC_HEAP_KIND,
+                      data.gcHeapChunkCleanDecommitted + data.gcHeapChunkDirtyDecommitted,
+                      "Memory in the address space of the garbage-collected JavaScript heap that "
+                      "is currently returned to the OS.",
+                      callback, closure);
+
     ReportMemoryBytes(pathPrefix +
@@ -1931,6 +1961,7 @@ ReportJSRuntimeStats(const IterateData &data, const nsACString &pathPrefix,
                       "Memory on the garbage-collected JavaScript heap, within chunks, that is "
                       "used to hold internal book-keeping information.",
                       callback, closure);
+
 }
 
 } // namespace memory
@@ -1975,8 +2006,16 @@ public:
                           "easy comparison with other 'js-gc' reporters.",
                           callback, closure);
 
+        ReportMemoryBytes(NS_LITERAL_CSTRING("js-gc-heap-decommitted"),
+                          nsIMemoryReporter::KIND_OTHER,
+                          data.gcHeapChunkCleanDecommitted + data.gcHeapChunkDirtyDecommitted,
+                          "The same as 'explicit/js/gc-heap-decommitted'. Shown here for "
+                          "easy comparison with other 'js-gc' reporters.",
+                          callback, closure);
+
         ReportMemoryBytes(NS_LITERAL_CSTRING("js-gc-heap-arena-unused"),
-                          nsIMemoryReporter::KIND_OTHER, data.gcHeapArenaUnused,
+                          nsIMemoryReporter::KIND_OTHER,
+                          data.gcHeapArenaUnused,
                           "Memory on the garbage-collected JavaScript heap, within arenas, that "
                           "could be holding useful data but currently isn't. This is the sum of "
                           "all compartments' 'gc-heap/arena-unused' numbers.",
@@ -237,6 +237,8 @@ struct IterateData
         gcHeapChunkTotal(0),
         gcHeapChunkCleanUnused(0),
         gcHeapChunkDirtyUnused(0),
+        gcHeapChunkCleanDecommitted(0),
+        gcHeapChunkDirtyDecommitted(0),
         gcHeapArenaUnused(0),
         gcHeapChunkAdmin(0),
         gcHeapUnusedPercentage(0),
@@ -258,6 +260,8 @@
     PRInt64 gcHeapChunkTotal;
     PRInt64 gcHeapChunkCleanUnused;
     PRInt64 gcHeapChunkDirtyUnused;
+    PRInt64 gcHeapChunkCleanDecommitted;
+    PRInt64 gcHeapChunkDirtyDecommitted;
     PRInt64 gcHeapArenaUnused;
     PRInt64 gcHeapChunkAdmin;
     PRInt64 gcHeapUnusedPercentage;