Bug 988486 - Re-organize Zone to save some space and increase readability; r=jonco

Terrence Cole 2014-05-16 11:24:23 -07:00
Parent e079fd36ed
Commit 873477a08e
2 changed files with 243 additions and 322 deletions

View file

@@ -24,26 +24,25 @@ using namespace js::gc;
 JS::Zone::Zone(JSRuntime *rt)
   : JS::shadow::Zone(rt, &rt->gc.marker),
     allocator(this),
-    ionUsingBarriers_(false),
-    active(false),
-    gcScheduled(false),
-    gcState(NoGC),
-    gcPreserveCode(false),
+    types(this),
+    compartments(),
+    gcGrayRoots(),
+    gcHeapGrowthFactor(3.0),
+    gcMallocBytes(0),
+    gcMallocGCTriggered(false),
     gcBytes(0),
     gcTriggerBytes(0),
-    gcHeapGrowthFactor(3.0),
+    data(nullptr),
     isSystem(false),
     usedByExclusiveThread(false),
     scheduledForDestruction(false),
     maybeAlive(true),
-    gcMallocBytes(0),
-    gcMallocGCTriggered(false),
-    gcGrayRoots(),
-    data(nullptr),
-    types(this)
-#ifdef JS_ION
-    , jitZone_(nullptr)
-#endif
+    active(false),
+    jitZone_(nullptr),
+    gcState_(NoGC),
+    gcScheduled_(false),
+    gcPreserveCode_(false),
+    ionUsingBarriers_(false)
 {
     /* Ensure that there are no vtables to mess us up here. */
     JS_ASSERT(reinterpret_cast<JS::shadow::Zone *>(this) ==
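
The constructor above now initializes members in their new declaration order. The space saving comes from that reordering: the bool flags are grouped together at the tail of the object instead of being interleaved with pointer- and word-sized members, so they pack into a single word rather than each costing a padded one. A minimal sketch of the effect, with illustrative member names rather than the actual Zone layout:

    #include <cstddef>
    #include <cstdio>

    // Interleaved: each lone bool is padded up to the alignment of the
    // 8-byte member that follows it.
    struct Interleaved {
        bool a;          // 1 byte + 7 bytes padding
        void *p;         // 8 bytes
        bool b;          // 1 byte + 7 bytes padding
        std::size_t n;   // 8 bytes
        bool c;          // 1 byte + 7 bytes tail padding
    };                   // 40 bytes on a typical LP64 target

    // Grouped: the bools share a single word.
    struct Grouped {
        void *p;         // 8 bytes
        std::size_t n;   // 8 bytes
        bool a, b, c;    // 3 bytes + 5 bytes tail padding
    };                   // 24 bytes on the same target

    int main() {
        std::printf("%zu vs %zu\n", sizeof(Interleaved), sizeof(Grouped));
        return 0;
    }

On a typical LP64 compiler this prints "40 vs 24"; the same packing argument applies to Zone's trailing run of jitZone_, gcState_, and bool flags.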

View file

@@ -23,282 +23,78 @@ namespace jit {
 class JitZone;
 }
 
-/*
- * Encapsulates the data needed to perform allocation. Typically there is
- * precisely one of these per zone (|cx->zone().allocator|). However, in
- * parallel execution mode, there will be one per worker thread.
- */
+// Encapsulates the data needed to perform allocation. Typically there is
+// precisely one of these per zone (|cx->zone().allocator|). However, in
+// parallel execution mode, there will be one per worker thread.
 class Allocator
 {
-    /*
-     * Since allocators can be accessed from worker threads, the parent zone_
-     * should not be accessed in general. ArenaLists is allowed to actually do
-     * the allocation, however.
-     */
-    friend class gc::ArenaLists;
-
-    JS::Zone *zone_;
-
   public:
     explicit Allocator(JS::Zone *zone);
 
     js::gc::ArenaLists arenas;
+
+  private:
+    // Since allocators can be accessed from worker threads, the parent zone_
+    // should not be accessed in general. ArenaLists is allowed to actually do
+    // the allocation, however.
+    friend class gc::ArenaLists;
+
+    JS::Zone *zone_;
 };
 
-typedef Vector<JSCompartment *, 1, SystemAllocPolicy> CompartmentVector;
-
-} /* namespace js */
+} // namespace js
 
 namespace JS {
 
-/*
- * A zone is a collection of compartments. Every compartment belongs to exactly
- * one zone. In Firefox, there is roughly one zone per tab along with a system
- * zone for everything else. Zones mainly serve as boundaries for garbage
- * collection. Unlike compartments, they have no special security properties.
- *
- * Every GC thing belongs to exactly one zone. GC things from the same zone but
- * different compartments can share an arena (4k page). GC things from different
- * zones cannot be stored in the same arena. The garbage collector is capable of
- * collecting one zone at a time; it cannot collect at the granularity of
- * compartments.
- *
- * GC things are tied to zones and compartments as follows:
- *
- * - JSObjects belong to a compartment and cannot be shared between
- *   compartments. If an object needs to point to a JSObject in a different
- *   compartment, regardless of zone, it must go through a cross-compartment
- *   wrapper. Each compartment keeps track of its outgoing wrappers in a table.
- *
- * - JSStrings do not belong to any particular compartment, but they do belong
- *   to a zone. Thus, two different compartments in the same zone can point to a
- *   JSString. When a string needs to be wrapped, we copy it if it's in a
- *   different zone and do nothing if it's in the same zone. Thus, transferring
- *   strings within a zone is very efficient.
- *
- * - Shapes and base shapes belong to a compartment and cannot be shared between
- *   compartments. A base shape holds a pointer to its compartment. Shapes find
- *   their compartment via their base shape. JSObjects find their compartment
- *   via their shape.
- *
- * - Scripts are also compartment-local and cannot be shared. A script points to
- *   its compartment.
- *
- * - Type objects and JitCode objects belong to a compartment and cannot be
- *   shared. However, there is no mechanism to obtain their compartments.
- *
- * A zone remains alive as long as any GC things in the zone are alive. A
- * compartment remains alive as long as any JSObjects, scripts, shapes, or base
- * shapes within it are alive.
- *
- * We always guarantee that a zone has at least one live compartment by refusing
- * to delete the last compartment in a live zone. (This could happen, for
- * example, if the conservative scanner marks a string in an otherwise dead
- * zone.)
- */
+// A zone is a collection of compartments. Every compartment belongs to exactly
+// one zone. In Firefox, there is roughly one zone per tab along with a system
+// zone for everything else. Zones mainly serve as boundaries for garbage
+// collection. Unlike compartments, they have no special security properties.
+//
+// Every GC thing belongs to exactly one zone. GC things from the same zone but
+// different compartments can share an arena (4k page). GC things from different
+// zones cannot be stored in the same arena. The garbage collector is capable of
+// collecting one zone at a time; it cannot collect at the granularity of
+// compartments.
+//
+// GC things are tied to zones and compartments as follows:
+//
+// - JSObjects belong to a compartment and cannot be shared between
+//   compartments. If an object needs to point to a JSObject in a different
+//   compartment, regardless of zone, it must go through a cross-compartment
+//   wrapper. Each compartment keeps track of its outgoing wrappers in a table.
+//
+// - JSStrings do not belong to any particular compartment, but they do belong
+//   to a zone. Thus, two different compartments in the same zone can point to a
+//   JSString. When a string needs to be wrapped, we copy it if it's in a
+//   different zone and do nothing if it's in the same zone. Thus, transferring
+//   strings within a zone is very efficient.
+//
+// - Shapes and base shapes belong to a compartment and cannot be shared between
+//   compartments. A base shape holds a pointer to its compartment. Shapes find
+//   their compartment via their base shape. JSObjects find their compartment
+//   via their shape.
+//
+// - Scripts are also compartment-local and cannot be shared. A script points to
+//   its compartment.
+//
+// - Type objects and JitCode objects belong to a compartment and cannot be
+//   shared. However, there is no mechanism to obtain their compartments.
+//
+// A zone remains alive as long as any GC things in the zone are alive. A
+// compartment remains alive as long as any JSObjects, scripts, shapes, or base
+// shapes within it are alive.
+//
+// We always guarantee that a zone has at least one live compartment by refusing
+// to delete the last compartment in a live zone. (This could happen, for
+// example, if the conservative scanner marks a string in an otherwise dead
+// zone.)
 struct Zone : public JS::shadow::Zone,
               public js::gc::GraphNodeBase<JS::Zone>,
               public js::MallocProvider<JS::Zone>
 {
-  private:
-    friend bool js::CurrentThreadCanAccessZone(Zone *zone);
-    friend class js::gc::GCRuntime;
-
-  public:
-    js::Allocator allocator;
-
-    js::CompartmentVector compartments;
-
-  private:
-    bool ionUsingBarriers_;
-
-  public:
-    bool active;  // GC flag, whether there are active frames
-
-    bool compileBarriers(bool needsBarrier) const {
-        return needsBarrier || runtimeFromMainThread()->gcZeal() == js::gc::ZealVerifierPreValue;
-    }
-    bool compileBarriers() const {
-        return compileBarriers(needsBarrier());
-    }
-
-    enum ShouldUpdateIon {
-        DontUpdateIon,
-        UpdateIon
-    };
-    void setNeedsBarrier(bool needs, ShouldUpdateIon updateIon);
-
-    const bool *addressOfNeedsBarrier() const {
-        return &needsBarrier_;
-    }
-
-  public:
-    enum GCState {
-        NoGC,
-        Mark,
-        MarkGray,
-        Sweep,
-        Finished
-    };
-
-  private:
-    bool gcScheduled;
-    GCState gcState;
-    bool gcPreserveCode;
-
-    mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
-
-  public:
-    bool isCollecting() const {
-        if (runtimeFromMainThread()->isHeapCollecting())
-            return gcState != NoGC;
-        else
-            return needsBarrier();
-    }
-
-    bool isPreservingCode() const {
-        return gcPreserveCode;
-    }
-
-    /*
-     * If this returns true, all object tracing must be done with a GC marking
-     * tracer.
-     */
-    bool requireGCTracer() const {
-        return runtimeFromMainThread()->isHeapMajorCollecting() && gcState != NoGC;
-    }
-
-    void setGCState(GCState state) {
-        JS_ASSERT(runtimeFromMainThread()->isHeapBusy());
-        JS_ASSERT_IF(state != NoGC, canCollect());
-        gcState = state;
-    }
-
-    void scheduleGC() {
-        JS_ASSERT(!runtimeFromMainThread()->isHeapBusy());
-        gcScheduled = true;
-    }
-
-    void unscheduleGC() {
-        gcScheduled = false;
-    }
-
-    bool isGCScheduled() {
-        return gcScheduled && canCollect();
-    }
-
-    void setPreservingCode(bool preserving) {
-        gcPreserveCode = preserving;
-    }
-
-    bool canCollect() {
-        // Zones cannot be collected while in use by other threads.
-        if (usedByExclusiveThread)
-            return false;
-        JSRuntime *rt = runtimeFromAnyThread();
-        if (rt->isAtomsZone(this) && rt->exclusiveThreadsPresent())
-            return false;
-        return true;
-    }
-
-    bool wasGCStarted() const {
-        return gcState != NoGC;
-    }
-
-    bool isGCMarking() {
-        if (runtimeFromMainThread()->isHeapCollecting())
-            return gcState == Mark || gcState == MarkGray;
-        else
-            return needsBarrier();
-    }
-
-    bool isGCMarkingBlack() {
-        return gcState == Mark;
-    }
-
-    bool isGCMarkingGray() {
-        return gcState == MarkGray;
-    }
-
-    bool isGCSweeping() {
-        return gcState == Sweep;
-    }
-
-    bool isGCFinished() {
-        return gcState == Finished;
-    }
-
-#ifdef DEBUG
-    /*
-     * For testing purposes, return the index of the zone group which this zone
-     * was swept in in the last GC.
-     */
-    unsigned lastZoneGroupIndex() {
-        return gcLastZoneGroupIndex;
-    }
-#endif
-
-    /* This is updated by both the main and GC helper threads. */
-    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
-
-    size_t gcTriggerBytes;
-    size_t gcMaxMallocBytes;
-    double gcHeapGrowthFactor;
-
-    bool isSystem;
-
-    /* Whether this zone is being used by a thread with an ExclusiveContext. */
-    bool usedByExclusiveThread;
-
-    /*
-     * Get a number that is incremented whenever this zone is collected, and
-     * possibly at other times too.
-     */
-    uint64_t gcNumber();
-
-    /*
-     * These flags help us to discover if a compartment that shouldn't be alive
-     * manages to outlive a GC.
-     */
-    bool scheduledForDestruction;
-    bool maybeAlive;
-
-    /*
-     * Malloc counter to measure memory pressure for GC scheduling. It runs from
-     * gcMaxMallocBytes down to zero. This counter should be used only when it's
-     * not possible to know the size of a free.
-     */
-    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
-
-    /*
-     * Whether a GC has been triggered as a result of gcMallocBytes falling
-     * below zero.
-     *
-     * This should be a bool, but Atomic only supports 32-bit and pointer-sized
-     * types.
-     */
-    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
-
-    /* This compartment's gray roots. */
-    js::Vector<js::GrayRoot, 0, js::SystemAllocPolicy> gcGrayRoots;
-
-    /*
-     * A set of edges from this zone to other zones.
-     *
-     * This is used during GC while calculating zone groups to record edges that
-     * can't be determined by examining this zone by itself.
-     */
-    typedef js::HashSet<Zone *, js::DefaultHasher<Zone *>, js::SystemAllocPolicy> ZoneSet;
-    ZoneSet gcZoneGroupEdges;
-
-    /* Per-zone data for use by an embedder. */
-    void *data;
-
     Zone(JSRuntime *rt);
     ~Zone();
     bool init();
 
     void findOutgoingEdges(js::gc::ComponentFinder<JS::Zone> &finder);
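
The overview comment above is the key invariant here: a JSObject may only be touched from another compartment through a cross-compartment wrapper. As a concrete illustration, this is a sketch of how an embedder typically crosses compartments with the JSAPI of this era; the helper name is hypothetical, and cx and the two objects are assumed to be supplied (and rooted) by the caller:

    // Hypothetical helper: rewrite |objp| so it is safe to use from the
    // compartment of |otherGlobal|, which may be in the same or a different
    // zone.
    bool
    PassObjectAcrossCompartments(JSContext *cx, JS::HandleObject otherGlobal,
                                 JS::MutableHandleObject objp)
    {
        // Enter the destination compartment for the rest of this scope.
        JSAutoCompartment ac(cx, otherGlobal);

        // Replace *objp with a cross-compartment wrapper (a no-op if the
        // object already belongs to the current compartment).
        return JS_WrapObject(cx, objp);
    }

Strings, by contrast, need no wrapper within a zone, which is what makes same-zone string transfers cheap.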
@@ -315,69 +111,198 @@ struct Zone : public JS::shadow::Zone,
     void resetGCMallocBytes();
     void setGCMaxMallocBytes(size_t value);
     void updateMallocCounter(size_t nbytes) {
-        /*
-         * Note: this code may be run from worker threads. We
-         * tolerate any thread races when updating gcMallocBytes.
-         */
+        // Note: this code may be run from worker threads. We tolerate any
+        // thread races when updating gcMallocBytes.
        gcMallocBytes -= ptrdiff_t(nbytes);
        if (MOZ_UNLIKELY(isTooMuchMalloc()))
            onTooMuchMalloc();
     }
 
-    bool isTooMuchMalloc() const {
-        return gcMallocBytes <= 0;
-    }
-
+    bool isTooMuchMalloc() const { return gcMallocBytes <= 0; }
     void onTooMuchMalloc();
 
     void *onOutOfMemory(void *p, size_t nbytes) {
         return runtimeFromMainThread()->onOutOfMemory(p, nbytes);
     }
-    void reportAllocationOverflow() {
-        js_ReportAllocationOverflow(nullptr);
-    }
-
-    js::types::TypeZone types;
+    void reportAllocationOverflow() { js_ReportAllocationOverflow(nullptr); }
 
     void sweep(js::FreeOp *fop, bool releaseTypes, bool *oom);
 
     bool hasMarkedCompartments();
 
+    void scheduleGC() { JS_ASSERT(!runtimeFromMainThread()->isHeapBusy()); gcScheduled_ = true; }
+    void unscheduleGC() { gcScheduled_ = false; }
+    bool isGCScheduled() { return gcScheduled_ && canCollect(); }
+
+    void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
+    bool isPreservingCode() const { return gcPreserveCode_; }
+
+    bool canCollect() {
+        // Zones cannot be collected while in use by other threads.
+        if (usedByExclusiveThread)
+            return false;
+        JSRuntime *rt = runtimeFromAnyThread();
+        if (rt->isAtomsZone(this) && rt->exclusiveThreadsPresent())
+            return false;
+        return true;
+    }
+
+    enum GCState {
+        NoGC,
+        Mark,
+        MarkGray,
+        Sweep,
+        Finished
+    };
+    void setGCState(GCState state) {
+        JS_ASSERT(runtimeFromMainThread()->isHeapBusy());
+        JS_ASSERT_IF(state != NoGC, canCollect());
+        gcState_ = state;
+    }
+
+    bool isCollecting() const {
+        if (runtimeFromMainThread()->isHeapCollecting())
+            return gcState_ != NoGC;
+        else
+            return needsBarrier();
+    }
+
+    // If this returns true, all object tracing must be done with a GC marking
+    // tracer.
+    bool requireGCTracer() const {
+        return runtimeFromMainThread()->isHeapMajorCollecting() && gcState_ != NoGC;
+    }
+
+    bool isGCMarking() {
+        if (runtimeFromMainThread()->isHeapCollecting())
+            return gcState_ == Mark || gcState_ == MarkGray;
+        else
+            return needsBarrier();
+    }
+
+    bool wasGCStarted() const { return gcState_ != NoGC; }
+    bool isGCMarkingBlack() { return gcState_ == Mark; }
+    bool isGCMarkingGray() { return gcState_ == MarkGray; }
+    bool isGCSweeping() { return gcState_ == Sweep; }
+    bool isGCFinished() { return gcState_ == Finished; }
+
+    // Get a number that is incremented whenever this zone is collected, and
+    // possibly at other times too.
+    uint64_t gcNumber();
+
+    bool compileBarriers() const { return compileBarriers(needsBarrier()); }
+    bool compileBarriers(bool needsBarrier) const {
+        return needsBarrier || runtimeFromMainThread()->gcZeal() == js::gc::ZealVerifierPreValue;
+    }
+
+    enum ShouldUpdateIon { DontUpdateIon, UpdateIon };
+    void setNeedsBarrier(bool needs, ShouldUpdateIon updateIon);
+    const bool *addressOfNeedsBarrier() const { return &needsBarrier_; }
+
+    js::jit::JitZone *getJitZone(JSContext *cx) { return jitZone_ ? jitZone_ : createJitZone(cx); }
+    js::jit::JitZone *jitZone() { return jitZone_; }
+
+#ifdef DEBUG
+    // For testing purposes, return the index of the zone group which this zone
+    // was swept in in the last GC.
+    unsigned lastZoneGroupIndex() { return gcLastZoneGroupIndex; }
+#endif
+
   private:
     void sweepBreakpoints(js::FreeOp *fop);
     void sweepCompartments(js::FreeOp *fop, bool keepAtleastOne, bool lastGC);
 
-#ifdef JS_ION
-    js::jit::JitZone *jitZone_;
-
     js::jit::JitZone *createJitZone(JSContext *cx);
 
   public:
-    js::jit::JitZone *getJitZone(JSContext *cx) {
-        return jitZone_ ? jitZone_ : createJitZone(cx);
-    }
-    js::jit::JitZone *jitZone() {
-        return jitZone_;
-    }
-#endif
+    js::Allocator allocator;
+
+    js::types::TypeZone types;
+
+    // The set of compartments in this zone.
+    typedef js::Vector<JSCompartment *, 1, js::SystemAllocPolicy> CompartmentVector;
+    CompartmentVector compartments;
+
+    // This compartment's gray roots.
+    typedef js::Vector<js::GrayRoot, 0, js::SystemAllocPolicy> GrayRootVector;
+    GrayRootVector gcGrayRoots;
+
+    // A set of edges from this zone to other zones.
+    //
+    // This is used during GC while calculating zone groups to record edges that
+    // can't be determined by examining this zone by itself.
+    typedef js::HashSet<Zone *, js::DefaultHasher<Zone *>, js::SystemAllocPolicy> ZoneSet;
+    ZoneSet gcZoneGroupEdges;
+
+    // The "growth factor" for computing our next thresholds after a GC.
+    double gcHeapGrowthFactor;
+
+    // Malloc counter to measure memory pressure for GC scheduling. It runs from
+    // gcMaxMallocBytes down to zero. This counter should be used only when it's
+    // not possible to know the size of a free.
+    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
+
+    // GC trigger threshold for allocations on the C heap.
+    size_t gcMaxMallocBytes;
+
+    // Whether a GC has been triggered as a result of gcMallocBytes falling
+    // below zero.
+    //
+    // This should be a bool, but Atomic only supports 32-bit and pointer-sized
+    // types.
+    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
+
+    // Counts the number of bytes allocated in the GC heap for this zone. It is
+    // updated by both the main and GC helper threads.
+    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
+
+    // GC trigger threshold for allocations on the GC heap.
+    size_t gcTriggerBytes;
+
+    // Per-zone data for use by an embedder.
+    void *data;
+
+    bool isSystem;
+
+    bool usedByExclusiveThread;
+
+    // These flags help us to discover if a compartment that shouldn't be alive
+    // manages to outlive a GC.
+    bool scheduledForDestruction;
+    bool maybeAlive;
+
+    // True when there are active frames.
+    bool active;
+
+    mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
+
+  private:
+    js::jit::JitZone *jitZone_;
+
+    GCState gcState_;
+    bool gcScheduled_;
+    bool gcPreserveCode_;
+    bool ionUsingBarriers_;
+
+    friend bool js::CurrentThreadCanAccessZone(Zone *zone);
+    friend class js::gc::GCRuntime;
 };
 
-} /* namespace JS */
+} // namespace JS
 
 namespace js {
 
-/*
- * Using the atoms zone without holding the exclusive access lock is dangerous
- * because worker threads may be using it simultaneously. Therefore, it's
- * better to skip the atoms zone when iterating over zones. If you need to
- * iterate over the atoms zone, consider taking the exclusive access lock first.
- */
+// Using the atoms zone without holding the exclusive access lock is dangerous
+// because worker threads may be using it simultaneously. Therefore, it's
+// better to skip the atoms zone when iterating over zones. If you need to
+// iterate over the atoms zone, consider taking the exclusive access lock first.
 enum ZoneSelector {
     WithAtoms,
     SkipAtoms
 };
 
-class ZonesIter {
-  private:
+class ZonesIter
+{
     JS::Zone **it, **end;
 
   public:
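
To make the gcMallocBytes scheme in the hunk above concrete: the counter starts at gcMaxMallocBytes, counts down as C-heap bytes are reported, and the gcMallocGCTriggered latch ensures that crossing zero requests at most one GC until both are reset. A standalone model of that logic, not the SpiderMonkey code itself (which uses mozilla::Atomic and deliberately tolerates racy counter updates):

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    struct MallocPressure {
        std::ptrdiff_t counter;             // runs from the budget down to zero
        std::atomic<bool> triggered{false}; // latch: fire at most once

        explicit MallocPressure(std::ptrdiff_t budget) : counter(budget) {}

        void updateMallocCounter(std::size_t nbytes) {
            counter -= static_cast<std::ptrdiff_t>(nbytes);
            if (counter <= 0 && !triggered.exchange(true))
                std::puts("malloc pressure: schedule a GC");  // fires once
        }

        void resetAfterGC(std::ptrdiff_t budget) {
            counter = budget;
            triggered = false;
        }
    };

    int main() {
        MallocPressure zone(1 << 20);        // 1 MiB budget
        for (int i = 0; i < 2048; i++)
            zone.updateMallocCounter(1024);  // 2 MiB reported: prints once
        return 0;
    }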
@@ -411,16 +336,6 @@ class ZonesIter {
 struct CompartmentsInZoneIter
 {
-    // This is for the benefit of CompartmentsIterT::comp.
-    friend class mozilla::Maybe<CompartmentsInZoneIter>;
-
-  private:
-    JSCompartment **it, **end;
-
-    CompartmentsInZoneIter()
-      : it(nullptr), end(nullptr)
-    {}
-
-  public:
     explicit CompartmentsInZoneIter(JS::Zone *zone) {
         it = zone->compartments.begin();
         end = zone->compartments.end();
@@ -442,16 +357,23 @@ struct CompartmentsInZoneIter
     operator JSCompartment *() const { return get(); }
     JSCompartment *operator->() const { return get(); }
 
+  private:
+    JSCompartment **it, **end;
+
+    CompartmentsInZoneIter()
+      : it(nullptr), end(nullptr)
+    {}
+
+    // This is for the benefit of CompartmentsIterT::comp.
+    friend class mozilla::Maybe<CompartmentsInZoneIter>;
 };
 
-/*
- * This iterator iterates over all the compartments in a given set of zones. The
- * set of zones is determined by iterating ZoneIterT.
- */
+// This iterator iterates over all the compartments in a given set of zones. The
+// set of zones is determined by iterating ZoneIterT.
 template<class ZonesIterT>
 class CompartmentsIterT
 {
-  private:
     ZonesIterT zone;
     mozilla::Maybe<CompartmentsInZoneIter> comp;
@@ -499,10 +421,10 @@ class CompartmentsIterT
 typedef CompartmentsIterT<ZonesIter> CompartmentsIter;
 
-/* Return the Zone* of a Value. Asserts if the Value is not a GC thing. */
+// Return the Zone* of a Value. Asserts if the Value is not a GC thing.
 Zone *
 ZoneOfValue(const JS::Value &value);
 
-} /* namespace js */
+} // namespace js
 
-#endif /* gc_Zone_h */
+#endif // gc_Zone_h
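
For reference, the iterators declared in this header compose in the usual engine-side pattern. This is a sketch of that pattern, assuming a JSRuntime *rt is in hand, not code from this patch:

    // Visit every compartment in every zone, skipping the atoms zone as the
    // comment above recommends when the exclusive access lock is not held.
    for (js::ZonesIter zone(rt, js::SkipAtoms); !zone.done(); zone.next()) {
        for (js::CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
            // |comp| converts to JSCompartment *, |zone| to JS::Zone *.
        }
    }

    // Or, equivalently, iterate compartments across all zones directly:
    for (js::CompartmentsIter comp(rt, js::SkipAtoms); !comp.done(); comp.next()) {
        // ...
    }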