diff --git a/js/public/TracingAPI.h b/js/public/TracingAPI.h index d36ca8a1b987..f792b5b24f93 100644 --- a/js/public/TracingAPI.h +++ b/js/public/TracingAPI.h @@ -102,17 +102,19 @@ class JS_PUBLIC_API(JSTracer) MarkingTracer, CallbackTracer }; - bool isMarkingTracer() const { return tag == MarkingTracer; } - bool isCallbackTracer() const { return tag == CallbackTracer; } + bool isMarkingTracer() const { return tag_ == MarkingTracer; } + bool isCallbackTracer() const { return tag_ == CallbackTracer; } inline JS::CallbackTracer* asCallbackTracer(); protected: JSTracer(JSRuntime* rt, TracerKindTag tag, - WeakMapTraceKind weakTraceKind = TraceWeakMapValues); + WeakMapTraceKind weakTraceKind = TraceWeakMapValues) + : runtime_(rt), tag_(tag), eagerlyTraceWeakMaps_(weakTraceKind) + {} private: JSRuntime* runtime_; - TracerKindTag tag; + TracerKindTag tag_; WeakMapTraceKind eagerlyTraceWeakMaps_; }; @@ -134,7 +136,9 @@ class JS_PUBLIC_API(CallbackTracer) : public JSTracer {} // Update the trace callback. - void setTraceCallback(JSTraceCallback traceCallback); + void setTraceCallback(JSTraceCallback traceCallback) { + callback = traceCallback; + } // Test if the given callback is the same as our callback. bool hasCallback(JSTraceCallback maybeCallback) const { diff --git a/js/src/gc/Barrier.h b/js/src/gc/Barrier.h index b20a56189753..cce5a1e5b027 100644 --- a/js/src/gc/Barrier.h +++ b/js/src/gc/Barrier.h @@ -11,9 +11,9 @@ #include "gc/Heap.h" #include "gc/StoreBuffer.h" -#include "js/HashTable.h" #include "js/Id.h" #include "js/RootingAPI.h" +#include "js/Value.h" /* * A write barrier is a mechanism used by incremental or generation GCs to diff --git a/js/src/gc/Heap.h b/js/src/gc/Heap.h index 8c58c869a78a..286de9ed9f99 100644 --- a/js/src/gc/Heap.h +++ b/js/src/gc/Heap.h @@ -53,6 +53,9 @@ CurrentThreadIsIonCompiling(); extern bool UnmarkGrayCellRecursively(gc::Cell* cell, JSGCTraceKind kind); +extern void +TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, gc::Cell** thingp, const char* name); + namespace gc { struct Arena; @@ -61,9 +64,6 @@ class SortedArenaList; struct ArenaHeader; struct Chunk; -extern void -TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, Cell** thingp, const char* name); - /* * This flag allows an allocation site to request a specific heap based upon the * estimated lifetime or lifetime requirements of objects allocated from that diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp index 0ce441bd533c..d6853ded0c86 100644 --- a/js/src/gc/Marking.cpp +++ b/js/src/gc/Marking.cpp @@ -105,6 +105,9 @@ void * const js::NullPtr::constNullValue = nullptr; JS_PUBLIC_DATA(void * const) JS::NullPtr::constNullValue = nullptr; + +/*** Tracing Invariants **************************************************************************/ + #if defined(DEBUG) template static inline bool @@ -299,11 +302,27 @@ ShouldMarkCrossCompartment(JSTracer* trc, JSObject* src, Value val) return val.isMarkable() && ShouldMarkCrossCompartment(trc, src, (Cell*)val.toGCThing()); } +template +static bool +ZoneIsAtomsZoneForString(JSRuntime* rt, T* thing) +{ + JSGCTraceKind kind = GetGCThingTraceKind(thing); + if (kind == JSTRACE_STRING || kind == JSTRACE_SYMBOL) + return rt->isAtomsZone(thing->zone()); + return false; +} + +#define JS_COMPARTMENT_ASSERT(rt, thing) \ + MOZ_ASSERT((thing)->zone()->isGCMarking() || ZoneIsAtomsZoneForString((rt), (thing))) + #define JS_ROOT_MARKING_ASSERT(trc) \ MOZ_ASSERT_IF(trc->isMarkingTracer(), \ trc->runtime()->gc.state() == NO_INCREMENTAL || \ 
trc->runtime()->gc.state() == MARK_ROOTS); + +/*** Tracing Interface ***************************************************************************/ + // A C++ version of JSGCTraceKind enum class TraceKind { #define NAMES(name, _, __) name, @@ -495,6 +514,67 @@ js::TraceProcessGlobalRoot(JSTracer* trc, T* thing, const char* name) template void js::TraceProcessGlobalRoot(JSTracer*, JSAtom*, const char*); template void js::TraceProcessGlobalRoot(JSTracer*, JS::Symbol*, const char*); +void +js::TraceObjectSlots(JSTracer* trc, NativeObject* obj, uint32_t start, uint32_t nslots) +{ + JS::AutoTracingIndex index(trc, start); + for (uint32_t i = start; i < (start + nslots); ++i) { + HeapSlot& slot = obj->getSlotRef(i); + if (InternalGCMethods::isMarkable(slot)) + DispatchToTracer(trc, slot.unsafeGet(), "object slot"); + ++index; + } +} + +void +gc::MarkValueForBarrier(JSTracer* trc, Value* valuep, const char* name) +{ + MOZ_ASSERT(!trc->runtime()->isHeapCollecting()); + TraceManuallyBarrieredEdge(trc, valuep, name); +} + +void +gc::MarkIdForBarrier(JSTracer* trc, jsid* idp, const char* name) +{ + TraceManuallyBarrieredEdge(trc, idp, name); +} + +// A typed functor adaptor for TraceRoot. +struct TraceRootFunctor { + template + void operator()(JSTracer* trc, Cell** thingp, const char* name) { + TraceRoot(trc, reinterpret_cast(thingp), name); + } +}; + +void +js::TraceGenericPointerRoot(JSTracer* trc, Cell** thingp, const char* name) +{ + MOZ_ASSERT(thingp); + if (!*thingp) + return; + TraceRootFunctor f; + CallTyped(f, (*thingp)->getTraceKind(), trc, thingp, name); +} + +// A typed functor adaptor for TraceManuallyBarrieredEdge. +struct TraceManuallyBarrieredEdgeFunctor { + template + void operator()(JSTracer* trc, Cell** thingp, const char* name) { + TraceManuallyBarrieredEdge(trc, reinterpret_cast(thingp), name); + } +}; + +void +js::TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, Cell** thingp, const char* name) +{ + MOZ_ASSERT(thingp); + if (!*thingp) + return; + TraceManuallyBarrieredEdgeFunctor f; + CallTyped(f, (*thingp)->getTraceKind(), trc, thingp, name); +} + // This method is responsible for dynamic dispatch to the real tracer // implementation. Consider replacing this choke point with virtual dispatch: // a sufficiently smart C++ compiler may be able to devirtualize some paths. @@ -514,6 +594,9 @@ DispatchToTracer(JSTracer* trc, T* thingp, const char* name) DoCallback(trc->asCallbackTracer(), thingp, name); } + +/*** GC Marking Interface *************************************************************************/ + template static inline bool MustSkipMarking(T thing) @@ -702,6 +785,13 @@ js::GCMarker::mark(T* thing) : gc::TenuredCell::fromPointer(thing)->markIfUnmarked(gc::BLACK); } + +/*** Inline, Eager GC Marking *********************************************************************/ + +// Each of the eager, inline marking paths is directly preceeded by the +// out-of-line, generic tracing code for comparison. Both paths must end up +// traversing equivalent subgraphs. 
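
A minimal, self-contained sketch of the invariant stated in the comment above (toy Node and ToyTracer types, not part of this patch or of SpiderMonkey): the generic traceChildren() path and the eager marking path must enumerate exactly the same child edges; only what is done with each edge differs.

#include <cassert>
#include <vector>

struct Node;

// Stand-in for JSTracer: just records which edges were visited.
struct ToyTracer {
    std::vector<Node**> visited;
    void traceEdge(Node** edge, const char* /*name*/) { visited.push_back(edge); }
};

struct Node {
    Node* left = nullptr;
    Node* right = nullptr;

    // Generic, out-of-line path: every child edge goes through the tracer.
    void traceChildren(ToyTracer* trc) {
        trc->traceEdge(&left, "left");
        trc->traceEdge(&right, "right");
    }
};

// Eager path: a real marker would mark/push directly rather than call back
// through a tracer, but it must still visit the same edges as traceChildren().
void eagerlyMarkChildren(ToyTracer* marker, Node* node) {
    marker->traceEdge(&node->left, "left");
    marker->traceEdge(&node->right, "right");
}

int main() {
    Node n;
    ToyTracer generic, eager;
    n.traceChildren(&generic);
    eagerlyMarkChildren(&eager, &n);
    assert(generic.visited == eager.visited);  // equivalent subgraphs
}
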
+ void LazyScript::traceChildren(JSTracer* trc) { @@ -753,7 +843,7 @@ js::GCMarker::eagerlyMarkChildren(LazyScript *thing) traverse(thing, static_cast(innerFunctions[i])); } -inline void +void Shape::traceChildren(JSTracer* trc) { TraceEdge(trc, &base_, "base"); @@ -961,581 +1051,49 @@ js::GCMarker::lazilyMarkChildren(ObjectGroup* group) traverse(group, static_cast(fun)); } + +/*** Mark-stack Marking ***************************************************************************/ -template -static inline void -CheckIsMarkedThing(T* thingp) +bool +GCMarker::drainMarkStack(SliceBudget& budget) { -#define IS_SAME_TYPE_OR(name, type, _) mozilla::IsSame::value || - static_assert( - FOR_EACH_GC_LAYOUT(IS_SAME_TYPE_OR) - false, "Only the base cell layout types are allowed into marking/tracing internals"); -#undef IS_SAME_TYPE_OR - #ifdef DEBUG - MOZ_ASSERT(thingp); - MOZ_ASSERT(*thingp); - JSRuntime* rt = (*thingp)->runtimeFromAnyThread(); - MOZ_ASSERT_IF(!ThingIsPermanentAtomOrWellKnownSymbol(*thingp), - CurrentThreadCanAccessRuntime(rt) || - (rt->isHeapCollecting() && rt->gc.state() == SWEEP)); + struct AutoCheckCompartment { + bool& flag; + explicit AutoCheckCompartment(bool& comparmentCheckFlag) : flag(comparmentCheckFlag) { + MOZ_ASSERT(!flag); + flag = true; + } + ~AutoCheckCompartment() { flag = false; } + } acc(strictCompartmentChecking); #endif -} -template -static bool -IsMarkedInternal(T* thingp) -{ - CheckIsMarkedThing(thingp); - JSRuntime* rt = (*thingp)->runtimeFromAnyThread(); - - if (IsInsideNursery(*thingp)) { - MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt)); - return rt->gc.nursery.getForwardedPointer(thingp); - } - - Zone* zone = (*thingp)->asTenured().zoneFromAnyThread(); - if (!zone->isCollectingFromAnyThread() || zone->isGCFinished()) - return true; - if (zone->isGCCompacting() && IsForwarded(*thingp)) - *thingp = Forwarded(*thingp); - return (*thingp)->asTenured().isMarked(); -} - -template -struct IsMarkedFunctor : public IdentityDefaultAdaptor { - template S operator()(T* t, bool* rv) { - *rv = IsMarkedInternal(&t); - return js::gc::RewrapValueOrId::wrap(t); - } -}; - -template <> -bool -IsMarkedInternal(Value* valuep) -{ - bool rv = true; - *valuep = DispatchValueTyped(IsMarkedFunctor(), *valuep, &rv); - return rv; -} - -template <> -bool -IsMarkedInternal(jsid* idp) -{ - bool rv = true; - *idp = DispatchIdTyped(IsMarkedFunctor(), *idp, &rv); - return rv; -} - -template -static bool -IsAboutToBeFinalizedInternal(T* thingp) -{ - CheckIsMarkedThing(thingp); - T thing = *thingp; - JSRuntime* rt = thing->runtimeFromAnyThread(); - - /* Permanent atoms are never finalized by non-owning runtimes. */ - if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && !TlsPerThreadData.get()->associatedWith(rt)) + if (budget.isOverBudget()) return false; - Nursery& nursery = rt->gc.nursery; - MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing)); - if (rt->isHeapMinorCollecting()) { - if (IsInsideNursery(thing)) - return !nursery.getForwardedPointer(thingp); - return false; - } + for (;;) { + while (!stack.isEmpty()) { + processMarkStackTop(budget); + if (budget.isOverBudget()) { + saveValueRanges(); + return false; + } + } - Zone* zone = thing->asTenured().zoneFromAnyThread(); - if (zone->isGCSweeping()) { - if (thing->asTenured().arenaHeader()->allocatedDuringIncremental) + if (!hasDelayedChildren()) + break; + + /* + * Mark children of things that caused too deep recursion during the + * above tracing. Don't do this until we're done with everything + * else. 
+ */ + if (!markDelayedChildren(budget)) { + saveValueRanges(); return false; - return !thing->asTenured().isMarked(); - } - else if (zone->isGCCompacting() && IsForwarded(thing)) { - *thingp = Forwarded(thing); - return false; - } - - return false; -} - -template -struct IsAboutToBeFinalizedFunctor : public IdentityDefaultAdaptor { - template S operator()(T* t, bool* rv) { - *rv = IsAboutToBeFinalizedInternal(&t); - return js::gc::RewrapValueOrId::wrap(t); - } -}; - -template <> -bool -IsAboutToBeFinalizedInternal(Value* valuep) -{ - bool rv = false; - *valuep = DispatchValueTyped(IsAboutToBeFinalizedFunctor(), *valuep, &rv); - return rv; -} - -template <> -bool -IsAboutToBeFinalizedInternal(jsid* idp) -{ - bool rv = false; - *idp = DispatchIdTyped(IsAboutToBeFinalizedFunctor(), *idp, &rv); - return rv; -} - -namespace js { -namespace gc { - -template -bool -IsMarkedUnbarriered(T* thingp) -{ - return IsMarkedInternal(ConvertToBase(thingp)); -} - -template -bool -IsMarked(BarrieredBase* thingp) -{ - return IsMarkedInternal(ConvertToBase(thingp->unsafeGet())); -} - -template -bool -IsMarked(ReadBarriered* thingp) -{ - return IsMarkedInternal(ConvertToBase(thingp->unsafeGet())); -} - -template -bool -IsAboutToBeFinalizedUnbarriered(T* thingp) -{ - return IsAboutToBeFinalizedInternal(ConvertToBase(thingp)); -} - -template -bool -IsAboutToBeFinalized(BarrieredBase* thingp) -{ - return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeGet())); -} - -template -bool -IsAboutToBeFinalized(ReadBarriered* thingp) -{ - return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeGet())); -} - -// Instantiate a copy of the Tracing templates for each derived type. -#define INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS(type) \ - template bool IsMarkedUnbarriered(type*); \ - template bool IsMarked(BarrieredBase*); \ - template bool IsMarked(ReadBarriered*); \ - template bool IsAboutToBeFinalizedUnbarriered(type*); \ - template bool IsAboutToBeFinalized(BarrieredBase*); \ - template bool IsAboutToBeFinalized(ReadBarriered*); -FOR_EACH_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS) -#undef INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS - -template -T* -UpdateIfRelocated(JSRuntime* rt, T** thingp) -{ - MOZ_ASSERT(thingp); - if (!*thingp) - return nullptr; - - if (rt->isHeapMinorCollecting() && IsInsideNursery(*thingp)) { - rt->gc.nursery.getForwardedPointer(thingp); - return *thingp; - } - - Zone* zone = (*thingp)->zone(); - if (zone->isGCCompacting() && IsForwarded(*thingp)) - *thingp = Forwarded(*thingp); - - return *thingp; -} - -} /* namespace gc */ -} /* namespace js */ - -/*** Externally Typed Marking ***/ - -// A typed functor adaptor for TraceRoot. -struct TraceRootFunctor { - template - void operator()(JSTracer* trc, Cell** thingp, const char* name) { - TraceRoot(trc, reinterpret_cast(thingp), name); - } -}; - -// A typed functor adaptor for TraceManuallyBarrieredEdge. 
-struct TraceManuallyBarrieredEdgeFunctor { - template - void operator()(JSTracer* trc, Cell** thingp, const char* name) { - TraceManuallyBarrieredEdge(trc, reinterpret_cast(thingp), name); - } -}; - -void -js::gc::TraceGenericPointerRoot(JSTracer* trc, Cell** thingp, const char* name) -{ - MOZ_ASSERT(thingp); - if (!*thingp) - return; - TraceRootFunctor f; - CallTyped(f, (*thingp)->getTraceKind(), trc, thingp, name); -} - -void -js::gc::TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, Cell** thingp, const char* name) -{ - MOZ_ASSERT(thingp); - if (!*thingp) - return; - TraceManuallyBarrieredEdgeFunctor f; - CallTyped(f, (*thingp)->getTraceKind(), trc, thingp, name); -} - -/*** Type Marking ***/ - -void -TypeSet::MarkTypeRoot(JSTracer* trc, TypeSet::Type* v, const char* name) -{ - JS_ROOT_MARKING_ASSERT(trc); - MarkTypeUnbarriered(trc, v, name); -} - -void -TypeSet::MarkTypeUnbarriered(JSTracer* trc, TypeSet::Type* v, const char* name) -{ - if (v->isSingletonUnchecked()) { - JSObject* obj = v->singleton(); - DispatchToTracer(trc, &obj, name); - *v = TypeSet::ObjectType(obj); - } else if (v->isGroupUnchecked()) { - ObjectGroup* group = v->group(); - DispatchToTracer(trc, &group, name); - *v = TypeSet::ObjectType(group); - } -} - -/*** Slot Marking ***/ - -void -gc::MarkObjectSlots(JSTracer* trc, NativeObject* obj, uint32_t start, uint32_t nslots) -{ - JS::AutoTracingIndex index(trc, start); - for (uint32_t i = start; i < (start + nslots); ++i) { - HeapSlot& slot = obj->getSlotRef(i); - if (InternalGCMethods::isMarkable(slot)) - DispatchToTracer(trc, slot.unsafeGet(), "object slot"); - ++index; - } -} - -/*** Special Marking ***/ - -void -gc::MarkValueForBarrier(JSTracer* trc, Value* valuep, const char* name) -{ - MOZ_ASSERT(!trc->runtime()->isHeapCollecting()); - TraceManuallyBarrieredEdge(trc, valuep, name); -} - -void -gc::MarkIdForBarrier(JSTracer* trc, jsid* idp, const char* name) -{ - TraceManuallyBarrieredEdge(trc, idp, name); -} - -/*** Push Mark Stack ***/ - -/* - * This function is used by the cycle collector to trace through a - * shape. The cycle collector does not care about shapes or base - * shapes, so those are not marked. Instead, any shapes or base shapes - * that are encountered have their children marked. Stack space is - * bounded. - */ -void -gc::MarkCycleCollectorChildren(JSTracer* trc, Shape* shape) -{ - /* - * We need to mark the global, but it's OK to only do this once instead of - * doing it for every Shape in our lineage, since it's always the same - * global. - */ - JSObject* global = shape->compartment()->unsafeUnbarrieredMaybeGlobal(); - MOZ_ASSERT(global); - TraceManuallyBarrieredEdge(trc, &global, "global"); - - do { - MOZ_ASSERT(global == shape->compartment()->unsafeUnbarrieredMaybeGlobal()); - - MOZ_ASSERT(shape->base()); - shape->base()->assertConsistency(); - - TraceEdge(trc, &shape->propidRef(), "propid"); - - if (shape->hasGetterObject()) { - JSObject* tmp = shape->getterObject(); - TraceManuallyBarrieredEdge(trc, &tmp, "getter"); - MOZ_ASSERT(tmp == shape->getterObject()); - } - - if (shape->hasSetterObject()) { - JSObject* tmp = shape->setterObject(); - TraceManuallyBarrieredEdge(trc, &tmp, "setter"); - MOZ_ASSERT(tmp == shape->setterObject()); - } - - shape = shape->previous(); - } while (shape); -} - -void -TraceObjectGroupCycleCollectorChildrenCallback(JS::CallbackTracer* trc, - void** thingp, JSGCTraceKind kind); - -// Object groups can point to other object groups via an UnboxedLayout or the -// the original unboxed group link. 
There can potentially be deep or cyclic -// chains of such groups to trace through without going through a thing that -// participates in cycle collection. These need to be handled iteratively to -// avoid blowing the stack when running the cycle collector's callback tracer. -struct ObjectGroupCycleCollectorTracer : public JS::CallbackTracer -{ - explicit ObjectGroupCycleCollectorTracer(JS::CallbackTracer* innerTracer) - : JS::CallbackTracer(innerTracer->runtime(), - TraceObjectGroupCycleCollectorChildrenCallback, - DoNotTraceWeakMaps), - innerTracer(innerTracer) - {} - - JS::CallbackTracer* innerTracer; - Vector seen, worklist; -}; - -void -TraceObjectGroupCycleCollectorChildrenCallback(JS::CallbackTracer* trcArg, - void** thingp, JSGCTraceKind kind) -{ - ObjectGroupCycleCollectorTracer* trc = static_cast(trcArg); - JS::GCCellPtr thing(*thingp, kind); - - if (thing.isObject() || thing.isScript()) { - // Invoke the inner cycle collector callback on this child. It will not - // recurse back into TraceChildren. - trc->innerTracer->invoke(thingp, kind); - return; - } - - if (thing.isObjectGroup()) { - // If this group is required to be in an ObjectGroup chain, trace it - // via the provided worklist rather than continuing to recurse. - ObjectGroup* group = static_cast(thing.asCell()); - if (group->maybeUnboxedLayout()) { - for (size_t i = 0; i < trc->seen.length(); i++) { - if (trc->seen[i] == group) - return; - } - if (trc->seen.append(group) && trc->worklist.append(group)) { - return; - } else { - // If append fails, keep tracing normally. The worst that will - // happen is we end up overrecursing. - } } } - TraceChildren(trc, thing.asCell(), thing.kind()); -} - -void -gc::MarkCycleCollectorChildren(JSTracer* trc, ObjectGroup* group) -{ - MOZ_ASSERT(trc->isCallbackTracer()); - - // Early return if this group is not required to be in an ObjectGroup chain. 
- if (!group->maybeUnboxedLayout()) { - TraceChildren(trc, group, JSTRACE_OBJECT_GROUP); - return; - } - - ObjectGroupCycleCollectorTracer groupTracer(trc->asCallbackTracer()); - TraceChildren(&groupTracer, group, JSTRACE_OBJECT_GROUP); - - while (!groupTracer.worklist.empty()) { - ObjectGroup* innerGroup = groupTracer.worklist.popCopy(); - TraceChildren(&groupTracer, innerGroup, JSTRACE_OBJECT_GROUP); - } -} - -template -static void -PushArenaTyped(GCMarker* gcmarker, ArenaHeader* aheader) -{ - for (ArenaCellIterUnderGC i(aheader); !i.done(); i.next()) - gcmarker->traverse(i.get()); -} - -void -gc::PushArena(GCMarker* gcmarker, ArenaHeader* aheader) -{ - switch (MapAllocToTraceKind(aheader->getAllocKind())) { - case JSTRACE_OBJECT: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_SCRIPT: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_STRING: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_SYMBOL: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_BASE_SHAPE: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_JITCODE: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_LAZY_SCRIPT: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_SHAPE: - PushArenaTyped(gcmarker, aheader); - break; - - case JSTRACE_OBJECT_GROUP: - PushArenaTyped(gcmarker, aheader); - break; - - default: - MOZ_CRASH("Invalid trace kind in PushArena."); - } -} - -struct SlotArrayLayout -{ - union { - HeapSlot* end; - uintptr_t kind; - }; - union { - HeapSlot* start; - uintptr_t index; - }; - NativeObject* obj; - - static void staticAsserts() { - /* This should have the same layout as three mark stack items. */ - JS_STATIC_ASSERT(sizeof(SlotArrayLayout) == 3 * sizeof(uintptr_t)); - } -}; - -/* - * During incremental GC, we return from drainMarkStack without having processed - * the entire stack. At that point, JS code can run and reallocate slot arrays - * that are stored on the stack. To prevent this from happening, we replace all - * ValueArrayTag stack items with SavedValueArrayTag. In the latter, slots - * pointers are replaced with slot indexes, and slot array end pointers are - * replaced with the kind of index (properties vs. elements). 
- */ -void -GCMarker::saveValueRanges() -{ - for (uintptr_t* p = stack.tos_; p > stack.stack_; ) { - uintptr_t tag = *--p & StackTagMask; - if (tag == ValueArrayTag) { - *p &= ~StackTagMask; - p -= 2; - SlotArrayLayout* arr = reinterpret_cast(p); - NativeObject* obj = arr->obj; - MOZ_ASSERT(obj->isNative()); - - HeapSlot* vp = obj->getDenseElementsAllowCopyOnWrite(); - if (arr->end == vp + obj->getDenseInitializedLength()) { - MOZ_ASSERT(arr->start >= vp); - arr->index = arr->start - vp; - arr->kind = HeapSlot::Element; - } else { - HeapSlot* vp = obj->fixedSlots(); - unsigned nfixed = obj->numFixedSlots(); - if (arr->start == arr->end) { - arr->index = obj->slotSpan(); - } else if (arr->start >= vp && arr->start < vp + nfixed) { - MOZ_ASSERT(arr->end == vp + Min(nfixed, obj->slotSpan())); - arr->index = arr->start - vp; - } else { - MOZ_ASSERT(arr->start >= obj->slots_ && - arr->end == obj->slots_ + obj->slotSpan() - nfixed); - arr->index = (arr->start - obj->slots_) + nfixed; - } - arr->kind = HeapSlot::Slot; - } - p[2] |= SavedValueArrayTag; - } else if (tag == SavedValueArrayTag) { - p -= 2; - } - } -} - -bool -GCMarker::restoreValueArray(NativeObject* obj, void** vpp, void** endp) -{ - uintptr_t start = stack.pop(); - HeapSlot::Kind kind = (HeapSlot::Kind) stack.pop(); - - if (kind == HeapSlot::Element) { - if (!obj->is()) - return false; - - uint32_t initlen = obj->getDenseInitializedLength(); - HeapSlot* vp = obj->getDenseElementsAllowCopyOnWrite(); - if (start < initlen) { - *vpp = vp + start; - *endp = vp + initlen; - } else { - /* The object shrunk, in which case no scanning is needed. */ - *vpp = *endp = vp; - } - } else { - MOZ_ASSERT(kind == HeapSlot::Slot); - HeapSlot* vp = obj->fixedSlots(); - unsigned nfixed = obj->numFixedSlots(); - unsigned nslots = obj->slotSpan(); - if (start < nslots) { - if (start < nfixed) { - *vpp = vp + start; - *endp = vp + Min(nfixed, nslots); - } else { - *vpp = obj->slots_ + start - nfixed; - *endp = obj->slots_ + nslots - nfixed; - } - } else { - /* The object shrunk, in which case no scanning is needed. */ - *vpp = *endp = vp; - } - } - - MOZ_ASSERT(*vpp <= *endp); return true; } @@ -1752,64 +1310,754 @@ GCMarker::processMarkStackTop(SliceBudget& budget) } } -bool -GCMarker::drainMarkStack(SliceBudget& budget) +struct SlotArrayLayout { -#ifdef DEBUG - struct AutoCheckCompartment { - bool& flag; - explicit AutoCheckCompartment(bool& comparmentCheckFlag) : flag(comparmentCheckFlag) { - MOZ_ASSERT(!flag); - flag = true; - } - ~AutoCheckCompartment() { flag = false; } - } acc(strictCompartmentChecking); -#endif + union { + HeapSlot* end; + uintptr_t kind; + }; + union { + HeapSlot* start; + uintptr_t index; + }; + NativeObject* obj; - if (budget.isOverBudget()) - return false; + static void staticAsserts() { + /* This should have the same layout as three mark stack items. */ + JS_STATIC_ASSERT(sizeof(SlotArrayLayout) == 3 * sizeof(uintptr_t)); + } +}; - for (;;) { - while (!stack.isEmpty()) { - processMarkStackTop(budget); - if (budget.isOverBudget()) { - saveValueRanges(); - return false; +/* + * During incremental GC, we return from drainMarkStack without having processed + * the entire stack. At that point, JS code can run and reallocate slot arrays + * that are stored on the stack. To prevent this from happening, we replace all + * ValueArrayTag stack items with SavedValueArrayTag. In the latter, slots + * pointers are replaced with slot indexes, and slot array end pointers are + * replaced with the kind of index (properties vs. elements). 
+ */ +void +GCMarker::saveValueRanges() +{ + for (uintptr_t* p = stack.tos_; p > stack.stack_; ) { + uintptr_t tag = *--p & StackTagMask; + if (tag == ValueArrayTag) { + *p &= ~StackTagMask; + p -= 2; + SlotArrayLayout* arr = reinterpret_cast(p); + NativeObject* obj = arr->obj; + MOZ_ASSERT(obj->isNative()); + + HeapSlot* vp = obj->getDenseElementsAllowCopyOnWrite(); + if (arr->end == vp + obj->getDenseInitializedLength()) { + MOZ_ASSERT(arr->start >= vp); + arr->index = arr->start - vp; + arr->kind = HeapSlot::Element; + } else { + HeapSlot* vp = obj->fixedSlots(); + unsigned nfixed = obj->numFixedSlots(); + if (arr->start == arr->end) { + arr->index = obj->slotSpan(); + } else if (arr->start >= vp && arr->start < vp + nfixed) { + MOZ_ASSERT(arr->end == vp + Min(nfixed, obj->slotSpan())); + arr->index = arr->start - vp; + } else { + MOZ_ASSERT(arr->start >= obj->slots_ && + arr->end == obj->slots_ + obj->slotSpan() - nfixed); + arr->index = (arr->start - obj->slots_) + nfixed; + } + arr->kind = HeapSlot::Slot; } - } - - if (!hasDelayedChildren()) - break; - - /* - * Mark children of things that caused too deep recursion during the - * above tracing. Don't do this until we're done with everything - * else. - */ - if (!markDelayedChildren(budget)) { - saveValueRanges(); - return false; + p[2] |= SavedValueArrayTag; + } else if (tag == SavedValueArrayTag) { + p -= 2; } } +} + +bool +GCMarker::restoreValueArray(NativeObject* obj, void** vpp, void** endp) +{ + uintptr_t start = stack.pop(); + HeapSlot::Kind kind = (HeapSlot::Kind) stack.pop(); + + if (kind == HeapSlot::Element) { + if (!obj->is()) + return false; + + uint32_t initlen = obj->getDenseInitializedLength(); + HeapSlot* vp = obj->getDenseElementsAllowCopyOnWrite(); + if (start < initlen) { + *vpp = vp + start; + *endp = vp + initlen; + } else { + /* The object shrunk, in which case no scanning is needed. */ + *vpp = *endp = vp; + } + } else { + MOZ_ASSERT(kind == HeapSlot::Slot); + HeapSlot* vp = obj->fixedSlots(); + unsigned nfixed = obj->numFixedSlots(); + unsigned nslots = obj->slotSpan(); + if (start < nslots) { + if (start < nfixed) { + *vpp = vp + start; + *endp = vp + Min(nfixed, nslots); + } else { + *vpp = obj->slots_ + start - nfixed; + *endp = obj->slots_ + nslots - nfixed; + } + } else { + /* The object shrunk, in which case no scanning is needed. */ + *vpp = *endp = vp; + } + } + + MOZ_ASSERT(*vpp <= *endp); + return true; +} + + +/*** Mark Stack ***********************************************************************************/ + +bool +MarkStack::init(JSGCMode gcMode) +{ + setBaseCapacity(gcMode); + + MOZ_ASSERT(!stack_); + uintptr_t* newStack = js_pod_malloc(baseCapacity_); + if (!newStack) + return false; + + setStack(newStack, 0, baseCapacity_); + return true; +} + +void +MarkStack::setBaseCapacity(JSGCMode mode) +{ + switch (mode) { + case JSGC_MODE_GLOBAL: + case JSGC_MODE_COMPARTMENT: + baseCapacity_ = NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY; + break; + case JSGC_MODE_INCREMENTAL: + baseCapacity_ = INCREMENTAL_MARK_STACK_BASE_CAPACITY; + break; + default: + MOZ_CRASH("bad gc mode"); + } + + if (baseCapacity_ > maxCapacity_) + baseCapacity_ = maxCapacity_; +} + +void +MarkStack::setMaxCapacity(size_t maxCapacity) +{ + MOZ_ASSERT(isEmpty()); + maxCapacity_ = maxCapacity; + if (baseCapacity_ > maxCapacity_) + baseCapacity_ = maxCapacity_; + + reset(); +} + +void +MarkStack::reset() +{ + if (capacity() == baseCapacity_) { + // No size change; keep the current stack. 
+ setStack(stack_, 0, baseCapacity_); + return; + } + + uintptr_t* newStack = (uintptr_t*)js_realloc(stack_, sizeof(uintptr_t) * baseCapacity_); + if (!newStack) { + // If the realloc fails, just keep using the existing stack; it's + // not ideal but better than failing. + newStack = stack_; + baseCapacity_ = capacity(); + } + setStack(newStack, 0, baseCapacity_); +} + +bool +MarkStack::enlarge(unsigned count) +{ + size_t newCapacity = Min(maxCapacity_, capacity() * 2); + if (newCapacity < capacity() + count) + return false; + + size_t tosIndex = position(); + + uintptr_t* newStack = (uintptr_t*)js_realloc(stack_, sizeof(uintptr_t) * newCapacity); + if (!newStack) + return false; + + setStack(newStack, tosIndex, newCapacity); + return true; +} + +void +MarkStack::setGCMode(JSGCMode gcMode) +{ + // The mark stack won't be resized until the next call to reset(), but + // that will happen at the end of the next GC. + setBaseCapacity(gcMode); +} + +size_t +MarkStack::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const +{ + return mallocSizeOf(stack_); +} + + +/*** GCMarker *************************************************************************************/ + +/* + * DoNotTraceWeakMaps: the GC is recomputing the liveness of WeakMap entries, + * so we delay visting entries. + */ +GCMarker::GCMarker(JSRuntime* rt) + : JSTracer(rt, JSTracer::MarkingTracer, DoNotTraceWeakMaps), + stack(size_t(-1)), + color(BLACK), + unmarkedArenaStackTop(nullptr), + markLaterArenas(0), + started(false), + strictCompartmentChecking(false) +{ +} + +bool +GCMarker::init(JSGCMode gcMode) +{ + return stack.init(gcMode); +} + +void +GCMarker::start() +{ + MOZ_ASSERT(!started); + started = true; + color = BLACK; + + MOZ_ASSERT(!unmarkedArenaStackTop); + MOZ_ASSERT(markLaterArenas == 0); + +} + +void +GCMarker::stop() +{ + MOZ_ASSERT(isDrained()); + + MOZ_ASSERT(started); + started = false; + + MOZ_ASSERT(!unmarkedArenaStackTop); + MOZ_ASSERT(markLaterArenas == 0); + + /* Free non-ballast stack memory. */ + stack.reset(); +} + +void +GCMarker::reset() +{ + color = BLACK; + + stack.reset(); + MOZ_ASSERT(isMarkStackEmpty()); + + while (unmarkedArenaStackTop) { + ArenaHeader* aheader = unmarkedArenaStackTop; + MOZ_ASSERT(aheader->hasDelayedMarking); + MOZ_ASSERT(markLaterArenas); + unmarkedArenaStackTop = aheader->getNextDelayedMarking(); + aheader->unsetDelayedMarking(); + aheader->markOverflow = 0; + aheader->allocatedDuringIncremental = 0; + markLaterArenas--; + } + MOZ_ASSERT(isDrained()); + MOZ_ASSERT(!markLaterArenas); +} + +void +GCMarker::markDelayedChildren(ArenaHeader* aheader) +{ + if (aheader->markOverflow) { + bool always = aheader->allocatedDuringIncremental; + aheader->markOverflow = 0; + + for (ArenaCellIterUnderGC i(aheader); !i.done(); i.next()) { + TenuredCell* t = i.getCell(); + if (always || t->isMarked()) { + t->markIfUnmarked(); + JS_TraceChildren(this, t, MapAllocToTraceKind(aheader->getAllocKind())); + } + } + } else { + MOZ_ASSERT(aheader->allocatedDuringIncremental); + PushArena(this, aheader); + } + aheader->allocatedDuringIncremental = 0; + /* + * Note that during an incremental GC we may still be allocating into + * aheader. However, prepareForIncrementalGC sets the + * allocatedDuringIncremental flag if we continue marking. 
+ */ +} + +bool +GCMarker::markDelayedChildren(SliceBudget& budget) +{ + GCRuntime& gc = runtime()->gc; + gcstats::AutoPhase ap(gc.stats, gc.state() == MARK, gcstats::PHASE_MARK_DELAYED); + + MOZ_ASSERT(unmarkedArenaStackTop); + do { + /* + * If marking gets delayed at the same arena again, we must repeat + * marking of its things. For that we pop arena from the stack and + * clear its hasDelayedMarking flag before we begin the marking. + */ + ArenaHeader* aheader = unmarkedArenaStackTop; + MOZ_ASSERT(aheader->hasDelayedMarking); + MOZ_ASSERT(markLaterArenas); + unmarkedArenaStackTop = aheader->getNextDelayedMarking(); + aheader->unsetDelayedMarking(); + markLaterArenas--; + markDelayedChildren(aheader); + + budget.step(150); + if (budget.isOverBudget()) + return false; + } while (unmarkedArenaStackTop); + MOZ_ASSERT(!markLaterArenas); return true; } -struct TraceChildrenFunctor { - template - void operator()(JSTracer* trc, void* thing) { - static_cast(thing)->traceChildren(trc); +template +static void +PushArenaTyped(GCMarker* gcmarker, ArenaHeader* aheader) +{ + for (ArenaCellIterUnderGC i(aheader); !i.done(); i.next()) + gcmarker->traverse(i.get()); +} + +void +gc::PushArena(GCMarker* gcmarker, ArenaHeader* aheader) +{ + switch (MapAllocToTraceKind(aheader->getAllocKind())) { + case JSTRACE_OBJECT: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_SCRIPT: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_STRING: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_SYMBOL: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_BASE_SHAPE: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_JITCODE: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_LAZY_SCRIPT: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_SHAPE: + PushArenaTyped(gcmarker, aheader); + break; + + case JSTRACE_OBJECT_GROUP: + PushArenaTyped(gcmarker, aheader); + break; + + default: + MOZ_CRASH("Invalid trace kind in PushArena."); + } +} + +#ifdef DEBUG +void +GCMarker::checkZone(void* p) +{ + MOZ_ASSERT(started); + DebugOnly cell = static_cast(p); + MOZ_ASSERT_IF(cell->isTenured(), cell->asTenured().zone()->isCollecting()); +} +#endif + +size_t +GCMarker::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const +{ + size_t size = stack.sizeOfExcludingThis(mallocSizeOf); + for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next()) + size += zone->gcGrayRoots.sizeOfExcludingThis(mallocSizeOf); + return size; +} + +void +js::SetMarkStackLimit(JSRuntime* rt, size_t limit) +{ + rt->gc.setMarkStackLimit(limit); +} + +/*** IsMarked / IsAboutToBeFinalized **************************************************************/ + +template +static inline void +CheckIsMarkedThing(T* thingp) +{ +#define IS_SAME_TYPE_OR(name, type, _) mozilla::IsSame::value || + static_assert( + FOR_EACH_GC_LAYOUT(IS_SAME_TYPE_OR) + false, "Only the base cell layout types are allowed into marking/tracing internals"); +#undef IS_SAME_TYPE_OR + +#ifdef DEBUG + MOZ_ASSERT(thingp); + MOZ_ASSERT(*thingp); + JSRuntime* rt = (*thingp)->runtimeFromAnyThread(); + MOZ_ASSERT_IF(!ThingIsPermanentAtomOrWellKnownSymbol(*thingp), + CurrentThreadCanAccessRuntime(rt) || + (rt->isHeapCollecting() && rt->gc.state() == SWEEP)); +#endif +} + +template +static bool +IsMarkedInternal(T* thingp) +{ + CheckIsMarkedThing(thingp); + JSRuntime* rt = (*thingp)->runtimeFromAnyThread(); + + if (IsInsideNursery(*thingp)) { + MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt)); + return 
rt->gc.nursery.getForwardedPointer(thingp); + } + + Zone* zone = (*thingp)->asTenured().zoneFromAnyThread(); + if (!zone->isCollectingFromAnyThread() || zone->isGCFinished()) + return true; + if (zone->isGCCompacting() && IsForwarded(*thingp)) + *thingp = Forwarded(*thingp); + return (*thingp)->asTenured().isMarked(); +} + +template +struct IsMarkedFunctor : public IdentityDefaultAdaptor { + template S operator()(T* t, bool* rv) { + *rv = IsMarkedInternal(&t); + return js::gc::RewrapValueOrId::wrap(t); } }; -void -js::TraceChildren(JSTracer* trc, void* thing, JSGCTraceKind kind) +template <> +bool +IsMarkedInternal(Value* valuep) { - MOZ_ASSERT(thing); - TraceChildrenFunctor f; - CallTyped(f, kind, trc, thing); + bool rv = true; + *valuep = DispatchValueTyped(IsMarkedFunctor(), *valuep, &rv); + return rv; } +template <> +bool +IsMarkedInternal(jsid* idp) +{ + bool rv = true; + *idp = DispatchIdTyped(IsMarkedFunctor(), *idp, &rv); + return rv; +} + +template +static bool +IsAboutToBeFinalizedInternal(T* thingp) +{ + CheckIsMarkedThing(thingp); + T thing = *thingp; + JSRuntime* rt = thing->runtimeFromAnyThread(); + + /* Permanent atoms are never finalized by non-owning runtimes. */ + if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && !TlsPerThreadData.get()->associatedWith(rt)) + return false; + + Nursery& nursery = rt->gc.nursery; + MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing)); + if (rt->isHeapMinorCollecting()) { + if (IsInsideNursery(thing)) + return !nursery.getForwardedPointer(thingp); + return false; + } + + Zone* zone = thing->asTenured().zoneFromAnyThread(); + if (zone->isGCSweeping()) { + if (thing->asTenured().arenaHeader()->allocatedDuringIncremental) + return false; + return !thing->asTenured().isMarked(); + } + else if (zone->isGCCompacting() && IsForwarded(thing)) { + *thingp = Forwarded(thing); + return false; + } + + return false; +} + +template +struct IsAboutToBeFinalizedFunctor : public IdentityDefaultAdaptor { + template S operator()(T* t, bool* rv) { + *rv = IsAboutToBeFinalizedInternal(&t); + return js::gc::RewrapValueOrId::wrap(t); + } +}; + +template <> +bool +IsAboutToBeFinalizedInternal(Value* valuep) +{ + bool rv = false; + *valuep = DispatchValueTyped(IsAboutToBeFinalizedFunctor(), *valuep, &rv); + return rv; +} + +template <> +bool +IsAboutToBeFinalizedInternal(jsid* idp) +{ + bool rv = false; + *idp = DispatchIdTyped(IsAboutToBeFinalizedFunctor(), *idp, &rv); + return rv; +} + +namespace js { +namespace gc { + +template +bool +IsMarkedUnbarriered(T* thingp) +{ + return IsMarkedInternal(ConvertToBase(thingp)); +} + +template +bool +IsMarked(BarrieredBase* thingp) +{ + return IsMarkedInternal(ConvertToBase(thingp->unsafeGet())); +} + +template +bool +IsMarked(ReadBarriered* thingp) +{ + return IsMarkedInternal(ConvertToBase(thingp->unsafeGet())); +} + +template +bool +IsAboutToBeFinalizedUnbarriered(T* thingp) +{ + return IsAboutToBeFinalizedInternal(ConvertToBase(thingp)); +} + +template +bool +IsAboutToBeFinalized(BarrieredBase* thingp) +{ + return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeGet())); +} + +template +bool +IsAboutToBeFinalized(ReadBarriered* thingp) +{ + return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeGet())); +} + +// Instantiate a copy of the Tracing templates for each derived type. 
+#define INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS(type) \ + template bool IsMarkedUnbarriered(type*); \ + template bool IsMarked(BarrieredBase*); \ + template bool IsMarked(ReadBarriered*); \ + template bool IsAboutToBeFinalizedUnbarriered(type*); \ + template bool IsAboutToBeFinalized(BarrieredBase*); \ + template bool IsAboutToBeFinalized(ReadBarriered*); +FOR_EACH_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS) +#undef INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS + +} /* namespace gc */ +} /* namespace js */ + + +/*** Type Marking *********************************************************************************/ + +void +TypeSet::MarkTypeRoot(JSTracer* trc, TypeSet::Type* v, const char* name) +{ + JS_ROOT_MARKING_ASSERT(trc); + MarkTypeUnbarriered(trc, v, name); +} + +void +TypeSet::MarkTypeUnbarriered(JSTracer* trc, TypeSet::Type* v, const char* name) +{ + if (v->isSingletonUnchecked()) { + JSObject* obj = v->singleton(); + DispatchToTracer(trc, &obj, name); + *v = TypeSet::ObjectType(obj); + } else if (v->isGroupUnchecked()) { + ObjectGroup* group = v->group(); + DispatchToTracer(trc, &group, name); + *v = TypeSet::ObjectType(group); + } +} + + +/*** Cycle Collector Helpers **********************************************************************/ + +// This function is used by the cycle collector to trace through a shape. The +// cycle collector does not care about shapes or base shapes, so those are not +// marked. Instead, any shapes or base shapes that are encountered have their +// children marked. Stack space is bounded. +// +// Note: The canonical way for an embedding to implement this functionality +// would be through a custom CallbackTracer that ignores unrequired children +// and pushes to a separate mark stack in order to bound the call stack usage. +// We've implemented like this purely for performance. +void +gc::MarkCycleCollectorChildren(JSTracer* trc, Shape* shape) +{ + // We need to mark the global, but it's OK to only do this once instead of + // doing it for every Shape in our lineage, since it's always the same + // global. + JSObject* global = shape->compartment()->unsafeUnbarrieredMaybeGlobal(); + MOZ_ASSERT(global); + TraceManuallyBarrieredEdge(trc, &global, "global"); + + do { + MOZ_ASSERT(global == shape->compartment()->unsafeUnbarrieredMaybeGlobal()); + + MOZ_ASSERT(shape->base()); + shape->base()->assertConsistency(); + + TraceEdge(trc, &shape->propidRef(), "propid"); + + if (shape->hasGetterObject()) { + JSObject* tmp = shape->getterObject(); + TraceManuallyBarrieredEdge(trc, &tmp, "getter"); + MOZ_ASSERT(tmp == shape->getterObject()); + } + + if (shape->hasSetterObject()) { + JSObject* tmp = shape->setterObject(); + TraceManuallyBarrieredEdge(trc, &tmp, "setter"); + MOZ_ASSERT(tmp == shape->setterObject()); + } + + shape = shape->previous(); + } while (shape); +} + +void +TraceObjectGroupCycleCollectorChildrenCallback(JS::CallbackTracer* trc, + void** thingp, JSGCTraceKind kind); + +// Object groups can point to other object groups via an UnboxedLayout or the +// the original unboxed group link. There can potentially be deep or cyclic +// chains of such groups to trace through without going through a thing that +// participates in cycle collection. These need to be handled iteratively to +// avoid blowing the stack when running the cycle collector's callback tracer. 
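
As a self-contained illustration of that iterative approach (the ToyGroup type and visitGroupChain helper are hypothetical, not part of this patch), the pattern is a seen list to avoid revisiting groups plus an explicit worklist in place of the call stack, which is what the tracer defined next does with its seen and worklist vectors:

#include <algorithm>
#include <vector>

struct ToyGroup {
    std::vector<ToyGroup*> links;   // stands in for UnboxedLayout / original-group edges
    bool marked = false;
};

// Visit a potentially deep or cyclic chain of groups without recursing.
void visitGroupChain(ToyGroup* start) {
    std::vector<ToyGroup*> seen, worklist;
    seen.push_back(start);
    worklist.push_back(start);
    while (!worklist.empty()) {
        ToyGroup* g = worklist.back();
        worklist.pop_back();
        g->marked = true;                        // the "trace this group" step
        for (ToyGroup* next : g->links) {
            if (std::find(seen.begin(), seen.end(), next) == seen.end()) {
                seen.push_back(next);            // remember it so cycles terminate
                worklist.push_back(next);        // defer instead of recursing
            }
        }
    }
}
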
+struct ObjectGroupCycleCollectorTracer : public JS::CallbackTracer +{ + explicit ObjectGroupCycleCollectorTracer(JS::CallbackTracer* innerTracer) + : JS::CallbackTracer(innerTracer->runtime(), + TraceObjectGroupCycleCollectorChildrenCallback, + DoNotTraceWeakMaps), + innerTracer(innerTracer) + {} + + JS::CallbackTracer* innerTracer; + Vector seen, worklist; +}; + +void +TraceObjectGroupCycleCollectorChildrenCallback(JS::CallbackTracer* trcArg, + void** thingp, JSGCTraceKind kind) +{ + ObjectGroupCycleCollectorTracer* trc = static_cast(trcArg); + JS::GCCellPtr thing(*thingp, kind); + + if (thing.isObject() || thing.isScript()) { + // Invoke the inner cycle collector callback on this child. It will not + // recurse back into TraceChildren. + trc->innerTracer->invoke(thingp, kind); + return; + } + + if (thing.isObjectGroup()) { + // If this group is required to be in an ObjectGroup chain, trace it + // via the provided worklist rather than continuing to recurse. + ObjectGroup* group = static_cast(thing.asCell()); + if (group->maybeUnboxedLayout()) { + for (size_t i = 0; i < trc->seen.length(); i++) { + if (trc->seen[i] == group) + return; + } + if (trc->seen.append(group) && trc->worklist.append(group)) { + return; + } else { + // If append fails, keep tracing normally. The worst that will + // happen is we end up overrecursing. + } + } + } + + TraceChildren(trc, thing.asCell(), thing.kind()); +} + +void +gc::MarkCycleCollectorChildren(JSTracer* trc, ObjectGroup* group) +{ + MOZ_ASSERT(trc->isCallbackTracer()); + + // Early return if this group is not required to be in an ObjectGroup chain. + if (!group->maybeUnboxedLayout()) { + TraceChildren(trc, group, JSTRACE_OBJECT_GROUP); + return; + } + + ObjectGroupCycleCollectorTracer groupTracer(trc->asCallbackTracer()); + TraceChildren(&groupTracer, group, JSTRACE_OBJECT_GROUP); + + while (!groupTracer.worklist.empty()) { + ObjectGroup* innerGroup = groupTracer.worklist.popCopy(); + TraceChildren(&groupTracer, innerGroup, JSTRACE_OBJECT_GROUP); + } +} + + +/*** Cycle Collector Barrier Implementation *******************************************************/ + #ifdef DEBUG static void AssertNonGrayGCThing(JS::CallbackTracer* trc, void** thingp, JSGCTraceKind kind) @@ -1817,23 +2065,6 @@ AssertNonGrayGCThing(JS::CallbackTracer* trc, void** thingp, JSGCTraceKind kind) DebugOnly thing(static_cast(*thingp)); MOZ_ASSERT_IF(thing->isTenured(), !thing->asTenured().isMarked(js::gc::GRAY)); } - -template -bool -gc::ZoneIsGCMarking(T* thing) -{ - return thing->zone()->isGCMarking(); -} - -template -bool -js::gc::ZoneIsAtomsZoneForString(JSRuntime* rt, T* thing) -{ - JSGCTraceKind kind = GetGCThingTraceKind(thing); - if (kind == JSTRACE_STRING || kind == JSTRACE_SYMBOL) - return rt->isAtomsZone(thing->zone()); - return false; -} #endif static void diff --git a/js/src/gc/Marking.h b/js/src/gc/Marking.h index 2459834be44b..41155c32b762 100644 --- a/js/src/gc/Marking.h +++ b/js/src/gc/Marking.h @@ -7,87 +7,308 @@ #ifndef gc_Marking_h #define gc_Marking_h +#include "mozilla/DebugOnly.h" + #include "jsfriendapi.h" -#include "gc/Barrier.h" +#include "gc/Heap.h" +#include "gc/Tracer.h" +#include "js/GCAPI.h" +#include "js/SliceBudget.h" +#include "js/TracingAPI.h" +class JSLinearString; +class JSRope; namespace js { - -/*** Tracing ***/ - -// Trace through an edge in the live object graph on behalf of tracing. The -// effect of tracing the edge depends on the JSTracer being used. 
-template -void -TraceEdge(JSTracer* trc, BarrieredBase* thingp, const char* name); - -// Trace through a "root" edge. These edges are the initial edges in the object -// graph traversal. Root edges are asserted to only be traversed in the initial -// phase of a GC. -template -void -TraceRoot(JSTracer* trc, T* thingp, const char* name); - -// Like TraceEdge, but for edges that do not use one of the automatic barrier -// classes and, thus, must be treated specially for moving GC. This method is -// separate from TraceEdge to make accidental use of such edges more obvious. -template -void -TraceManuallyBarrieredEdge(JSTracer* trc, T* thingp, const char* name); - -// Trace all edges contained in the given array. -template -void -TraceRange(JSTracer* trc, size_t len, BarrieredBase* vec, const char* name); - -// Trace all root edges in the given array. -template -void -TraceRootRange(JSTracer* trc, size_t len, T* vec, const char* name); - -// Trace an edge that crosses compartment boundaries. If the compartment of the -// destination thing is not being GC'd, then the edge will not be traced. -template -void -TraceCrossCompartmentEdge(JSTracer* trc, JSObject* src, BarrieredBase* dst, - const char* name); - -// As above but with manual barriers. -template -void -TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc, JSObject* src, T* dst, - const char* name); - -// Permanent atoms and well-known symbols are shared between runtimes and must -// use a separate marking path so that we can filter them out of normal heap -// tracing. -template -void -TraceProcessGlobalRoot(JSTracer* trc, T* thing, const char* name); - +class BaseShape; +class GCMarker; +class LazyScript; +class NativeObject; +class ObjectGroup; namespace gc { - -/* Return true if the pointer is nullptr, or if it is a tagged pointer to - * nullptr. - */ -MOZ_ALWAYS_INLINE bool -IsNullTaggedPointer(void* p) -{ - return uintptr_t(p) < 32; +struct ArenaHeader; +} +namespace jit { +class JitCode; } -/*** Externally Typed Marking ***/ +static const size_t NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY = 4096; +static const size_t INCREMENTAL_MARK_STACK_BASE_CAPACITY = 32768; + +/* + * When the native stack is low, the GC does not call JS_TraceChildren to mark + * the reachable "children" of the thing. Rather the thing is put aside and + * JS_TraceChildren is called later with more space on the C stack. + * + * To implement such delayed marking of the children with minimal overhead for + * the normal case of sufficient native stack, the code adds a field per arena. + * The field markingDelay->link links all arenas with delayed things into a + * stack list with the pointer to stack top in GCMarker::unmarkedArenaStackTop. + * GCMarker::delayMarkingChildren adds arenas to the stack as necessary while + * markDelayedChildren pops the arenas from the stack until it empties. + */ +class MarkStack +{ + friend class GCMarker; + + uintptr_t* stack_; + uintptr_t* tos_; + uintptr_t* end_; + + // The capacity we start with and reset() to. 
+ size_t baseCapacity_; + size_t maxCapacity_; + + public: + explicit MarkStack(size_t maxCapacity) + : stack_(nullptr), + tos_(nullptr), + end_(nullptr), + baseCapacity_(0), + maxCapacity_(maxCapacity) + {} + + ~MarkStack() { + js_free(stack_); + } + + size_t capacity() { return end_ - stack_; } + + ptrdiff_t position() const { return tos_ - stack_; } + + void setStack(uintptr_t* stack, size_t tosIndex, size_t capacity) { + stack_ = stack; + tos_ = stack + tosIndex; + end_ = stack + capacity; + } + + bool init(JSGCMode gcMode); + + void setBaseCapacity(JSGCMode mode); + size_t maxCapacity() const { return maxCapacity_; } + void setMaxCapacity(size_t maxCapacity); + + bool push(uintptr_t item) { + if (tos_ == end_) { + if (!enlarge(1)) + return false; + } + MOZ_ASSERT(tos_ < end_); + *tos_++ = item; + return true; + } + + bool push(uintptr_t item1, uintptr_t item2, uintptr_t item3) { + uintptr_t* nextTos = tos_ + 3; + if (nextTos > end_) { + if (!enlarge(3)) + return false; + nextTos = tos_ + 3; + } + MOZ_ASSERT(nextTos <= end_); + tos_[0] = item1; + tos_[1] = item2; + tos_[2] = item3; + tos_ = nextTos; + return true; + } + + bool isEmpty() const { + return tos_ == stack_; + } + + uintptr_t pop() { + MOZ_ASSERT(!isEmpty()); + return *--tos_; + } + + void reset(); + + /* Grow the stack, ensuring there is space for at least count elements. */ + bool enlarge(unsigned count); + + void setGCMode(JSGCMode gcMode); + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; +}; + +class GCMarker : public JSTracer +{ + public: + explicit GCMarker(JSRuntime* rt); + bool init(JSGCMode gcMode); + + void setMaxCapacity(size_t maxCap) { stack.setMaxCapacity(maxCap); } + size_t maxCapacity() const { return stack.maxCapacity(); } + + void start(); + void stop(); + void reset(); + + // Mark the given GC thing and traverse its children at some point. + template void traverse(T thing); + + // Calls traverse on target after making additional assertions. + template void traverse(S source, T target); + + // C++ requires explicit declarations of partial template instantiations. + template void traverse(S source, jsid target); + + /* + * Care must be taken changing the mark color from gray to black. The cycle + * collector depends on the invariant that there are no black to gray edges + * in the GC heap. This invariant lets the CC not trace through black + * objects. If this invariant is violated, the cycle collector may free + * objects that are still reachable. + */ + void setMarkColorGray() { + MOZ_ASSERT(isDrained()); + MOZ_ASSERT(color == gc::BLACK); + color = gc::GRAY; + } + void setMarkColorBlack() { + MOZ_ASSERT(isDrained()); + MOZ_ASSERT(color == gc::GRAY); + color = gc::BLACK; + } + uint32_t markColor() const { return color; } + + void delayMarkingArena(gc::ArenaHeader* aheader); + void delayMarkingChildren(const void* thing); + void markDelayedChildren(gc::ArenaHeader* aheader); + bool markDelayedChildren(SliceBudget& budget); + bool hasDelayedChildren() const { + return !!unmarkedArenaStackTop; + } + + bool isDrained() { + return isMarkStackEmpty() && !unmarkedArenaStackTop; + } + + bool drainMarkStack(SliceBudget& budget); + + void setGCMode(JSGCMode mode) { stack.setGCMode(mode); } + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; + +#ifdef DEBUG + bool shouldCheckCompartments() { return strictCompartmentChecking; } +#endif + + /* This is public exclusively for ScanRope. 
*/ + MarkStack stack; + + private: +#ifdef DEBUG + void checkZone(void* p); +#else + void checkZone(void* p) {} +#endif + + /* + * We use a common mark stack to mark GC things of different types and use + * the explicit tags to distinguish them when it cannot be deduced from + * the context of push or pop operation. + */ + enum StackTag { + ValueArrayTag, + ObjectTag, + GroupTag, + SavedValueArrayTag, + JitCodeTag, + LastTag = JitCodeTag + }; + + static const uintptr_t StackTagMask = 7; + static_assert(StackTagMask >= uintptr_t(LastTag), "The tag mask must subsume the tags."); + static_assert(StackTagMask <= gc::CellMask, "The tag mask must be embeddable in a Cell*."); + + // Push an object onto the stack for later tracing and assert that it has + // already been marked. + void repush(JSObject* obj) { + MOZ_ASSERT(gc::TenuredCell::fromPointer(obj)->isMarked(markColor())); + pushTaggedPtr(ObjectTag, obj); + } + + template void markAndTraceChildren(T* thing); + template void markAndPush(StackTag tag, T* thing); + template void markAndScan(T* thing); + void eagerlyMarkChildren(JSLinearString* str); + void eagerlyMarkChildren(JSRope* rope); + void eagerlyMarkChildren(JSString* str); + void eagerlyMarkChildren(LazyScript *thing); + void eagerlyMarkChildren(Shape* shape); + void lazilyMarkChildren(ObjectGroup* group); + + // We may not have concrete types yet, so this has to be out of the header. + template + void dispatchToTraceChildren(T* thing); + + // Mark the given GC thing, but do not trace its children. Return true + // if the thing became marked. + template + bool mark(T* thing); + + void pushTaggedPtr(StackTag tag, void* ptr) { + checkZone(ptr); + uintptr_t addr = reinterpret_cast(ptr); + MOZ_ASSERT(!(addr & StackTagMask)); + if (!stack.push(addr | uintptr_t(tag))) + delayMarkingChildren(ptr); + } + + void pushValueArray(JSObject* obj, void* start, void* end) { + checkZone(obj); + + MOZ_ASSERT(start <= end); + uintptr_t tagged = reinterpret_cast(obj) | GCMarker::ValueArrayTag; + uintptr_t startAddr = reinterpret_cast(start); + uintptr_t endAddr = reinterpret_cast(end); + + /* + * Push in the reverse order so obj will be on top. If we cannot push + * the array, we trigger delay marking for the whole object. + */ + if (!stack.push(endAddr, startAddr, tagged)) + delayMarkingChildren(obj); + } + + bool isMarkStackEmpty() { + return stack.isEmpty(); + } + + bool restoreValueArray(NativeObject* obj, void** vpp, void** endp); + void saveValueRanges(); + inline void processMarkStackTop(SliceBudget& budget); + + /* The color is only applied to objects and functions. */ + uint32_t color; + + /* Pointer to the top of the stack of arenas we are delaying marking on. */ + js::gc::ArenaHeader* unmarkedArenaStackTop; + + /* Count of arenas that are currently in the stack. */ + mozilla::DebugOnly markLaterArenas; + + /* Assert that start and stop are called with correct ordering. */ + mozilla::DebugOnly started; + + /* + * If this is true, all marked objects must belong to a compartment being + * GCed. This is used to look for compartment bugs. 
+ */ + mozilla::DebugOnly strictCompartmentChecking; +}; void -TraceGenericPointerRoot(JSTracer* trc, Cell** thingp, const char* name); +SetMarkStackLimit(JSRuntime* rt, size_t limit); -void -TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, Cell** thingp, const char* name); +bool +IsBufferingGrayRoots(JSTracer* trc); -/*** Slot Marking ***/ - -void -MarkObjectSlots(JSTracer* trc, NativeObject* obj, uint32_t start, uint32_t nslots); +} /* namespace js */ +namespace js { +namespace gc { /*** Special Cases ***/ @@ -103,7 +324,7 @@ MarkCycleCollectorChildren(JSTracer* trc, ObjectGroup* group); void PushArena(GCMarker* gcmarker, ArenaHeader* aheader); -/*** Generic ***/ +/*** Liveness ***/ template bool @@ -143,10 +364,16 @@ ToMarkable(Cell* cell) return cell; } -/* - * HashKeyRef represents a reference to a HashMap key. This should normally - * be used through the HashTableWriteBarrierPost function. - */ +// Return true if the pointer is nullptr, or if it is a tagged pointer to +// nullptr. +MOZ_ALWAYS_INLINE bool +IsNullTaggedPointer(void* p) +{ + return uintptr_t(p) < 32; +} + +// HashKeyRef represents a reference to a HashMap key. This should normally +// be used through the HashTableWriteBarrierPost function. template class HashKeyRef : public BufferableRef { @@ -167,6 +394,8 @@ class HashKeyRef : public BufferableRef } }; +// Wrap a GC thing pointer into a new Value or jsid. The type system enforces +// that the thing pointer is a wrappable type. template struct RewrapValueOrId {}; #define DECLARE_REWRAP(S, T, method, prefix) \ @@ -181,9 +410,6 @@ DECLARE_REWRAP(jsid, JS::Symbol*, SYMBOL_TO_JSID, ); } /* namespace gc */ -void -TraceChildren(JSTracer* trc, void* thing, JSGCTraceKind kind); - bool UnmarkGrayShapeRecursively(Shape* shape); diff --git a/js/src/gc/RootMarking.cpp b/js/src/gc/RootMarking.cpp index 49522e067ff0..331dd8670b4b 100644 --- a/js/src/gc/RootMarking.cpp +++ b/js/src/gc/RootMarking.cpp @@ -552,6 +552,28 @@ js::gc::GCRuntime::markRuntime(JSTracer* trc, } } +// Append traced things to a buffer on the zone for use later in the GC. +// See the comment in GCRuntime.h above grayBufferState for details. +class BufferGrayRootsTracer : public JS::CallbackTracer +{ + // Set to false if we OOM while buffering gray roots. + bool bufferingGrayRootsFailed; + + void appendGrayRoot(gc::TenuredCell* thing, JSGCTraceKind kind); + + public: + explicit BufferGrayRootsTracer(JSRuntime* rt) + : JS::CallbackTracer(rt, grayTraceCallback), bufferingGrayRootsFailed(false) + {} + + static void grayTraceCallback(JS::CallbackTracer* trc, void** thingp, JSGCTraceKind kind) { + auto tracer = static_cast(trc); + tracer->appendGrayRoot(gc::TenuredCell::fromPointer(*thingp), kind); + } + + bool failed() const { return bufferingGrayRootsFailed; } +}; + void js::gc::GCRuntime::bufferGrayRoots() { @@ -621,3 +643,13 @@ GCRuntime::resetBufferedGrayRoots() const for (GCZonesIter zone(rt); !zone.done(); zone.next()) zone->gcGrayRoots.clearAndFree(); } + +// Return true if this trace is happening on behalf of gray buffering during +// the marking phase of incremental GC. 
+bool +js::IsBufferingGrayRoots(JSTracer* trc) +{ + return trc->isCallbackTracer() && + trc->asCallbackTracer()->hasCallback(BufferGrayRootsTracer::grayTraceCallback); +} + diff --git a/js/src/gc/StoreBuffer.cpp b/js/src/gc/StoreBuffer.cpp index 5ad80ab866f6..32ba20fdbeed 100644 --- a/js/src/gc/StoreBuffer.cpp +++ b/js/src/gc/StoreBuffer.cpp @@ -42,7 +42,7 @@ StoreBuffer::SlotsEdge::mark(JSTracer* trc) const int32_t start = Min(uint32_t(start_), obj->slotSpan()); int32_t end = Min(uint32_t(start_) + count_, obj->slotSpan()); MOZ_ASSERT(end >= start); - MarkObjectSlots(trc, obj, start, end - start); + TraceObjectSlots(trc, obj, start, end - start); } } diff --git a/js/src/gc/StoreBuffer.h b/js/src/gc/StoreBuffer.h index 7eb60d201d49..6a9abc2aec10 100644 --- a/js/src/gc/StoreBuffer.h +++ b/js/src/gc/StoreBuffer.h @@ -15,7 +15,6 @@ #include "ds/LifoAlloc.h" #include "gc/Nursery.h" -#include "gc/Tracer.h" #include "js/MemoryMetrics.h" namespace js { diff --git a/js/src/gc/Tracer.cpp b/js/src/gc/Tracer.cpp index 37c03e047996..8373bf073c76 100644 --- a/js/src/gc/Tracer.cpp +++ b/js/src/gc/Tracer.cpp @@ -21,6 +21,7 @@ #include "gc/Marking.h" #include "gc/Zone.h" +#include "vm/Shape.h" #include "vm/Symbol.h" #include "jsgcinlines.h" @@ -35,6 +36,9 @@ void CheckTracedThing(JSTracer* trc, T thing); } // namespace js + +/*** Callback Tracer Dispatch ********************************************************************/ + template T DoCallback(JS::CallbackTracer* trc, T* thingp, const char* name) @@ -73,6 +77,24 @@ DoCallback(JS::CallbackTracer* trc, jsid* idp, const char* name) return *idp; } +void +JS::CallbackTracer::getTracingEdgeName(char* buffer, size_t bufferSize) +{ + MOZ_ASSERT(bufferSize > 0); + if (contextFunctor_) { + (*contextFunctor_)(this, buffer, bufferSize); + return; + } + if (contextIndex_ != InvalidIndex) { + JS_snprintf(buffer, bufferSize, "%s[%lu]", contextName_, contextIndex_); + return; + } + JS_snprintf(buffer, bufferSize, "%s", contextName_); +} + + +/*** Public Tracing API **************************************************************************/ + JS_PUBLIC_API(void) JS_CallUnbarrieredValueTracer(JSTracer* trc, Value* valuep, const char* name) { @@ -158,6 +180,21 @@ JS_TraceChildren(JSTracer* trc, void* thing, JSGCTraceKind kind) js::TraceChildren(trc, thing, kind); } +struct TraceChildrenFunctor { + template + void operator()(JSTracer* trc, void* thing) { + static_cast(thing)->traceChildren(trc); + } +}; + +void +js::TraceChildren(JSTracer* trc, void* thing, JSGCTraceKind kind) +{ + MOZ_ASSERT(thing); + TraceChildrenFunctor f; + CallTyped(f, kind, trc, thing); +} + JS_PUBLIC_API(void) JS_TraceRuntime(JSTracer* trc) { @@ -219,6 +256,9 @@ JS_TraceIncomingCCWs(JSTracer* trc, const JS::ZoneSet& zones) } } + +/*** Traced Edge Printer *************************************************************************/ + static size_t CountDecimalDigits(size_t num) { @@ -368,277 +408,3 @@ JS_GetTraceThingInfo(char* buf, size_t bufsize, JSTracer* trc, void* thing, } buf[bufsize - 1] = '\0'; } - -JSTracer::JSTracer(JSRuntime* rt, TracerKindTag kindTag, - WeakMapTraceKind weakTraceKind /* = TraceWeakMapValues */) - : runtime_(rt) - , tag(kindTag) - , eagerlyTraceWeakMaps_(weakTraceKind) -{ -} - -void -JS::CallbackTracer::getTracingEdgeName(char* buffer, size_t bufferSize) -{ - MOZ_ASSERT(bufferSize > 0); - if (contextFunctor_) { - (*contextFunctor_)(this, buffer, bufferSize); - return; - } - if (contextIndex_ != InvalidIndex) { - JS_snprintf(buffer, bufferSize, "%s[%lu]", contextName_, 
contextIndex_); - return; - } - JS_snprintf(buffer, bufferSize, "%s", contextName_); -} - -void -JS::CallbackTracer::setTraceCallback(JSTraceCallback traceCallback) -{ - callback = traceCallback; -} - -bool -MarkStack::init(JSGCMode gcMode) -{ - setBaseCapacity(gcMode); - - MOZ_ASSERT(!stack_); - uintptr_t* newStack = js_pod_malloc(baseCapacity_); - if (!newStack) - return false; - - setStack(newStack, 0, baseCapacity_); - return true; -} - -void -MarkStack::setBaseCapacity(JSGCMode mode) -{ - switch (mode) { - case JSGC_MODE_GLOBAL: - case JSGC_MODE_COMPARTMENT: - baseCapacity_ = NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY; - break; - case JSGC_MODE_INCREMENTAL: - baseCapacity_ = INCREMENTAL_MARK_STACK_BASE_CAPACITY; - break; - default: - MOZ_CRASH("bad gc mode"); - } - - if (baseCapacity_ > maxCapacity_) - baseCapacity_ = maxCapacity_; -} - -void -MarkStack::setMaxCapacity(size_t maxCapacity) -{ - MOZ_ASSERT(isEmpty()); - maxCapacity_ = maxCapacity; - if (baseCapacity_ > maxCapacity_) - baseCapacity_ = maxCapacity_; - - reset(); -} - -void -MarkStack::reset() -{ - if (capacity() == baseCapacity_) { - // No size change; keep the current stack. - setStack(stack_, 0, baseCapacity_); - return; - } - - uintptr_t* newStack = (uintptr_t*)js_realloc(stack_, sizeof(uintptr_t) * baseCapacity_); - if (!newStack) { - // If the realloc fails, just keep using the existing stack; it's - // not ideal but better than failing. - newStack = stack_; - baseCapacity_ = capacity(); - } - setStack(newStack, 0, baseCapacity_); -} - -bool -MarkStack::enlarge(unsigned count) -{ - size_t newCapacity = Min(maxCapacity_, capacity() * 2); - if (newCapacity < capacity() + count) - return false; - - size_t tosIndex = position(); - - uintptr_t* newStack = (uintptr_t*)js_realloc(stack_, sizeof(uintptr_t) * newCapacity); - if (!newStack) - return false; - - setStack(newStack, tosIndex, newCapacity); - return true; -} - -void -MarkStack::setGCMode(JSGCMode gcMode) -{ - // The mark stack won't be resized until the next call to reset(), but - // that will happen at the end of the next GC. - setBaseCapacity(gcMode); -} - -size_t -MarkStack::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const -{ - return mallocSizeOf(stack_); -} - -/* - * DoNotTraceWeakMaps: the GC is recomputing the liveness of WeakMap entries, - * so we delay visting entries. - */ -GCMarker::GCMarker(JSRuntime* rt) - : JSTracer(rt, JSTracer::MarkingTracer, DoNotTraceWeakMaps), - stack(size_t(-1)), - color(BLACK), - unmarkedArenaStackTop(nullptr), - markLaterArenas(0), - started(false), - strictCompartmentChecking(false) -{ -} - -bool -GCMarker::init(JSGCMode gcMode) -{ - return stack.init(gcMode); -} - -void -GCMarker::start() -{ - MOZ_ASSERT(!started); - started = true; - color = BLACK; - - MOZ_ASSERT(!unmarkedArenaStackTop); - MOZ_ASSERT(markLaterArenas == 0); - -} - -void -GCMarker::stop() -{ - MOZ_ASSERT(isDrained()); - - MOZ_ASSERT(started); - started = false; - - MOZ_ASSERT(!unmarkedArenaStackTop); - MOZ_ASSERT(markLaterArenas == 0); - - /* Free non-ballast stack memory. 
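The growth policy in MarkStack::enlarge is easy to miss in the diff noise, so here is a minimal sketch of just that calculation. ComputeNewCapacity is an invented name, and std::min stands in for mozilla's Min.

```cpp
#include <algorithm>
#include <cstddef>

// Capacity doubles but is capped at maxCapacity; the request fails (and the
// caller falls back to delayed marking) if even the doubled size cannot hold
// `count` more entries.
bool ComputeNewCapacity(size_t capacity, size_t maxCapacity, size_t count, size_t* out) {
    size_t newCapacity = std::min(maxCapacity, capacity * 2);
    if (newCapacity < capacity + count)
        return false;
    *out = newCapacity;
    return true;
}
```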
*/ - stack.reset(); -} - -void -GCMarker::reset() -{ - color = BLACK; - - stack.reset(); - MOZ_ASSERT(isMarkStackEmpty()); - - while (unmarkedArenaStackTop) { - ArenaHeader* aheader = unmarkedArenaStackTop; - MOZ_ASSERT(aheader->hasDelayedMarking); - MOZ_ASSERT(markLaterArenas); - unmarkedArenaStackTop = aheader->getNextDelayedMarking(); - aheader->unsetDelayedMarking(); - aheader->markOverflow = 0; - aheader->allocatedDuringIncremental = 0; - markLaterArenas--; - } - MOZ_ASSERT(isDrained()); - MOZ_ASSERT(!markLaterArenas); -} - -void -GCMarker::markDelayedChildren(ArenaHeader* aheader) -{ - if (aheader->markOverflow) { - bool always = aheader->allocatedDuringIncremental; - aheader->markOverflow = 0; - - for (ArenaCellIterUnderGC i(aheader); !i.done(); i.next()) { - TenuredCell* t = i.getCell(); - if (always || t->isMarked()) { - t->markIfUnmarked(); - JS_TraceChildren(this, t, MapAllocToTraceKind(aheader->getAllocKind())); - } - } - } else { - MOZ_ASSERT(aheader->allocatedDuringIncremental); - PushArena(this, aheader); - } - aheader->allocatedDuringIncremental = 0; - /* - * Note that during an incremental GC we may still be allocating into - * aheader. However, prepareForIncrementalGC sets the - * allocatedDuringIncremental flag if we continue marking. - */ -} - -bool -GCMarker::markDelayedChildren(SliceBudget& budget) -{ - GCRuntime& gc = runtime()->gc; - gcstats::AutoPhase ap(gc.stats, gc.state() == MARK, gcstats::PHASE_MARK_DELAYED); - - MOZ_ASSERT(unmarkedArenaStackTop); - do { - /* - * If marking gets delayed at the same arena again, we must repeat - * marking of its things. For that we pop arena from the stack and - * clear its hasDelayedMarking flag before we begin the marking. - */ - ArenaHeader* aheader = unmarkedArenaStackTop; - MOZ_ASSERT(aheader->hasDelayedMarking); - MOZ_ASSERT(markLaterArenas); - unmarkedArenaStackTop = aheader->getNextDelayedMarking(); - aheader->unsetDelayedMarking(); - markLaterArenas--; - markDelayedChildren(aheader); - - budget.step(150); - if (budget.isOverBudget()) - return false; - } while (unmarkedArenaStackTop); - MOZ_ASSERT(!markLaterArenas); - - return true; -} - -#ifdef DEBUG -void -GCMarker::checkZone(void* p) -{ - MOZ_ASSERT(started); - DebugOnly cell = static_cast(p); - MOZ_ASSERT_IF(cell->isTenured(), cell->asTenured().zone()->isCollecting()); -} -#endif - -size_t -GCMarker::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const -{ - size_t size = stack.sizeOfExcludingThis(mallocSizeOf); - for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next()) - size += zone->gcGrayRoots.sizeOfExcludingThis(mallocSizeOf); - return size; -} - -void -js::SetMarkStackLimit(JSRuntime* rt, size_t limit) -{ - rt->gc.setMarkStackLimit(limit); -} diff --git a/js/src/gc/Tracer.h b/js/src/gc/Tracer.h index dd660d3dfa78..7741739fb84e 100644 --- a/js/src/gc/Tracer.h +++ b/js/src/gc/Tracer.h @@ -7,347 +7,117 @@ #ifndef js_Tracer_h #define js_Tracer_h -#include "mozilla/DebugOnly.h" +#include "jsfriendapi.h" -#include "gc/Heap.h" -#include "js/GCAPI.h" -#include "js/SliceBudget.h" -#include "js/TracingAPI.h" +#include "gc/Barrier.h" -class JSLinearString; -class JSRope; namespace js { -class BaseShape; -class GCMarker; -class LazyScript; -class NativeObject; -class ObjectGroup; -namespace gc { -struct ArenaHeader; -} -namespace jit { -class JitCode; -} -static const size_t NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY = 4096; -static const size_t INCREMENTAL_MARK_STACK_BASE_CAPACITY = 32768; - -/* - * When the native stack is low, the GC does not call 
JS_TraceChildren to mark - * the reachable "children" of the thing. Rather the thing is put aside and - * JS_TraceChildren is called later with more space on the C stack. - * - * To implement such delayed marking of the children with minimal overhead for - * the normal case of sufficient native stack, the code adds a field per arena. - * The field markingDelay->link links all arenas with delayed things into a - * stack list with the pointer to stack top in GCMarker::unmarkedArenaStackTop. - * GCMarker::delayMarkingChildren adds arenas to the stack as necessary while - * markDelayedChildren pops the arenas from the stack until it empties. - */ -class MarkStack -{ - friend class GCMarker; - - uintptr_t* stack_; - uintptr_t* tos_; - uintptr_t* end_; - - // The capacity we start with and reset() to. - size_t baseCapacity_; - size_t maxCapacity_; - - public: - explicit MarkStack(size_t maxCapacity) - : stack_(nullptr), - tos_(nullptr), - end_(nullptr), - baseCapacity_(0), - maxCapacity_(maxCapacity) - {} - - ~MarkStack() { - js_free(stack_); - } - - size_t capacity() { return end_ - stack_; } - - ptrdiff_t position() const { return tos_ - stack_; } - - void setStack(uintptr_t* stack, size_t tosIndex, size_t capacity) { - stack_ = stack; - tos_ = stack + tosIndex; - end_ = stack + capacity; - } - - bool init(JSGCMode gcMode); - - void setBaseCapacity(JSGCMode mode); - size_t maxCapacity() const { return maxCapacity_; } - void setMaxCapacity(size_t maxCapacity); - - bool push(uintptr_t item) { - if (tos_ == end_) { - if (!enlarge(1)) - return false; - } - MOZ_ASSERT(tos_ < end_); - *tos_++ = item; - return true; - } - - bool push(uintptr_t item1, uintptr_t item2, uintptr_t item3) { - uintptr_t* nextTos = tos_ + 3; - if (nextTos > end_) { - if (!enlarge(3)) - return false; - nextTos = tos_ + 3; - } - MOZ_ASSERT(nextTos <= end_); - tos_[0] = item1; - tos_[1] = item2; - tos_[2] = item3; - tos_ = nextTos; - return true; - } - - bool isEmpty() const { - return tos_ == stack_; - } - - uintptr_t pop() { - MOZ_ASSERT(!isEmpty()); - return *--tos_; - } - - void reset(); - - /* Grow the stack, ensuring there is space for at least count elements. */ - bool enlarge(unsigned count); - - void setGCMode(JSGCMode gcMode); - - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; -}; - -#ifdef DEBUG -namespace gc { +// Internal Tracing API +// +// Tracing is an abstract visitation of each edge in a JS heap graph.[1] The +// most common (and performance sensitive) use of this infrastructure is for GC +// "marking" as part of the mark-and-sweep collector; however, this +// infrastructure is much more general than that and is used for many other +// purposes as well. +// +// One commonly misunderstood subtlety of the tracing architecture is the role +// of graph verticies versus graph edges. Graph verticies are the heap +// allocations -- GC things -- that are returned by Allocate. Graph edges are +// pointers -- including tagged pointers like Value and jsid -- that link the +// allocations into a complex heap. The tracing API deals *only* with edges. +// Any action taken on the target of a graph edge is independent to the tracing +// itself. +// +// Another common misunderstanding relates to the role of the JSTracer. The +// JSTracer instance determines what tracing does when visiting an edge; it +// does not itself participate in the tracing process, other than to be passed +// through as opaque data. It works like a closure in that respect. 
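A small illustration of the edge-versus-vertex point made in the comment above: the tracing functions receive the location of a pointer, not the pointee, so a moving collector can rewrite the edge in place. CellSketch, Forward, and TraceEdgeSketch are invented stand-ins, not SpiderMonkey types.

```cpp
struct CellSketch { bool marked = false; };   // a GC thing: a graph vertex

// Identity here; a moving collector may return a new address for the cell.
CellSketch* Forward(CellSketch* c) { return c; }

// The edge is the slot holding the pointer, and it is what gets updated.
void TraceEdgeSketch(CellSketch** edge) {
    if (!*edge)
        return;
    (*edge)->marked = true;    // act on the vertex...
    *edge = Forward(*edge);    // ...but store back through the edge
}
```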
+// +// Tracing implementations internal to SpiderMonkey should use these interfaces +// instead of the public interfaces in js/TracingAPI.h. Unlike the public +// tracing methods, these work on internal types and avoid an external call. +// +// Note that the implementations for these methods are, surprisingly, in +// js/src/gc/Marking.cpp. This is so that the compiler can inline as much as +// possible in the common, marking pathways. Conceptually, however, they remain +// as part of the generic "tracing" architecture, rather than the more specific +// marking implementation of tracing. +// +// 1 - In SpiderMonkey, we call this concept tracing rather than visiting +// because "visiting" is already used by the compiler. Also, it's been +// called "tracing" forever and changing it would be extremely difficult at +// this point. +// Trace through an edge in the live object graph on behalf of tracing. The +// effect of tracing the edge depends on the JSTracer being used. template -extern bool -ZoneIsGCMarking(T* thing); - -template -extern bool -ZoneIsAtomsZoneForString(JSRuntime* rt, T* thing); - -} /* namespace gc */ -#endif - -#define JS_COMPARTMENT_ASSERT(rt, thing) \ - MOZ_ASSERT(gc::ZoneIsGCMarking((thing)) || gc::ZoneIsAtomsZoneForString((rt), (thing))) - -class GCMarker : public JSTracer -{ - public: - explicit GCMarker(JSRuntime* rt); - bool init(JSGCMode gcMode); - - void setMaxCapacity(size_t maxCap) { stack.setMaxCapacity(maxCap); } - size_t maxCapacity() const { return stack.maxCapacity(); } - - void start(); - void stop(); - void reset(); - - // Mark the given GC thing and traverse its children at some point. - template void traverse(T thing); - - // Calls traverse on target after making additional assertions. - template void traverse(S source, T target); - - // C++ requires explicit declarations of partial template instantiations. - template void traverse(S source, jsid target); - - /* - * Care must be taken changing the mark color from gray to black. The cycle - * collector depends on the invariant that there are no black to gray edges - * in the GC heap. This invariant lets the CC not trace through black - * objects. If this invariant is violated, the cycle collector may free - * objects that are still reachable. - */ - void setMarkColorGray() { - MOZ_ASSERT(isDrained()); - MOZ_ASSERT(color == gc::BLACK); - color = gc::GRAY; - } - void setMarkColorBlack() { - MOZ_ASSERT(isDrained()); - MOZ_ASSERT(color == gc::GRAY); - color = gc::BLACK; - } - uint32_t markColor() const { return color; } - - void delayMarkingArena(gc::ArenaHeader* aheader); - void delayMarkingChildren(const void* thing); - void markDelayedChildren(gc::ArenaHeader* aheader); - bool markDelayedChildren(SliceBudget& budget); - bool hasDelayedChildren() const { - return !!unmarkedArenaStackTop; - } - - bool isDrained() { - return isMarkStackEmpty() && !unmarkedArenaStackTop; - } - - bool drainMarkStack(SliceBudget& budget); - - void setGCMode(JSGCMode mode) { stack.setGCMode(mode); } - - size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; - -#ifdef DEBUG - bool shouldCheckCompartments() { return strictCompartmentChecking; } -#endif - - /* This is public exclusively for ScanRope. */ - MarkStack stack; - - private: -#ifdef DEBUG - void checkZone(void* p); -#else - void checkZone(void* p) {} -#endif - - /* - * We use a common mark stack to mark GC things of different types and use - * the explicit tags to distinguish them when it cannot be deduced from - * the context of push or pop operation. 
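The color-switching discipline in the removed setMarkColorGray/setMarkColorBlack methods is worth spelling out: the color may only change while the mark stack is drained, which preserves the no-black-to-gray-edges invariant the cycle collector depends on. A minimal sketch with invented names:

```cpp
#include <cassert>

enum ColorSketch { BLACK_SKETCH, GRAY_SKETCH };

struct MarkerColorSketch {
    ColorSketch color = BLACK_SKETCH;
    bool drained = true;   // stand-in for isDrained()

    void setMarkColorGray() {
        assert(drained && color == BLACK_SKETCH);  // only flip between drained stacks
        color = GRAY_SKETCH;
    }
    void setMarkColorBlack() {
        assert(drained && color == GRAY_SKETCH);
        color = BLACK_SKETCH;
    }
};
```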
- */ - enum StackTag { - ValueArrayTag, - ObjectTag, - GroupTag, - SavedValueArrayTag, - JitCodeTag, - LastTag = JitCodeTag - }; - - static const uintptr_t StackTagMask = 7; - static_assert(StackTagMask >= uintptr_t(LastTag), "The tag mask must subsume the tags."); - static_assert(StackTagMask <= gc::CellMask, "The tag mask must be embeddable in a Cell*."); - - // Push an object onto the stack for later tracing and assert that it has - // already been marked. - void repush(JSObject* obj) { - MOZ_ASSERT(gc::TenuredCell::fromPointer(obj)->isMarked(markColor())); - pushTaggedPtr(ObjectTag, obj); - } - - template void markAndTraceChildren(T* thing); - template void markAndPush(StackTag tag, T* thing); - template void markAndScan(T* thing); - void eagerlyMarkChildren(JSLinearString* str); - void eagerlyMarkChildren(JSRope* rope); - void eagerlyMarkChildren(JSString* str); - void eagerlyMarkChildren(LazyScript *thing); - void eagerlyMarkChildren(Shape* shape); - void lazilyMarkChildren(ObjectGroup* group); - - // We may not have concrete types yet, so this has to be out of the header. - template - void dispatchToTraceChildren(T* thing); - - // Mark the given GC thing, but do not trace its children. Return true - // if the thing became marked. - template - bool mark(T* thing); - - void pushTaggedPtr(StackTag tag, void* ptr) { - checkZone(ptr); - uintptr_t addr = reinterpret_cast(ptr); - MOZ_ASSERT(!(addr & StackTagMask)); - if (!stack.push(addr | uintptr_t(tag))) - delayMarkingChildren(ptr); - } - - void pushValueArray(JSObject* obj, void* start, void* end) { - checkZone(obj); - - MOZ_ASSERT(start <= end); - uintptr_t tagged = reinterpret_cast(obj) | GCMarker::ValueArrayTag; - uintptr_t startAddr = reinterpret_cast(start); - uintptr_t endAddr = reinterpret_cast(end); - - /* - * Push in the reverse order so obj will be on top. If we cannot push - * the array, we trigger delay marking for the whole object. - */ - if (!stack.push(endAddr, startAddr, tagged)) - delayMarkingChildren(obj); - } - - bool isMarkStackEmpty() { - return stack.isEmpty(); - } - - bool restoreValueArray(NativeObject* obj, void** vpp, void** endp); - void saveValueRanges(); - inline void processMarkStackTop(SliceBudget& budget); - - /* The color is only applied to objects and functions. */ - uint32_t color; - - /* Pointer to the top of the stack of arenas we are delaying marking on. */ - js::gc::ArenaHeader* unmarkedArenaStackTop; - - /* Count of arenas that are currently in the stack. */ - mozilla::DebugOnly markLaterArenas; - - /* Assert that start and stop are called with correct ordering. */ - mozilla::DebugOnly started; - - /* - * If this is true, all marked objects must belong to a compartment being - * GCed. This is used to look for compartment bugs. - */ - mozilla::DebugOnly strictCompartmentChecking; -}; - -// Append traced things to a buffer on the zone for use later in the GC. -// See the comment in GCRuntime.h above grayBufferState for details. -class BufferGrayRootsTracer : public JS::CallbackTracer -{ - // Set to false if we OOM while buffering gray roots. 
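A sketch of the push ordering used by pushValueArray above: the three words go onto the stack in reverse so that the tagged object ends up on top for the pop path. The toy stack type is illustrative; the real MarkStack push can fail, in which case the whole object falls back to delayed marking.

```cpp
#include <cstdint>
#include <vector>

struct StackSketch {
    std::vector<uintptr_t> words;
    void push3(uintptr_t a, uintptr_t b, uintptr_t c) {
        words.push_back(a);
        words.push_back(b);
        words.push_back(c);
    }
    uintptr_t pop() { uintptr_t w = words.back(); words.pop_back(); return w; }
};

void PushValueArraySketch(StackSketch& stack, uintptr_t taggedObj,
                          uintptr_t startAddr, uintptr_t endAddr) {
    // Reverse order: the tagged object word is pushed last, so it is the
    // first thing the pop path sees when unpacking the triple.
    stack.push3(endAddr, startAddr, taggedObj);
}
```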
- bool bufferingGrayRootsFailed; - - void appendGrayRoot(gc::TenuredCell* thing, JSGCTraceKind kind); - - public: - explicit BufferGrayRootsTracer(JSRuntime* rt) - : JS::CallbackTracer(rt, grayTraceCallback), bufferingGrayRootsFailed(false) - {} - - static void grayTraceCallback(JS::CallbackTracer* trc, void** thingp, JSGCTraceKind kind) { - auto tracer = static_cast(trc); - tracer->appendGrayRoot(gc::TenuredCell::fromPointer(*thingp), kind); - } - - bool failed() const { return bufferingGrayRootsFailed; } -}; - void -SetMarkStackLimit(JSRuntime* rt, size_t limit); +TraceEdge(JSTracer* trc, BarrieredBase* thingp, const char* name); -// Return true if this trace is happening on behalf of gray buffering during -// the marking phase of incremental GC. -inline bool -IsBufferingGrayRoots(JSTracer* trc) -{ - return trc->isCallbackTracer() && - trc->asCallbackTracer()->hasCallback(BufferGrayRootsTracer::grayTraceCallback); -} +// Trace through a "root" edge. These edges are the initial edges in the object +// graph traversal. Root edges are asserted to only be traversed in the initial +// phase of a GC. +template +void +TraceRoot(JSTracer* trc, T* thingp, const char* name); -} /* namespace js */ +// Like TraceEdge, but for edges that do not use one of the automatic barrier +// classes and, thus, must be treated specially for moving GC. This method is +// separate from TraceEdge to make accidental use of such edges more obvious. +template +void +TraceManuallyBarrieredEdge(JSTracer* trc, T* thingp, const char* name); + +// Trace all edges contained in the given array. +template +void +TraceRange(JSTracer* trc, size_t len, BarrieredBase* vec, const char* name); + +// Trace all root edges in the given array. +template +void +TraceRootRange(JSTracer* trc, size_t len, T* vec, const char* name); + +// Trace an edge that crosses compartment boundaries. If the compartment of the +// destination thing is not being GC'd, then the edge will not be traced. +template +void +TraceCrossCompartmentEdge(JSTracer* trc, JSObject* src, BarrieredBase* dst, + const char* name); + +// As above but with manual barriers. +template +void +TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc, JSObject* src, T* dst, + const char* name); + +// Permanent atoms and well-known symbols are shared between runtimes and must +// use a separate marking path so that we can filter them out of normal heap +// tracing. +template +void +TraceProcessGlobalRoot(JSTracer* trc, T* thing, const char* name); + +// Trace a root edge that uses the base GC thing type, instead of a more +// specific type. +void +TraceGenericPointerRoot(JSTracer* trc, gc::Cell** thingp, const char* name); + +// Trace a non-root edge that uses the base GC thing type, instead of a more +// specific type. +void +TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, gc::Cell** thingp, const char* name); + +// Object slots are not stored as a contiguous vector, so marking them as such +// will lead to the wrong indicies, if such are requested when tracing. +void +TraceObjectSlots(JSTracer* trc, NativeObject* obj, uint32_t start, uint32_t nslots); + +// Depricated. Please use one of the strongly typed variants above. 
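A hypothetical trace hook showing how the typed entry points declared above are intended to be used together. MyThing, its fields, and the edge names are all invented, and the sketch assumes the usual internal headers (gc/Barrier.h, gc/Tracer.h) are in scope.

```cpp
// Barriered members go through TraceEdge; raw pointers that are barriered by
// hand go through TraceManuallyBarrieredEdge, making that choice visible at
// the call site.
class MyThing {
    HeapPtrObject proto_;     // wrapped in an automatic barrier class
    JSScript* rawScript_;     // assumed to be manually barriered elsewhere

  public:
    void traceChildren(JSTracer* trc) {
        TraceEdge(trc, &proto_, "MyThing proto");
        if (rawScript_)
            TraceManuallyBarrieredEdge(trc, &rawScript_, "MyThing script");
    }
};
```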
+void +TraceChildren(JSTracer* trc, void* thing, JSGCTraceKind kind); + +} // namespace js #endif /* js_Tracer_h */ diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp index c6bf23ad16e2..c9fc5c9ef456 100644 --- a/js/src/jit/JitFrames.cpp +++ b/js/src/jit/JitFrames.cpp @@ -1094,7 +1094,7 @@ MarkIonJSFrame(JSTracer* trc, const JitFrameIterator& frame) for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills()); iter.more(); iter++) { --spill; if (gcRegs.has(*iter)) - gc::TraceGenericPointerRoot(trc, reinterpret_cast(spill), "ion-gc-spill"); + TraceGenericPointerRoot(trc, reinterpret_cast(spill), "ion-gc-spill"); else if (valueRegs.has(*iter)) TraceRoot(trc, reinterpret_cast(spill), "ion-value-spill"); } @@ -1422,7 +1422,7 @@ MarkJitExitFrame(JSTracer* trc, const JitFrameIterator& frame) TraceRoot(trc, reinterpret_cast(argBase), "ion-vm-args"); break; case VMFunction::RootCell: - gc::TraceGenericPointerRoot(trc, reinterpret_cast(argBase), "ion-vm-args"); + TraceGenericPointerRoot(trc, reinterpret_cast(argBase), "ion-vm-args"); break; } @@ -1456,7 +1456,7 @@ MarkJitExitFrame(JSTracer* trc, const JitFrameIterator& frame) TraceRoot(trc, footer->outParam(), "ion-vm-outvp"); break; case VMFunction::RootCell: - gc::TraceGenericPointerRoot(trc, footer->outParam(), "ion-vm-out"); + TraceGenericPointerRoot(trc, footer->outParam(), "ion-vm-out"); break; } } diff --git a/js/src/jit/arm/Assembler-arm.cpp b/js/src/jit/arm/Assembler-arm.cpp index d51303b4b79a..305feba0d1a0 100644 --- a/js/src/jit/arm/Assembler-arm.cpp +++ b/js/src/jit/arm/Assembler-arm.cpp @@ -812,8 +812,8 @@ TraceOneDataRelocation(JSTracer* trc, Iter* iter) MOZ_ASSERT(!(uintptr_t(ptr) & 0x1)); // No barrier needed since these are constants. - gc::TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast(&ptr), - "ion-masm-ptr"); + TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast(&ptr), + "ion-masm-ptr"); if (ptr != prior) { MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest, Assembler::Always, rs, ins); diff --git a/js/src/jit/mips/Assembler-mips.cpp b/js/src/jit/mips/Assembler-mips.cpp index 102eb632f3df..b2345a2e1e51 100644 --- a/js/src/jit/mips/Assembler-mips.cpp +++ b/js/src/jit/mips/Assembler-mips.cpp @@ -295,7 +295,7 @@ TraceOneDataRelocation(JSTracer* trc, Instruction* inst) MOZ_ASSERT(!(uintptr_t(ptr) & 0x1)); // No barrier needed since these are constants. - gc::TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast(&ptr), + TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast(&ptr), "ion-masm-ptr"); if (ptr != prior) { Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr)); diff --git a/js/src/jit/x86-shared/Assembler-x86-shared.cpp b/js/src/jit/x86-shared/Assembler-x86-shared.cpp index cbb9f6dd2009..7542b6f0bd72 100644 --- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp +++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp @@ -74,8 +74,8 @@ TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader MOZ_ASSERT(!(*reinterpret_cast(ptr) & 0x1)); // No barrier needed since these are constants. 
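The three assembler hunks above share one pattern; here is a condensed sketch of it, assuming it lives in namespace js with gc/Tracer.h available. The slot parameter and the final store are stand-ins for the per-architecture patching helpers (ma_mov_patch, UpdateLuiOriValue, and friends).

```cpp
// Copy the pointer baked into the instruction stream, trace that copy as an
// unbarriered generic edge, and re-patch only if the collector moved the
// target.
static void TraceOneDataRelocationSketch(JSTracer* trc, void** slot) {
    void* prior = *slot;
    void* ptr = prior;

    // No pre-barrier is needed: the value is a constant in generated code.
    TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
                                             "ion-masm-ptr");

    if (ptr != prior)
        *slot = ptr;   // stands in for re-patching the instruction
}
```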
-        gc::TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(ptr),
-                                                     "ion-masm-ptr");
+        TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(ptr),
+                                                 "ion-masm-ptr");
     }
 }
 
diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp
index 21fadd3e325b..b2e3ab51d410 100644
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -4087,7 +4087,7 @@ JSObject::traceChildren(JSTracer* trc)
         {
             GetObjectSlotNameFunctor func(nobj);
             JS::AutoTracingDetails ctx(trc, func);
-            MarkObjectSlots(trc, nobj, 0, nobj->slotSpan());
+            TraceObjectSlots(trc, nobj, 0, nobj->slotSpan());
         }
 
         do {
diff --git a/js/src/vm/Shape.h b/js/src/vm/Shape.h
index 439f8d77132e..6110d81de3a5 100644
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -933,7 +933,7 @@ class Shape : public gc::TenuredCell
 
     static inline ThingRootKind rootKind() { return THING_ROOT_SHAPE; }
 
-    inline void traceChildren(JSTracer* trc);
+    void traceChildren(JSTracer* trc);
 
     inline Shape* search(ExclusiveContext* cx, jsid id);
     inline Shape* searchLinear(jsid id);
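A usage sketch matching the jsobj.cpp hunk: tracing every slot of a native object via the new TraceObjectSlots entry point. The wrapper function name is invented; a sub-range call (as in the StoreBuffer hunk earlier) works the same way, provided start and count are clamped to the object's current slotSpan().

```cpp
static void TraceAllSlotsSketch(JSTracer* trc, NativeObject* nobj) {
    // Slots are not one contiguous vector, so the per-slot entry point is
    // used rather than a raw range trace.
    TraceObjectSlots(trc, nobj, 0, nobj->slotSpan());
}
```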