Backout 6f47fa34dcbc (bug 729760) for make check failures

Ed Morley 2012-07-25 12:22:28 +01:00
Parent d84fd7964b
Commit 7508699131
11 changed files with 241 additions and 576 deletions

View file

@@ -423,47 +423,38 @@ struct ArenaHeader
* chunk. The latter allows us to quickly check if the arena is allocated
* during the conservative GC scanning without searching for the arena in
* the list.
*
* We use 8 bits for the allocKind so the compiler can use byte-level memory
* instructions to access it.
*/
size_t allocKind : 8;
/*
* When collecting we sometimes need to keep an auxiliary list of arenas,
* for which we use the following fields. This happens for several reasons:
*
* When recursive marking uses too much stack the marking is delayed and the
* corresponding arenas are put into a stack. To distinguish the bottom of
* the stack from the arenas not present in the stack we use the
* markOverflow flag to tag arenas on the stack.
* When recursive marking uses too much stack the marking is delayed and
* the corresponding arenas are put into a stack using the following field
* as a linkage. To distinguish the bottom of the stack from the arenas
* not present in the stack we use an extra flag to tag arenas on the
* stack.
*
* Delayed marking is also used for arenas that we allocate into during an
* incremental GC. In this case, we intend to mark all the objects in the
* arena, and it's faster to do this marking in bulk.
*
* When sweeping we keep track of which arenas have been allocated since the
* end of the mark phase. This allows us to tell whether a pointer to an
* unmarked object is yet to be finalized or has already been reallocated.
* We set the allocatedDuringIncremental flag for this and clear it at the
* end of the sweep phase.
*
* To minimize the ArenaHeader size we record the next linkage as
* arenaAddress() >> ArenaShift and pack it with the allocKind field and the
* flags.
* To minimize the ArenaHeader size we record the next delayed marking
* linkage as arenaAddress() >> ArenaShift and pack it with the allocKind
* field and hasDelayedMarking flag. We use 8 bits for the allocKind, not
* ArenaShift - 1, so the compiler can use byte-level memory instructions
* to access it.
*/
public:
size_t hasDelayedMarking : 1;
size_t allocatedDuringIncremental : 1;
size_t markOverflow : 1;
size_t auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
size_t nextDelayedMarking : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
static void staticAsserts() {
/* We must be able to fit the allocKind into uint8_t. */
JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
/*
* auxNextLink packing assumes that ArenaShift has enough bits
* nextDelayedMarking packing assumes that ArenaShift has enough bits
* to cover allocKind and hasDelayedMarking.
*/
JS_STATIC_ASSERT(ArenaShift >= 8 + 1 + 1 + 1);
@@ -496,7 +487,7 @@ struct ArenaHeader
markOverflow = 0;
allocatedDuringIncremental = 0;
hasDelayedMarking = 0;
auxNextLink = 0;
nextDelayedMarking = 0;
}
inline uintptr_t arenaAddress() const;
@@ -528,11 +519,6 @@ struct ArenaHeader
inline ArenaHeader *getNextDelayedMarking() const;
inline void setNextDelayedMarking(ArenaHeader *aheader);
inline void unsetDelayedMarking();
inline ArenaHeader *getNextAllocDuringSweep() const;
inline void setNextAllocDuringSweep(ArenaHeader *aheader);
inline void unsetAllocDuringSweep();
};
struct Arena
@@ -896,48 +882,15 @@ ArenaHeader::setFirstFreeSpan(const FreeSpan *span)
inline ArenaHeader *
ArenaHeader::getNextDelayedMarking() const
{
JS_ASSERT(hasDelayedMarking);
return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
return &reinterpret_cast<Arena *>(nextDelayedMarking << ArenaShift)->aheader;
}
inline void
ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
{
JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
JS_ASSERT(!auxNextLink && !hasDelayedMarking);
hasDelayedMarking = 1;
auxNextLink = aheader->arenaAddress() >> ArenaShift;
}
inline void
ArenaHeader::unsetDelayedMarking()
{
JS_ASSERT(hasDelayedMarking);
hasDelayedMarking = 0;
auxNextLink = 0;
}
inline ArenaHeader *
ArenaHeader::getNextAllocDuringSweep() const
{
JS_ASSERT(allocatedDuringIncremental);
return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
}
inline void
ArenaHeader::setNextAllocDuringSweep(ArenaHeader *aheader)
{
JS_ASSERT(!auxNextLink && !allocatedDuringIncremental);
allocatedDuringIncremental = 1;
auxNextLink = aheader->arenaAddress() >> ArenaShift;
}
inline void
ArenaHeader::unsetAllocDuringSweep()
{
JS_ASSERT(allocatedDuringIncremental);
allocatedDuringIncremental = 0;
auxNextLink = 0;
nextDelayedMarking = aheader->arenaAddress() >> ArenaShift;
}
JS_ALWAYS_INLINE void

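The packing scheme described in the ArenaHeader comment above works because arenas are aligned to 1 << ArenaShift bytes, so the low ArenaShift bits of an arena address are always zero and the address survives a round trip through the narrow bitfield. A minimal standalone sketch of the idea, assuming 64-bit words and ArenaShift == 12; the names here are illustrative, not SpiderMonkey's:

    #include <cassert>
    #include <cstdint>

    static const uint64_t kArenaShift = 12;      // assumed log2 of the arena size

    struct PackedHeader {
        uint64_t allocKind : 8;                  // byte-sized so byte loads suffice
        uint64_t hasDelayedMarking : 1;
        uint64_t allocatedDuringIncremental : 1;
        uint64_t markOverflow : 1;
        uint64_t nextLink : 64 - 8 - 1 - 1 - 1;  // holds arenaAddress >> kArenaShift

        void setNext(uint64_t arenaAddress) {
            // Arena addresses are shift-aligned, so no bits are lost here.
            assert((arenaAddress & ((uint64_t(1) << kArenaShift) - 1)) == 0);
            hasDelayedMarking = 1;
            nextLink = arenaAddress >> kArenaShift;
        }
        uint64_t next() const {
            assert(hasDelayedMarking);
            return nextLink << kArenaShift;      // reconstruct the full address
        }
    };

This is also why staticAsserts() above demands ArenaShift >= 8 + 1 + 1 + 1: the link field gives up exactly the bits that allocKind and the flags occupy, so the shift must free at least that many zero bits for the address to be recoverable.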
View file

@@ -775,11 +775,6 @@ JSRuntime::JSRuntime()
gcDisableStrictProxyCheckingCount(0),
gcIncrementalState(gc::NO_INCREMENTAL),
gcLastMarkSlice(false),
gcSweepOnBackgroundThread(false),
gcSweepPhase(0),
gcSweepCompartmentIndex(0),
gcSweepKindIndex(0),
gcArenasAllocatedDuringSweep(NULL),
gcInterFrameGC(0),
gcSliceBudget(SliceBudget::Unlimited),
gcIncrementalEnabled(true),

View file

@@ -561,21 +561,6 @@ struct JSRuntime : js::RuntimeFriendFields
/* Indicates that the last incremental slice exhausted the mark stack. */
bool gcLastMarkSlice;
/* Whether any sweeping will take place in the separate GC helper thread. */
bool gcSweepOnBackgroundThread;
/*
* Incremental sweep state.
*/
int gcSweepPhase;
ptrdiff_t gcSweepCompartmentIndex;
int gcSweepKindIndex;
/*
* List head of arenas allocated during the sweep phase.
*/
js::gc::ArenaHeader *gcArenasAllocatedDuringSweep;
/*
* Indicates that a GC slice has taken place in the middle of an animation
* frame, rather than at the beginning. In this case, the next slice will be

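The three gcSweep* fields removed above act as a cursor into the sweep's nested loops (finalize phase → compartment → alloc kind), which is what lets an incremental slice stop mid-sweep and a later slice resume exactly where it stopped; the real driver is the SweepPhase loop in jsgc.cpp further down in this diff. A rough self-contained sketch of the pattern, with hypothetical names:

    // The cursor outlives the call, so a later slice picks up where this
    // one yielded.
    struct SweepCursor {
        int phase = 0;
        int compartment = 0;
        int kind = 0;
    };

    // Returns true when sweeping is complete, false when the budget ran out.
    bool SweepSlice(SweepCursor &c, int numPhases, int numCompartments,
                    int kindsPerPhase, long long &budget)
    {
        for (; c.phase < numPhases; ++c.phase) {
            for (; c.compartment < numCompartments; ++c.compartment) {
                while (c.kind < kindsPerPhase) {
                    if (budget-- <= 0)
                        return false;            // out of time: yield mid-sweep
                    // ...finalize arenas for (phase, compartment, kind)...
                    ++c.kind;
                }
                c.kind = 0;
            }
            c.compartment = 0;
        }
        return true;
    }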
View file

@@ -47,7 +47,6 @@ JSCompartment::JSCompartment(JSRuntime *rt)
needsBarrier_(false),
gcState(NoGCScheduled),
gcPreserveCode(false),
gcStarted(false),
gcBytes(0),
gcTriggerBytes(0),
gcHeapGrowthFactor(3.0),

View file

@@ -174,7 +174,6 @@ struct JSCompartment
CompartmentGCState gcState;
bool gcPreserveCode;
bool gcStarted;
public:
bool isCollecting() const {
@@ -227,19 +226,6 @@ struct JSCompartment
gcPreserveCode = preserving;
}
bool wasGCStarted() const {
return gcStarted;
}
void setGCStarted(bool started) {
JS_ASSERT(rt->isHeapBusy());
gcStarted = started;
}
bool isGCSweeping() {
return wasGCStarted() && rt->gcIncrementalState == js::gc::SWEEP;
}
size_t gcBytes;
size_t gcTriggerBytes;
size_t gcMaxMallocBytes;

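For context, the allocation path in jsgc.cpp (later in this diff) consulted the removed accessors roughly as follows. This is a lightly condensed excerpt of the removed branch, quoted for readability rather than as a standalone program:

    if (JS_UNLIKELY(comp->wasGCStarted())) {       // some GC phase is active
        if (comp->needsBarrier()) {
            // Incremental marking: mark everything in the fresh arena later.
            aheader->allocatedDuringIncremental = true;
            comp->rt->gcMarker.delayMarkingArena(aheader);
        } else if (comp->isGCSweeping()) {
            // Incremental sweeping: record the arena so the sweep can tell a
            // not-yet-finalized cell apart from a freshly reallocated one.
            PushArenaAllocatedDuringSweep(comp->rt, aheader);
        }
    }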
View file

@@ -127,7 +127,7 @@ js::PrepareForIncrementalGC(JSRuntime *rt)
return;
for (CompartmentsIter c(rt); !c.done(); c.next()) {
if (c->wasGCStarted())
if (c->needsBarrier())
PrepareCompartmentForGC(c);
}
}

View file

@@ -632,7 +632,6 @@ SizeOfJSContext();
D(DEBUG_GC) \
D(DEBUG_MODE_GC) \
D(TRANSPLANT) \
D(RESET) \
\
/* Reasons from Firefox */ \
D(DOM_WINDOW_UTILS) \

View file

@@ -200,29 +200,6 @@ const uint32_t Arena::FirstThingOffsets[] = {
#undef OFFSET
/*
* Finalization order for incrementally swept things.
*/
static const AllocKind FinalizePhaseShapes[] = {
FINALIZE_SHAPE,
FINALIZE_BASE_SHAPE,
FINALIZE_TYPE_OBJECT
};
static const AllocKind* FinalizePhases[] = {
FinalizePhaseShapes
};
static const int FinalizePhaseCount = sizeof(FinalizePhases) / sizeof(AllocKind*);
static const int FinalizePhaseLength[] = {
sizeof(FinalizePhaseShapes) / sizeof(AllocKind)
};
static const gcstats::Phase FinalizePhaseStatsPhase[] = {
gcstats::PHASE_SWEEP_SHAPE
};
#ifdef DEBUG
void
ArenaHeader::checkSynchronizedWithFreeList() const
@@ -350,60 +327,46 @@ Arena::finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize)
return false;
}
/*
* Insert an arena into the list in the appropriate position and update the
* cursor to ensure that any arena before the cursor is full.
*/
void ArenaList::insert(ArenaHeader *a)
{
JS_ASSERT(a);
JS_ASSERT_IF(!head, cursor == &head);
a->next = *cursor;
*cursor = a;
if (!a->hasFreeThings())
cursor = &a->next;
}
template<typename T>
inline bool
FinalizeTypedArenas(FreeOp *fop,
ArenaHeader **src,
ArenaList &dest,
AllocKind thingKind,
SliceBudget &budget)
inline void
FinalizeTypedArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
{
/*
* Finalize arenas from src list, releasing empty arenas and inserting the
* others into dest in an appropriate position.
* Release empty arenas and move non-full arenas with some free things into
* a separate list that we append to al after the loop to ensure that any
* arena before al->cursor is full.
*/
JS_ASSERT_IF(!al->head, al->cursor == &al->head);
ArenaLists::ArenaList available;
ArenaHeader **ap = &al->head;
size_t thingSize = Arena::thingSize(thingKind);
while (ArenaHeader *aheader = *src) {
*src = aheader->next;
while (ArenaHeader *aheader = *ap) {
bool allClear = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
if (allClear)
if (allClear) {
*ap = aheader->next;
aheader->chunk()->releaseArena(aheader);
else
dest.insert(aheader);
budget.step(Arena::thingsPerArena(thingSize));
if (budget.isOverBudget())
return false;
} else if (aheader->hasFreeThings()) {
*ap = aheader->next;
*available.cursor = aheader;
available.cursor = &aheader->next;
} else {
ap = &aheader->next;
}
}
return true;
/* Terminate the available list and append it to al. */
*available.cursor = NULL;
*ap = available.head;
al->cursor = ap;
JS_ASSERT_IF(!al->head, al->cursor == &al->head);
}
/*
* Finalize the list. On return al->cursor points to the first non-empty arena
* after the al->head.
*/
static bool
FinalizeArenas(FreeOp *fop,
ArenaHeader **src,
ArenaList &dest,
AllocKind thingKind,
SliceBudget &budget)
static void
FinalizeArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
{
switch(thingKind) {
case FINALIZE_OBJECT0:
@@ -418,28 +381,34 @@ FinalizeArenas(FreeOp *fop,
case FINALIZE_OBJECT12_BACKGROUND:
case FINALIZE_OBJECT16:
case FINALIZE_OBJECT16_BACKGROUND:
return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<JSObject>(fop, al, thingKind);
break;
case FINALIZE_SCRIPT:
return FinalizeTypedArenas<JSScript>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<JSScript>(fop, al, thingKind);
break;
case FINALIZE_SHAPE:
return FinalizeTypedArenas<Shape>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<Shape>(fop, al, thingKind);
break;
case FINALIZE_BASE_SHAPE:
return FinalizeTypedArenas<BaseShape>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<BaseShape>(fop, al, thingKind);
break;
case FINALIZE_TYPE_OBJECT:
return FinalizeTypedArenas<types::TypeObject>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<types::TypeObject>(fop, al, thingKind);
break;
#if JS_HAS_XML_SUPPORT
case FINALIZE_XML:
return FinalizeTypedArenas<JSXML>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<JSXML>(fop, al, thingKind);
break;
#endif
case FINALIZE_STRING:
return FinalizeTypedArenas<JSString>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<JSString>(fop, al, thingKind);
break;
case FINALIZE_SHORT_STRING:
return FinalizeTypedArenas<JSShortString>(fop, src, dest, thingKind, budget);
FinalizeTypedArenas<JSShortString>(fop, al, thingKind);
break;
case FINALIZE_EXTERNAL_STRING:
return FinalizeTypedArenas<JSExternalString>(fop, src, dest, thingKind, budget);
default:
JS_NOT_REACHED("Invalid alloc kind");
return true;
FinalizeTypedArenas<JSExternalString>(fop, al, thingKind);
break;
}
}
@@ -1467,13 +1436,6 @@ ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
}
}
static inline void
PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
{
arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
runtime->gcArenasAllocatedDuringSweep = arena;
}
inline void *
ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
{
@@ -1527,13 +1489,9 @@ ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
*/
freeLists[thingKind] = aheader->getFirstFreeSpan();
aheader->setAsFullyUsed();
if (JS_UNLIKELY(comp->wasGCStarted())) {
if (comp->needsBarrier()) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
} else if (comp->isGCSweeping()) {
PushArenaAllocatedDuringSweep(comp->rt, aheader);
}
if (JS_UNLIKELY(comp->needsBarrier())) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
}
return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
}
@@ -1560,13 +1518,9 @@ ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
if (!aheader)
return NULL;
if (JS_UNLIKELY(comp->wasGCStarted())) {
if (comp->needsBarrier()) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
} else if (comp->isGCSweeping()) {
PushArenaAllocatedDuringSweep(comp->rt, aheader);
}
if (JS_UNLIKELY(comp->needsBarrier())) {
aheader->allocatedDuringIncremental = true;
comp->rt->gcMarker.delayMarkingArena(aheader);
}
aheader->next = al->head;
if (!al->head) {
@@ -1585,31 +1539,14 @@ ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
void
ArenaLists::finalizeNow(FreeOp *fop, AllocKind thingKind)
{
JS_ASSERT(!fop->onBackgroundThread());
JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE ||
backgroundFinalizeState[thingKind] == BFS_JUST_FINISHED);
ArenaHeader *arenas = arenaLists[thingKind].head;
arenaLists[thingKind].clear();
SliceBudget budget;
FinalizeArenas(fop, &arenas, arenaLists[thingKind], thingKind, budget);
JS_ASSERT(!arenas);
}
void
ArenaLists::queueForForegroundSweep(FreeOp *fop, AllocKind thingKind)
{
JS_ASSERT(!fop->onBackgroundThread());
JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
JS_ASSERT(!arenaListsToSweep[thingKind]);
arenaListsToSweep[thingKind] = arenaLists[thingKind].head;
arenaLists[thingKind].clear();
FinalizeArenas(fop, &arenaLists[thingKind], thingKind);
}
inline void
ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind)
ArenaLists::finalizeLater(FreeOp *fop, AllocKind thingKind)
{
JS_ASSERT(thingKind == FINALIZE_OBJECT0_BACKGROUND ||
thingKind == FINALIZE_OBJECT2_BACKGROUND ||
@@ -1648,7 +1585,7 @@ ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind)
al->clear();
backgroundFinalizeState[thingKind] = BFS_RUN;
} else {
finalizeNow(fop, thingKind);
FinalizeArenas(fop, al, thingKind);
backgroundFinalizeState[thingKind] = BFS_DONE;
}
@@ -1668,11 +1605,9 @@ ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead)
JS_ASSERT(listHead);
AllocKind thingKind = listHead->getAllocKind();
JSCompartment *comp = listHead->compartment;
ArenaList finalized;
SliceBudget budget;
FinalizeArenas(fop, &listHead, finalized, thingKind, budget);
JS_ASSERT(!listHead);
finalized.head = listHead;
FinalizeArenas(fop, &finalized, thingKind);
/*
* After we finish the finalization al->cursor must point to the end of
@@ -1706,7 +1641,7 @@ ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead)
}
void
ArenaLists::queueObjectsForSweep(FreeOp *fop)
ArenaLists::finalizeObjects(FreeOp *fop)
{
finalizeNow(fop, FINALIZE_OBJECT0);
finalizeNow(fop, FINALIZE_OBJECT2);
@@ -1715,12 +1650,12 @@ ArenaLists::queueObjectsForSweep(FreeOp *fop)
finalizeNow(fop, FINALIZE_OBJECT12);
finalizeNow(fop, FINALIZE_OBJECT16);
queueForBackgroundSweep(fop, FINALIZE_OBJECT0_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT2_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT4_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT8_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT12_BACKGROUND);
queueForBackgroundSweep(fop, FINALIZE_OBJECT16_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT0_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT2_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT4_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT8_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT12_BACKGROUND);
finalizeLater(fop, FINALIZE_OBJECT16_BACKGROUND);
#if JS_HAS_XML_SUPPORT
finalizeNow(fop, FINALIZE_XML);
@@ -1728,26 +1663,26 @@ ArenaLists::queueObjectsForSweep(FreeOp *fop)
}
void
ArenaLists::queueStringsForSweep(FreeOp *fop)
ArenaLists::finalizeStrings(FreeOp *fop)
{
queueForBackgroundSweep(fop, FINALIZE_SHORT_STRING);
queueForBackgroundSweep(fop, FINALIZE_STRING);
finalizeLater(fop, FINALIZE_SHORT_STRING);
finalizeLater(fop, FINALIZE_STRING);
finalizeNow(fop, FINALIZE_EXTERNAL_STRING);
}
void
ArenaLists::queueScriptsForSweep(FreeOp *fop)
ArenaLists::finalizeShapes(FreeOp *fop)
{
finalizeNow(fop, FINALIZE_SCRIPT);
finalizeNow(fop, FINALIZE_SHAPE);
finalizeNow(fop, FINALIZE_BASE_SHAPE);
finalizeNow(fop, FINALIZE_TYPE_OBJECT);
}
void
ArenaLists::queueShapesForSweep(FreeOp *fop)
ArenaLists::finalizeScripts(FreeOp *fop)
{
queueForForegroundSweep(fop, FINALIZE_SHAPE);
queueForForegroundSweep(fop, FINALIZE_BASE_SHAPE);
queueForForegroundSweep(fop, FINALIZE_TYPE_OBJECT);
finalizeNow(fop, FINALIZE_SCRIPT);
}
static void
@@ -1988,7 +1923,7 @@ GCMarker::reset()
JS_ASSERT(aheader->hasDelayedMarking);
JS_ASSERT(markLaterArenas);
unmarkedArenaStackTop = aheader->getNextDelayedMarking();
aheader->unsetDelayedMarking();
aheader->hasDelayedMarking = 0;
aheader->markOverflow = 0;
aheader->allocatedDuringIncremental = 0;
markLaterArenas--;
@@ -2071,7 +2006,7 @@ GCMarker::markDelayedChildren(SliceBudget &budget)
JS_ASSERT(aheader->hasDelayedMarking);
JS_ASSERT(markLaterArenas);
unmarkedArenaStackTop = aheader->getNextDelayedMarking();
aheader->unsetDelayedMarking();
aheader->hasDelayedMarking = 0;
markLaterArenas--;
markDelayedChildren(aheader);
@@ -3107,11 +3042,9 @@ ReleaseObservedTypes(JSRuntime *rt)
}
static void
SweepCompartments(FreeOp *fop, gcreason::Reason gcReason)
SweepCompartments(FreeOp *fop, JSGCInvocationKind gckind)
{
JSRuntime *rt = fop->runtime();
JS_ASSERT_IF(gcReason == gcreason::LAST_CONTEXT, !rt->hasContexts());
JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
/* Skip the atomsCompartment. */
@@ -3125,7 +3058,7 @@ SweepCompartments(FreeOp *fop, gcreason::Reason gcReason)
JSCompartment *compartment = *read++;
if (!compartment->hold && compartment->isCollecting() &&
(compartment->arenas.arenaListsAreEmpty() || gcReason == gcreason::LAST_CONTEXT))
(compartment->arenas.arenaListsAreEmpty() || !rt->hasContexts()))
{
compartment->arenas.checkEmptyFreeLists();
if (callback)
@@ -3200,13 +3133,7 @@ BeginMarkPhase(JSRuntime *rt, bool isIncremental)
rt->gcIsFull = true;
for (CompartmentsIter c(rt); !c.done(); c.next()) {
JS_ASSERT(!c->wasGCStarted());
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
if (c->isCollecting())
c->setGCStarted(true);
else
if (!c->isCollecting())
rt->gcIsFull = false;
c->setPreservingCode(ShouldPreserveJITCode(c, currentTime));
@@ -3457,7 +3384,7 @@ ValidateIncrementalMarking(JSRuntime *rt)
#endif
static void
BeginSweepPhase(JSRuntime *rt)
SweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool *startBackgroundSweep)
{
/*
* Sweep phase.
@@ -3485,14 +3412,13 @@ BeginSweepPhase(JSRuntime *rt)
isFull = false;
}
rt->gcSweepOnBackgroundThread =
(rt->hasContexts() && rt->gcHelperThread.prepareForBackgroundSweep());
*startBackgroundSweep = (rt->hasContexts() && rt->gcHelperThread.prepareForBackgroundSweep());
/* Purge the ArenaLists before sweeping. */
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.purge();
FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
FreeOp fop(rt, *startBackgroundSweep, false);
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START);
@@ -3527,82 +3453,37 @@ BeginSweepPhase(JSRuntime *rt)
}
}
/*
* Queue all GC things in all compartments for sweeping, either in the
* foreground or on the background thread.
*
* Note that order is important here for the background case.
*
* Objects are finalized immediately but this may change in the future.
*/
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueObjectsForSweep(&fop);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueStringsForSweep(&fop);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueScriptsForSweep(&fop);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.queueShapesForSweep(&fop);
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_OBJECT);
rt->gcSweepPhase = 0;
rt->gcSweepCompartmentIndex = 0;
rt->gcSweepKindIndex = 0;
/*
* We finalize objects before other GC things to ensure that the object's
* finalizer can access the other things even if they will be freed.
*/
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeObjects(&fop);
}
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
if (rt->gcFinalizeCallback)
rt->gcFinalizeCallback(&fop, JSFINALIZE_END, !rt->gcIsFull);
}
}
bool
ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget)
{
if (!arenaListsToSweep[thingKind])
return true;
ArenaList &dest = arenaLists[thingKind];
return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget);
}
static bool
SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
for (; rt->gcSweepPhase < FinalizePhaseCount ; ++rt->gcSweepPhase) {
gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[rt->gcSweepPhase]);
ptrdiff_t len = rt->compartments.end() - rt->compartments.begin();
for (; rt->gcSweepCompartmentIndex < len ; ++rt->gcSweepCompartmentIndex) {
JSCompartment *c = rt->compartments.begin()[rt->gcSweepCompartmentIndex];
if (c->wasGCStarted()) {
while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) {
AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex];
if (!c->arenas.foregroundFinalize(&fop, kind, sliceBudget))
return false;
++rt->gcSweepKindIndex;
}
}
rt->gcSweepKindIndex = 0;
}
rt->gcSweepCompartmentIndex = 0;
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_STRING);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeStrings(&fop);
}
return true;
}
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SCRIPT);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeScripts(&fop);
}
static void
EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, gcreason::Reason gcReason)
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE);
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->arenas.finalizeShapes(&fop);
}
#ifdef DEBUG
PropertyTree::dumpShapes(rt);
PropertyTree::dumpShapes(rt);
#endif
{
@@ -3623,7 +3504,7 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, gcreason::Reason gcReaso
* This removes compartments from rt->compartment, so we do it last to make
* sure we don't miss sweeping any compartments.
*/
SweepCompartments(&fop, gcReason);
SweepCompartments(&fop, gckind);
#ifndef JS_THREADSAFE
/*
@@ -3635,24 +3516,14 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, gcreason::Reason gcReaso
#endif
}
/*
* Reset the list of arenas marked as being allocated during the sweep phase.
*/
while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) {
rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
arena->unsetAllocDuringSweep();
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
if (rt->gcFinalizeCallback)
rt->gcFinalizeCallback(&fop, JSFINALIZE_END, !isFull);
}
for (CompartmentsIter c(rt); !c.done(); c.next()) {
for (CompartmentsIter c(rt); !c.done(); c.next())
c->setGCLastBytes(c->gcBytes, c->gcMallocAndFreeBytes, gckind);
if (c->wasGCStarted())
c->setGCStarted(false);
JS_ASSERT(!c->wasGCStarted());
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
}
rt->gcLastGCTime = PRMJ_Now();
}
@@ -3737,38 +3608,17 @@ AutoGCSession::~AutoGCSession()
runtime->resetGCMallocBytes();
}
static void
IncrementalCollectSlice(JSRuntime *rt,
int64_t budget,
gcreason::Reason gcReason,
JSGCInvocationKind gcKind);
static void
ResetIncrementalGC(JSRuntime *rt, const char *reason)
{
if (rt->gcIncrementalState == NO_INCREMENTAL)
return;
if (rt->gcIncrementalState == SWEEP) {
/* If we've finished marking then sweep to completion here. */
IncrementalCollectSlice(rt, SliceBudget::Unlimited, gcreason::RESET, GC_NORMAL);
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
return;
}
JS_ASSERT(rt->gcIncrementalState == MARK);
for (CompartmentsIter c(rt); !c.done(); c.next()) {
for (CompartmentsIter c(rt); !c.done(); c.next())
c->setNeedsBarrier(false);
c->setGCStarted(false);
for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
}
rt->gcMarker.reset();
rt->gcMarker.stop();
rt->gcIncrementalState = NO_INCREMENTAL;
JS_ASSERT(!rt->gcStrictCompartmentChecking);
@@ -3798,12 +3648,10 @@ AutoGCSlice::AutoGCSlice(JSRuntime *rt)
for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
/* Clear this early so we don't do any write barriers during GC. */
if (rt->gcIncrementalState == MARK) {
JS_ASSERT(c->needsBarrier());
if (rt->gcIncrementalState == MARK)
c->setNeedsBarrier(false);
} else {
else
JS_ASSERT(!c->needsBarrier());
}
}
}
@@ -3814,8 +3662,7 @@ AutoGCSlice::~AutoGCSlice()
c->setNeedsBarrier(true);
c->arenas.prepareForIncrementalGC(runtime);
} else {
JS_ASSERT(runtime->gcIncrementalState == NO_INCREMENTAL ||
runtime->gcIncrementalState == SWEEP);
JS_ASSERT(runtime->gcIncrementalState == NO_INCREMENTAL);
c->setNeedsBarrier(false);
}
}
@@ -3838,49 +3685,20 @@ class AutoCopyFreeListToArenas {
};
static void
PushZealSelectedObjects(JSRuntime *rt)
IncrementalMarkSlice(JSRuntime *rt, int64_t budget, gcreason::Reason reason, bool *shouldSweep)
{
#ifdef JS_GC_ZEAL
/* Push selected objects onto the mark stack and clear the list. */
for (JSObject **obj = rt->gcSelectedForMarking.begin();
obj != rt->gcSelectedForMarking.end(); obj++)
{
MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
}
#endif
}
static bool
DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget)
{
/* Run a marking slice and return whether the stack is now empty. */
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
return rt->gcMarker.drainMarkStack(sliceBudget);
}
static void
IncrementalCollectSlice(JSRuntime *rt,
int64_t budget,
gcreason::Reason reason,
JSGCInvocationKind gckind)
{
AutoCopyFreeListToArenas copy(rt);
AutoGCSlice slice(rt);
gc::State initialState = rt->gcIncrementalState;
SliceBudget sliceBudget(budget);
*shouldSweep = false;
int zeal = 0;
#ifdef JS_GC_ZEAL
if (reason == gcreason::DEBUG_GC) {
/*
* Do the collection type specified by zeal mode only if the collection
* was triggered by RunDebugGC().
*/
// Do the collection type specified by zeal mode only if the collection
// was triggered by RunDebugGC().
zeal = rt->gcZeal();
JS_ASSERT_IF(zeal == ZealIncrementalMarkAllThenFinish ||
zeal == ZealIncrementalRootsThenFinish,
budget == SliceBudget::Unlimited);
}
#endif
@@ -3894,88 +3712,51 @@ IncrementalCollectSlice(JSRuntime *rt,
rt->gcLastMarkSlice = false;
}
switch (rt->gcIncrementalState) {
case MARK_ROOTS:
if (rt->gcIncrementalState == MARK_ROOTS) {
BeginMarkPhase(rt, isIncremental);
PushZealSelectedObjects(rt);
rt->gcIncrementalState = MARK;
if (zeal == ZealIncrementalRootsThenFinish)
break;
return;
}
/* fall through */
case MARK: {
if (rt->gcIncrementalState == MARK) {
SliceBudget sliceBudget(budget);
/* If we needed delayed marking for gray roots, then collect until done. */
if (!rt->gcMarker.hasBufferedGrayRoots())
sliceBudget.reset();
bool finished = DrainMarkStack(rt, sliceBudget);
if (!finished)
break;
JS_ASSERT(rt->gcMarker.isDrained());
if (!rt->gcLastMarkSlice &&
((initialState == MARK && budget != SliceBudget::Unlimited) ||
zeal == ZealIncrementalMarkAllThenFinish))
{
/*
* Yield with the aim of starting the sweep in the next
* slice. We will need to mark anything new on the stack
* when we resume, so we stay in MARK state.
*/
rt->gcLastMarkSlice = true;
break;
#ifdef JS_GC_ZEAL
if (!rt->gcSelectedForMarking.empty()) {
for (JSObject **obj = rt->gcSelectedForMarking.begin();
obj != rt->gcSelectedForMarking.end(); obj++)
{
MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
}
}
EndMarkPhase(rt, isIncremental);
rt->gcIncrementalState = SWEEP;
/*
* This runs to completion, but we don't continue if the budget is
* now exhausted.
*/
BeginSweepPhase(rt);
if (sliceBudget.isOverBudget())
break;
/*
* Always yield here when running in incremental multi-slice zeal
* mode, so RunDebugGC can reset the slice budget.
*/
if (budget != SliceBudget::Unlimited && zeal == ZealIncrementalMultipleSlices)
break;
/* fall through */
}
case SWEEP: {
#ifdef DEBUG
for (CompartmentsIter c(rt); !c.done(); c.next())
JS_ASSERT(!c->needsBarrier());
#endif
bool finished = SweepPhase(rt, sliceBudget);
if (!finished)
break;
bool finished;
{
gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
finished = rt->gcMarker.drainMarkStack(sliceBudget);
}
if (finished) {
JS_ASSERT(rt->gcMarker.isDrained());
EndSweepPhase(rt, gckind, reason);
if (rt->gcSweepOnBackgroundThread)
rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
rt->gcIncrementalState = NO_INCREMENTAL;
break;
}
default:
JS_ASSERT(false);
}
if (!rt->gcLastMarkSlice &&
((initialState == MARK && budget != SliceBudget::Unlimited) ||
zeal == ZealIncrementalMarkAllThenFinish))
{
rt->gcLastMarkSlice = true;
} else {
EndMarkPhase(rt, isIncremental);
rt->gcIncrementalState = NO_INCREMENTAL;
*shouldSweep = true;
}
}
}
}
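Because the hunk above interleaves the removed IncrementalCollectSlice with the restored IncrementalMarkSlice and carries no +/- markers, the removed slice driver is hard to follow. Untangled, it was a fall-through state machine over gcIncrementalState, roughly as below (simplified: the gcLastMarkSlice and multi-slice zeal yields are omitted):

    switch (rt->gcIncrementalState) {
      case MARK_ROOTS:
        BeginMarkPhase(rt, isIncremental);
        PushZealSelectedObjects(rt);
        rt->gcIncrementalState = MARK;
        if (zeal == ZealIncrementalRootsThenFinish)
            break;
        /* fall through */
      case MARK:
        if (!DrainMarkStack(rt, sliceBudget))    // budget exhausted: yield
            break;
        EndMarkPhase(rt, isIncremental);
        rt->gcIncrementalState = SWEEP;
        BeginSweepPhase(rt);                     // runs to completion, queues sweeping
        if (sliceBudget.isOverBudget())
            break;
        /* fall through */
      case SWEEP:
        if (!SweepPhase(rt, sliceBudget))        // budget exhausted: yield
            break;
        EndSweepPhase(rt, gckind, reason);
        rt->gcIncrementalState = NO_INCREMENTAL;
        break;
      default:
        JS_ASSERT(false);
    }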
class IncrementalSafety
@@ -4053,10 +3834,8 @@ BudgetIncrementalGC(JSRuntime *rt, int64_t *budget)
rt->gcStats.nonincremental("malloc bytes trigger");
}
if (rt->gcIncrementalState != NO_INCREMENTAL &&
c->isCollecting() != c->wasGCStarted()) {
if (c->isCollecting() != c->needsBarrier())
reset = true;
}
}
if (reset)
@@ -4098,6 +3877,7 @@ GCCycle(JSRuntime *rt, bool incremental, int64_t budget, JSGCInvocationKind gcki
rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
}
bool startBackgroundSweep = false;
{
if (!incremental) {
/* If non-incremental GC was requested, reset incremental GC. */
@@ -4108,8 +3888,23 @@ GCCycle(JSRuntime *rt, bool incremental, int64_t budget, JSGCInvocationKind gcki
BudgetIncrementalGC(rt, &budget);
}
IncrementalCollectSlice(rt, budget, reason, gckind);
AutoCopyFreeListToArenas copy(rt);
bool shouldSweep;
IncrementalMarkSlice(rt, budget, reason, &shouldSweep);
#ifdef DEBUG
if (rt->gcIncrementalState == NO_INCREMENTAL) {
for (CompartmentsIter c(rt); !c.done(); c.next())
JS_ASSERT(!c->needsBarrier());
}
#endif
if (shouldSweep)
SweepPhase(rt, gckind, &startBackgroundSweep);
}
if (startBackgroundSweep)
rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
}
#ifdef JS_GC_ZEAL
@@ -4475,34 +4270,20 @@ RunDebugGC(JSContext *cx)
type == ZealIncrementalMarkAllThenFinish ||
type == ZealIncrementalMultipleSlices)
{
js::gc::State initialState = rt->gcIncrementalState;
int64_t budget;
if (type == ZealIncrementalMultipleSlices) {
/*
* Start with a small slice limit and double it every slice. This
* ensures that we get multiple slices, and collection runs to
* completion.
*/
if (initialState == NO_INCREMENTAL)
// Start with a small slice limit and double it every slice. This ensures that we get
// multiple slices, and collection runs to completion.
if (rt->gcIncrementalState == NO_INCREMENTAL)
rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
else
rt->gcIncrementalLimit *= 2;
budget = SliceBudget::WorkBudget(rt->gcIncrementalLimit);
} else {
// This triggers incremental GC but is actually ignored by IncrementalMarkSlice.
budget = SliceBudget::Unlimited;
}
Collect(rt, true, budget, GC_NORMAL, gcreason::DEBUG_GC);
/*
* For multi-slice zeal, reset the slice size when we get to the sweep
* phase.
*/
if (type == ZealIncrementalMultipleSlices &&
initialState == MARK && rt->gcIncrementalState == SWEEP)
{
rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
}
} else {
Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, gcreason::DEBUG_GC);
}

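A thread running through the removed jsgc.cpp code above is the SliceBudget protocol: finalization charges the budget per arena (budget.step(...) in FinalizeTypedArenas) and bails out once it is spent (budget.isOverBudget()), leaving the unswept tail on arenaListsToSweep for the next slice. A self-contained sketch of that contract; step/isOverBudget match the usage above, everything else is a stand-in:

    struct SliceBudget {
        long long counter;
        explicit SliceBudget(long long work) : counter(work) {}
        void step(long long amount) { counter -= amount; }
        bool isOverBudget() const { return counter <= 0; }
    };

    struct Arena { Arena *next; long long things; };

    static void finalizeArena(Arena *) { /* release the arena's dead cells */ }

    // Returns false when interrupted; *src then points at the unswept tail,
    // so the caller can store it and resume from there in the next slice.
    static bool FinalizeSome(Arena **src, SliceBudget &budget) {
        while (Arena *a = *src) {
            *src = a->next;
            finalizeArena(a);
            budget.step(a->things);              // charge the work just done
            if (budget.isOverBudget())
                return false;
        }
        return true;
    }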
View file

@@ -41,7 +41,6 @@ namespace js {
class GCHelperThread;
struct Shape;
struct SliceBudget;
namespace gc {
@@ -49,7 +48,6 @@ enum State {
NO_INCREMENTAL,
MARK_ROOTS,
MARK,
SWEEP,
INVALID
};
@@ -148,35 +146,33 @@ IsNurseryAllocable(AllocKind kind)
inline JSGCTraceKind
GetGCThingTraceKind(const void *thing);
/*
* ArenaList::head points to the start of the list. Normally cursor points
* to the first arena in the list with some free things, and all arenas
* before cursor are fully allocated. However, the arena currently being
* allocated from is considered full while its list of free spans is moved
* into the freeList, so during GC or cell enumeration, when an unallocated
* freeList is moved back to the arena, we can see an arena with some free
* cells before the cursor. The cursor is an indirect pointer to allow for
* efficient list insertion at the cursor point and other list
* manipulations.
*/
struct ArenaList {
ArenaHeader *head;
ArenaHeader **cursor;
ArenaList() {
clear();
}
void clear() {
head = NULL;
cursor = &head;
}
void insert(ArenaHeader *arena);
};
struct ArenaLists {
/*
* ArenaList::head points to the start of the list. Normally cursor points
* to the first arena in the list with some free things, and all arenas
* before cursor are fully allocated. However, the arena currently being
* allocated from is considered full while its list of free spans is moved
* into the freeList, so during GC or cell enumeration, when an unallocated
* freeList is moved back to the arena, we can see an arena with some free
* cells before the cursor. The cursor is an indirect pointer to allow for
* efficient list insertion at the cursor point and other list
* manipulations.
*/
struct ArenaList {
ArenaHeader *head;
ArenaHeader **cursor;
ArenaList() {
clear();
}
void clear() {
head = NULL;
cursor = &head;
}
};
private:
/*
* For each arena kind its free list is represented as the first span with
@@ -215,18 +211,12 @@ struct ArenaLists {
volatile uintptr_t backgroundFinalizeState[FINALIZE_LIMIT];
public:
/* For each arena kind, a list of arenas remaining to be swept. */
ArenaHeader *arenaListsToSweep[FINALIZE_LIMIT];
public:
ArenaLists() {
for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
freeLists[i].initAsEmpty();
for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
backgroundFinalizeState[i] = BFS_DONE;
for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
arenaListsToSweep[i] = NULL;
}
~ArenaLists() {
@@ -266,10 +256,6 @@ struct ArenaLists {
return true;
}
bool arenasAreFull(AllocKind thingKind) const {
return !*arenaLists[thingKind].cursor;
}
void unmarkAll() {
for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
/* The background finalization must have stopped at this point. */
@@ -378,18 +364,16 @@ struct ArenaLists {
JS_ASSERT(freeLists[kind].isEmpty());
}
void queueObjectsForSweep(FreeOp *fop);
void queueStringsForSweep(FreeOp *fop);
void queueShapesForSweep(FreeOp *fop);
void queueScriptsForSweep(FreeOp *fop);
void finalizeObjects(FreeOp *fop);
void finalizeStrings(FreeOp *fop);
void finalizeShapes(FreeOp *fop);
void finalizeScripts(FreeOp *fop);
bool foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget);
static void backgroundFinalize(FreeOp *fop, ArenaHeader *listHead);
private:
inline void finalizeNow(FreeOp *fop, AllocKind thingKind);
inline void queueForForegroundSweep(FreeOp *fop, AllocKind thingKind);
inline void queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind);
inline void finalizeLater(FreeOp *fop, AllocKind thingKind);
inline void *allocateFromArena(JSCompartment *comp, AllocKind thingKind);
};

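The cursor invariant in the ArenaList comment above is easiest to see in a toy version; the stand-in types below are simplified, and the real insert() is the ArenaList::insert removed from jsgc.cpp earlier in this diff:

    // Arenas before *cursor are full, so an allocator can begin its search
    // at *cursor instead of walking the list from head.
    struct Hdr { Hdr *next; bool hasFreeThings; };

    struct List {
        Hdr *head;
        Hdr **cursor;
        List() : head(nullptr), cursor(&head) {}

        // Insert at the cursor and step past full arenas, so everything
        // before the cursor stays fully allocated.
        void insert(Hdr *a) {
            a->next = *cursor;
            *cursor = a;
            if (!a->hasFreeThings)
                cursor = &a->next;
        }
    };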
View file

@@ -102,14 +102,12 @@ void
Shape::removeChild(Shape *child)
{
JS_ASSERT(!child->inDictionary());
JS_ASSERT(child->parent == this);
KidsPointer *kidp = &kids;
if (kidp->isShape()) {
JS_ASSERT(kidp->toShape() == child);
kidp->setNull();
child->parent = NULL;
return;
}
@@ -117,7 +115,6 @@ Shape::removeChild(Shape *child)
JS_ASSERT(hash->count() >= 2); /* otherwise kidp->isShape() should be true */
hash->remove(child);
child->parent = NULL;
if (hash->count() == 1) {
/* Convert from HASH form back to SHAPE form. */
@@ -129,10 +126,27 @@ Shape::removeChild(Shape *child)
}
}
/*
* We need a read barrier for the shape tree, since these are weak pointers.
*/
static Shape *
ReadBarrier(Shape *shape)
{
#ifdef JSGC_INCREMENTAL
JSCompartment *comp = shape->compartment();
if (comp->needsBarrier()) {
Shape *tmp = shape;
MarkShapeUnbarriered(comp->barrierTracer(), &tmp, "read barrier");
JS_ASSERT(tmp == shape);
}
#endif
return shape;
}
Shape *
PropertyTree::getChild(JSContext *cx, Shape *parent_, uint32_t nfixed, const StackShape &child)
{
Shape *shape = NULL;
Shape *shape;
JS_ASSERT(parent_);
@@ -146,43 +160,17 @@ PropertyTree::getChild(JSContext *cx, Shape *parent_, uint32_t nfixed, const Sta
*/
KidsPointer *kidp = &parent_->kids;
if (kidp->isShape()) {
Shape *kid = kidp->toShape();
if (kid->matches(child))
shape = kid;
shape = kidp->toShape();
if (shape->matches(child))
return ReadBarrier(shape);
} else if (kidp->isHash()) {
shape = *kidp->toHash()->lookup(child);
if (shape)
return ReadBarrier(shape);
} else {
/* If kidp->isNull(), we always insert. */
}
#ifdef JSGC_INCREMENTAL
if (shape) {
JSCompartment *comp = shape->compartment();
if (comp->needsBarrier()) {
/*
* We need a read barrier for the shape tree, since these are weak
* pointers.
*/
Shape *tmp = shape;
MarkShapeUnbarriered(comp->barrierTracer(), &tmp, "read barrier");
JS_ASSERT(tmp == shape);
} else if (comp->isGCSweeping() && !shape->isMarked() &&
!shape->arenaHeader()->allocatedDuringIncremental)
{
/*
* The shape we've found is unreachable and due to be finalized, so
* remove our weak reference to it and don't use it.
*/
JS_ASSERT(parent_->isMarked());
parent_->removeChild(shape);
shape = NULL;
}
}
#endif
if (shape)
return shape;
StackShape::AutoRooter childRoot(cx, &child);
RootedShape parent(cx, parent_);
@@ -202,11 +190,6 @@ void
Shape::finalize(FreeOp *fop)
{
if (!inDictionary()) {
/*
* Note that due to incremental sweeping, if !parent->isMarked() then
* the parent may point to a new shape allocated in the same cell that
* used to hold our parent.
*/
if (parent && parent->isMarked())
parent->removeChild(this);

View file

@@ -1206,7 +1206,7 @@ ScriptSource::createFromSource(JSContext *cx, const jschar *src, uint32_t length
* accessed even if the name was already in the table. At this point old
* scripts pointing to the source may no longer be reachable.
*/
if (cx->runtime->gcIncrementalState != NO_INCREMENTAL && cx->runtime->gcIsFull)
if (cx->runtime->gcIncrementalState == MARK && cx->runtime->gcIsFull)
ss->marked = true;
#endif
@@ -1346,7 +1346,7 @@ ScriptSource::performXDR(XDRState<mode> *xdr, ScriptSource **ssp)
cleanup.protect(ss);
#ifdef JSGC_INCREMENTAL
// See comment in ScriptSource::createFromSource.
if (xdr->cx()->runtime->gcIncrementalState != NO_INCREMENTAL &&
if (xdr->cx()->runtime->gcIncrementalState == MARK &&
xdr->cx()->runtime->gcIsFull)
ss->marked = true;
#endif
@@ -1414,7 +1414,7 @@ js::SaveScriptFilename(JSContext *cx, const char *filename)
* scripts or exceptions pointing to the filename may no longer be
* reachable.
*/
if (rt->gcIncrementalState != NO_INCREMENTAL && rt->gcIsFull)
if (rt->gcIncrementalState == MARK && rt->gcIsFull)
sfe->marked = true;
#endif
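The three jsscript.cpp hunks above restore the same pattern: if an interned entry (a script source or a saved filename) is created or re-used while a full GC is in its mark phase, its mark bit is set immediately so the imminent sweep cannot free it out from under its new user. Condensed into a self-contained sketch; Runtime and Entry are stand-ins for the real types:

    enum GCState { NO_INCREMENTAL, MARK_ROOTS, MARK };
    struct Runtime { GCState gcIncrementalState; bool gcIsFull; };
    struct Entry { bool marked; };

    // Hypothetical interning helper showing the pattern from the hunks above.
    void internEntry(Runtime *rt, Entry *e) {
        // ...insert e into the shared runtime table here...
        if (rt->gcIncrementalState == MARK && rt->gcIsFull)
            e->marked = true;   // a full-GC sweep skips marked entries
    }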