Bug 538450 - Part 3: Remove atom lock. r=jonco

* AutoAccessAtomsZone is removed in Part 9

Differential Revision: https://phabricator.services.mozilla.com/D131365
Tooru Fujisawa 2021-11-19 04:43:07 +00:00
Parent cb7cb2f084
Commit 40fe0d0d17
8 changed files with 13 additions and 117 deletions

View file

@@ -130,12 +130,10 @@ class MOZ_RAII AutoMajorGCProfilerEntry : public AutoGeckoProfilerEntry {
explicit AutoMajorGCProfilerEntry(GCRuntime* gc);
};
class MOZ_RAII AutoTraceSession : public AutoLockAllAtoms,
public AutoHeapSession {
class MOZ_RAII AutoTraceSession : public AutoHeapSession {
public:
explicit AutoTraceSession(JSRuntime* rt)
: AutoLockAllAtoms(rt),
AutoHeapSession(&rt->gc, JS::HeapState::Tracing) {}
: AutoHeapSession(&rt->gc, JS::HeapState::Tracing) {}
};
struct MOZ_RAII AutoFinishGC {

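For context on the hunk above: a minimal sketch (not SpiderMonkey code; LockAllAtoms, HeapSession, and both session classes are illustrative stand-ins) of why listing AutoLockAllAtoms as the first base class made the atoms locks bracket the whole trace session. C++ constructs base classes left to right and destroys them in reverse, so the lock was taken before the session began and dropped after it ended; after this patch the lock base is simply gone.

#include <cstdio>

struct LockAllAtoms {   // stand-in for the removed AutoLockAllAtoms
  LockAllAtoms() { std::puts("lock atoms tables"); }
  ~LockAllAtoms() { std::puts("unlock atoms tables"); }
};

struct HeapSession {    // stand-in for AutoHeapSession
  HeapSession() { std::puts("begin heap session"); }
  ~HeapSession() { std::puts("end heap session"); }
};

// Old shape: bases construct left to right and destruct in reverse, so the
// atoms locks were held for the full lifetime of the heap session.
struct OldTraceSession : LockAllAtoms, HeapSession {};

// New shape after this patch: just the session, no lock to take.
struct NewTraceSession : HeapSession {};

int main() {
  { OldTraceSession s; }  // lock, begin, end, unlock
  { NewTraceSession s; }  // begin, end
}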
View file

@@ -28,10 +28,6 @@ class JS_PUBLIC_API Realm;
namespace js {
// Accessing the atoms zone can be dangerous because helper threads may be
// accessing it concurrently to the main thread, so it's better to skip the
// atoms zone when iterating over zones. If you need to iterate over the atoms
// zone, consider using AutoLockAllAtoms.
enum ZoneSelector { WithAtoms, SkipAtoms };
// Iterate over all zones in the runtime apart from the atoms zone.

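A self-contained sketch of the SkipAtoms convention described by the comment above (Zone and forEachZone here are simplified stand-ins, not the real SpiderMonkey types): iteration takes a selector, so touching the shared atoms zone is an explicit opt-in rather than an accident.

#include <vector>

enum ZoneSelector { WithAtoms, SkipAtoms };

struct Zone {
  bool isAtomsZone = false;
};

// Visit every zone, skipping the shared atoms zone unless explicitly asked.
template <typename F>
void forEachZone(const std::vector<Zone*>& zones, ZoneSelector selector,
                 F&& f) {
  for (Zone* zone : zones) {
    if (zone->isAtomsZone && selector == SkipAtoms) {
      continue;  // helper threads may be using this zone concurrently
    }
    f(zone);
  }
}

// Usage: forEachZone(zones, SkipAtoms, [](Zone* zone) { /* per-zone work */ });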
View file

@@ -122,13 +122,7 @@ void CheckArenaListAccess<Helper>::check() const {
JSRuntime* rt = TlsContext.get()->runtime();
if (zone->isAtomsZone()) {
// The main thread can access the atoms arenas if it holds all the atoms
// table locks.
if (rt->currentThreadHasAtomsTableAccess()) {
return;
}
// Otherwise we must hold the GC lock if parallel parsing is running.
// We must hold the GC lock if parallel parsing is running.
if (rt->isOffThreadParseRunning()) {
rt->gc.assertCurrentThreadHasLockedGC();
}

View file

@@ -19,24 +19,11 @@
* The atoms table is a mapping from strings to JSAtoms that supports concurrent
* access and incremental sweeping.
*
* The table is partitioned based on the key into multiple sub-tables. Each
* sub-table is protected by a lock to ensure safety when accessed by helper
* threads. Concurrent access improves performance of off-thread parsing which
* frequently creates large numbers of atoms. Locking is only required when
* off-thread parsing is running.
* The table is partitioned based on the key into multiple sub-tables.
*/
namespace js {
// Take all atoms table locks to allow iterating over cells in the atoms zone.
class MOZ_RAII AutoLockAllAtoms {
JSRuntime* runtime;
public:
explicit AutoLockAllAtoms(JSRuntime* rt);
~AutoLockAllAtoms();
};
// This is a tagged pointer to an atom that duplicates the atom's pinned flag so
// that we don't have to check the atom itself when marking pinned atoms (there
// can be a great many atoms). See bug 1445196.
@@ -120,9 +107,6 @@ class AtomsTable {
explicit Partition(uint32_t index);
~Partition();
// Lock that must be held to access this set.
Mutex lock;
// The atoms in this set.
AtomSet atoms;
@@ -132,13 +116,7 @@ class AtomsTable {
Partition* partitions[PartitionCount];
#ifdef DEBUG
bool allPartitionsLocked = false;
#endif
public:
class AutoLock;
// An iterator used for sweeping atoms incrementally.
class SweepIterator {
AtomsTable& atoms;
@@ -180,10 +158,6 @@ class AtomsTable {
// Sweep some atoms incrementally and return whether we finished.
bool sweepIncrementally(SweepIterator& atomsToSweep, SliceBudget& budget);
#ifdef DEBUG
bool mainThreadHasAllLocks() const { return allPartitionsLocked; }
#endif
size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
private:
@@ -192,10 +166,6 @@ class AtomsTable {
void tracePinnedAtomsInSet(JSTracer* trc, AtomSet& atoms);
void mergeAtomsAddedWhileSweeping(Partition& partition);
friend class AutoLockAllAtoms;
void lockAll();
void unlockAll();
};
bool AtomIsPinned(JSContext* cx, JSAtom* atom);

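The retained comment above says the table is partitioned based on the key; a sketch of how that selection works, modeled on AtomsTable::getPartitionIndex in the next file (PartitionShift = 5 is an assumption here; the real constant lives in the SpiderMonkey sources): the top bits of the 32-bit hash pick the sub-table, which keeps partitions balanced even when low hash bits are correlated.

#include <cstddef>
#include <cstdint>

constexpr size_t PartitionShift = 5;                    // assumed value
constexpr size_t PartitionCount = 1 << PartitionShift;  // 32 sub-tables

size_t getPartitionIndex(uint32_t hash) {
  // Take the high bits of the hash; the result is always < PartitionCount.
  return hash >> (32 - PartitionShift);
}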
View file

@@ -312,17 +312,8 @@ void JSRuntime::finishAtoms() {
emptyString = nullptr;
}
class AtomsTable::AutoLock {
public:
MOZ_ALWAYS_INLINE AutoLock(JSRuntime* rt, Mutex& aLock) {}
MOZ_ALWAYS_INLINE ~AutoLock() {}
};
AtomsTable::Partition::Partition(uint32_t index)
: lock(
MutexId{mutexid::AtomsTable.name, mutexid::AtomsTable.order + index}),
atoms(InitialTableSize),
atomsAddedWhileSweeping(nullptr) {}
: atoms(InitialTableSize), atomsAddedWhileSweeping(nullptr) {}
AtomsTable::Partition::~Partition() { MOZ_ASSERT(!atomsAddedWhileSweeping); }
@@ -342,30 +333,6 @@ bool AtomsTable::init() {
return true;
}
void AtomsTable::lockAll() {
MOZ_ASSERT(!allPartitionsLocked);
for (size_t i = 0; i < PartitionCount; i++) {
partitions[i]->lock.lock();
}
#ifdef DEBUG
allPartitionsLocked = true;
#endif
}
void AtomsTable::unlockAll() {
MOZ_ASSERT(allPartitionsLocked);
for (size_t i = 0; i < PartitionCount; i++) {
partitions[PartitionCount - i - 1]->lock.unlock();
}
#ifdef DEBUG
allPartitionsLocked = false;
#endif
}
MOZ_ALWAYS_INLINE size_t
AtomsTable::getPartitionIndex(const AtomHasher::Lookup& lookup) {
size_t index = lookup.hash >> (32 - PartitionShift);
@@ -438,9 +405,7 @@ void js::TraceWellKnownSymbols(JSTracer* trc) {
}
void AtomsTable::traceWeak(JSTracer* trc) {
JSRuntime* rt = trc->runtime();
for (size_t i = 0; i < PartitionCount; i++) {
AutoLock lock(rt, partitions[i]->lock);
AtomSet& atoms = partitions[i]->atoms;
for (AtomSet::Enum e(atoms); !e.empty(); e.popFront()) {
JSAtom* atom = e.front().asPtrUnbarriered();
@@ -624,7 +589,7 @@ static MOZ_ALWAYS_INLINE JSAtom* AtomizeAndCopyCharsFromLookup(
const AtomHasher::Lookup& lookup, PinningBehavior pin,
const Maybe<uint32_t>& indexValue) {
// Try the per-Zone cache first. If we find the atom there we can avoid the
// atoms lock, the markAtom call, and the multiple HashSet lookups below.
// markAtom call, and the multiple HashSet lookups below.
// We don't use the per-Zone cache if we want a pinned atom: handling that
// is more complicated and pinning atoms is relatively uncommon.
Zone* zone = cx->zone();
@@ -661,8 +626,6 @@ static MOZ_ALWAYS_INLINE JSAtom* AtomizeAndCopyCharsFromLookup(
return atom;
}
// Validate the length before taking an atoms partition lock, as throwing an
// exception here may reenter this code.
if (MOZ_UNLIKELY(!JSString::validateLength(cx, length))) {
return nullptr;
}
@@ -697,7 +660,6 @@ MOZ_ALWAYS_INLINE JSAtom* AtomsTable::atomizeAndCopyChars(
JSContext* cx, const CharT* chars, size_t length, PinningBehavior pin,
const Maybe<uint32_t>& indexValue, const AtomHasher::Lookup& lookup) {
Partition& part = *partitions[getPartitionIndex(lookup)];
AutoLock lock(cx->runtime(), part.lock);
AtomSet& atoms = part.atoms;
AtomSet* atomsAddedWhileSweeping = part.atomsAddedWhileSweeping;
@@ -735,9 +697,8 @@ MOZ_ALWAYS_INLINE JSAtom* AtomsTable::atomizeAndCopyChars(
return nullptr;
}
// We have held the lock since looking up p, and the operations we've done
// since then can't GC; therefore the atoms table has not been modified and
// p is still valid.
// The operations above can't GC; therefore the atoms table has not been
// modified and p is still valid.
AtomSet* addSet =
part.atomsAddedWhileSweeping ? part.atomsAddedWhileSweeping : &atoms;
if (MOZ_UNLIKELY(!addSet->add(p, AtomStateEntry(atom, bool(pin))))) {
@@ -872,8 +833,8 @@ static MOZ_ALWAYS_INLINE JSAtom* AllocateNewAtom(
JSLinearString* linear = MakeLinearStringForAtomization(cx, chars, length);
if (!linear) {
// Grudgingly forgo last-ditch GC. The alternative would be to release
// the lock, manually GC here, and retry from the top.
// Grudgingly forgo last-ditch GC. The alternative would be to manually GC
// here, and retry from the top.
ReportOutOfMemory(cx);
return nullptr;
}
@@ -955,7 +916,6 @@ bool AtomsTable::atomIsPinned(JSRuntime* rt, JSAtom* atom) {
AtomHasher::Lookup lookup(atom);
AtomsTable::Partition& part = *partitions[getPartitionIndex(lookup)];
AtomsTable::AutoLock lock(rt, part.lock);
AtomSet::Ptr p = part.atoms.lookup(lookup);
if (!p && part.atomsAddedWhileSweeping) {
p = part.atomsAddedWhileSweeping->lookup(lookup);
@@ -974,7 +934,6 @@ void AtomsTable::maybePinExistingAtom(JSContext* cx, JSAtom* atom) {
AtomHasher::Lookup lookup(atom);
AtomsTable::Partition& part = *partitions[getPartitionIndex(lookup)];
AtomsTable::AutoLock lock(cx->runtime(), part.lock);
AtomSet::Ptr p = part.atoms.lookup(lookup);
if (!p && part.atomsAddedWhileSweeping) {
p = part.atomsAddedWhileSweeping->lookup(lookup);
@@ -1196,11 +1155,3 @@ template JSAtom* js::ToAtom<NoGC>(JSContext* cx, const Value& v);
Handle<PropertyName*> js::ClassName(JSProtoKey key, JSContext* cx) {
return ClassName(key, cx->names());
}
js::AutoLockAllAtoms::AutoLockAllAtoms(JSRuntime* rt) : runtime(rt) {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
}
js::AutoLockAllAtoms::~AutoLockAllAtoms() {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
}

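The deleted lockAll/unlockAll above encode a classic deadlock-avoidance discipline worth noting even as it goes away. A sketch with std::mutex stand-ins (PartitionCount = 32 is assumed, matching the sketch above; the real js::Mutex also carries an ordering id): every thread acquires partitions in ascending index order and releases them in descending order, so no two lockAll callers can ever wait on each other in a cycle.

#include <array>
#include <cstddef>
#include <mutex>

constexpr size_t PartitionCount = 32;  // assumed value
std::array<std::mutex, PartitionCount> partitionLocks;

void lockAll() {
  // Ascending index order: every thread agrees on the order, so no cycle
  // of waiters can form.
  for (size_t i = 0; i < PartitionCount; i++) {
    partitionLocks[i].lock();
  }
}

void unlockAll() {
  // Release in the reverse order of acquisition, mirroring the deleted code.
  for (size_t i = 0; i < PartitionCount; i++) {
    partitionLocks[PartitionCount - i - 1].unlock();
  }
}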
View file

@@ -57,6 +57,7 @@ class DebugModeOSRVolatileJitFrameIter;
} // namespace jit
namespace gc {
class AutoTraceSession;
class AutoCheckCanAccessAtomsDuringGC;
class AutoSuppressNurseryCellAlloc;
} // namespace gc
@@ -1156,18 +1157,10 @@ class MOZ_RAII AutoLockScriptData {
}
};
// A token used to prove you can safely access the atoms zone. This zone is
// accessed by the main thread and by off-thread parsing. There are two
// situations in which it is safe:
//
// - the current thread holds all atoms table locks (off-thread parsing may be
// running and must also take one of these locks for access)
//
// - the GC is running and is collecting the atoms zone (this cannot be started
// while off-thread parsing is happening)
// TODO: Remove.
class MOZ_STACK_CLASS AutoAccessAtomsZone {
public:
MOZ_IMPLICIT AutoAccessAtomsZone(const AutoLockAllAtoms& lock) {}
MOZ_IMPLICIT AutoAccessAtomsZone(const gc::AutoTraceSession& lock) {}
MOZ_IMPLICIT AutoAccessAtomsZone(
const gc::AutoCheckCanAccessAtomsDuringGC& canAccess) {}
};

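AutoAccessAtomsZone above is a zero-size capability token; a sketch of that pattern with simplified, hypothetical names (TraceSessionEvidence, AccessToken, and sweepProtectedZone are stand-ins, not the real API): a function that touches protected state demands a token, and the token is only constructible from evidence that access is currently safe, so the discipline is enforced by the type system at no runtime cost.

// Evidence type: stand-in for proofs like gc::AutoTraceSession.
struct TraceSessionEvidence {};

class AccessToken {
 public:
  // Only constructible from proof that access is currently safe; the token
  // holds no state and compiles away entirely.
  explicit AccessToken(const TraceSessionEvidence&) {}
};

// Functions that touch the protected zone must be handed a token, making the
// safety argument visible at every call site.
void sweepProtectedZone(const AccessToken&) {
  // ... touch atoms-zone state ...
}

void example() {
  TraceSessionEvidence session;  // we are inside a trace session
  sweepProtectedZone(AccessToken{session});
}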
View file

@@ -21,7 +21,6 @@
_(ShellWorkerThreads, 100) \
_(ShellObjectMailbox, 100) \
\
_(AtomsTable, 200) \
_(WasmInitBuiltinThunks, 250) \
_(WasmLazyStubsTier1, 250) \
_(WasmLazyStubsTier2, 251) \

View file

@@ -590,11 +590,6 @@ struct JSRuntime {
scriptDataLock.assertOwnedByCurrentThread();
}
bool currentThreadHasAtomsTableAccess() const {
return js::CurrentThreadCanAccessRuntime(this) &&
atoms_->mainThreadHasAllLocks();
}
#endif
JS::HeapState heapState() const { return gc.heapState(); }