Bug 1594054 - Move ExecutableAllocator from JitRuntime to JitZone. r=jonco,erahm

This matches the JitCode GC-thing lifetime and will hopefully help avoid
fragmentation.

Differential Revision: https://phabricator.services.mozilla.com/D52823

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Jan de Mooij 2019-11-14 10:20:02 +00:00
Родитель 32fdd189d1
Коммит 72d7c72f56
11 изменённых файлов: 93 добавлений и 68 удалений

Просмотреть файл

@@ -507,7 +506,6 @@ struct RuntimeSizes {
void addToServoSizes(ServoSizes* sizes) const {
FOR_EACH_SIZE(ADD_TO_SERVO_SIZES);
scriptSourceInfo.addToServoSizes(sizes);
code.addToServoSizes(sizes);
gc.addToServoSizes(sizes);
}
@@ -518,7 +517,6 @@ struct RuntimeSizes {
// FineGrained, we subtract the measurements of the notable script sources
// and move them into |notableScriptSources|.
ScriptSourceInfo scriptSourceInfo;
CodeSizes code;
GCSizes gc;
typedef js::HashMap<const char*, ScriptSourceInfo, mozilla::CStringHasher,
@@ -683,6 +681,7 @@ struct ZoneStats {
unusedGCThings.addToServoSizes(sizes);
stringInfo.addToServoSizes(sizes);
shapeInfo.addToServoSizes(sizes);
code.addToServoSizes(sizes);
}
FOR_EACH_SIZE(DECL_SIZE_ZERO);
@@ -694,6 +693,7 @@ struct ZoneStats {
UnusedGCThingSizes unusedGCThings;
StringInfo stringInfo;
ShapeInfo shapeInfo;
CodeSizes code;
void* extra = nullptr; // This field can be used by embedders.
typedef js::HashMap<JSString*, StringInfo,

Просмотреть файл

@@ -5552,6 +5552,10 @@ IncrementalProgress GCRuntime::endSweepingSweepGroup(JSFreeOp* fop,
/* Update the GC state for zones we have swept. */
for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
if (jit::JitZone* jitZone = zone->jitZone()) {
// Clear out any small pools that we're hanging on to.
jitZone->execAlloc().purge();
}
AutoLockGC lock(this);
zone->changeGCState(Zone::Sweep, Zone::Finished);
zone->updateGCThresholds(*this, invocationKind, lock);
@@ -6357,11 +6361,6 @@ void GCRuntime::endSweepPhase(bool destroyingRuntime) {
* script's filename. See bug 323267.
*/
SweepScriptData(rt);
/* Clear out any small pools that we're hanging on to. */
if (rt->hasJitRuntime()) {
rt->jitRuntime()->execAlloc().purge();
}
}
{

Просмотреть файл

@@ -580,15 +580,16 @@ void Zone::traceAtomCache(JSTracer* trc) {
}
void Zone::addSizeOfIncludingThis(
mozilla::MallocSizeOf mallocSizeOf, size_t* typePool, size_t* regexpZone,
size_t* jitZone, size_t* baselineStubsOptimized, size_t* cachedCFG,
size_t* uniqueIdMap, size_t* shapeCaches, size_t* atomsMarkBitmaps,
size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
size_t* compartmentsPrivateData, size_t* scriptCountsMapArg) {
mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* typePool,
size_t* regexpZone, size_t* jitZone, size_t* baselineStubsOptimized,
size_t* cachedCFG, size_t* uniqueIdMap, size_t* shapeCaches,
size_t* atomsMarkBitmaps, size_t* compartmentObjects,
size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData,
size_t* scriptCountsMapArg) {
*typePool += types.typeLifoAlloc().sizeOfExcludingThis(mallocSizeOf);
*regexpZone += regExps().sizeOfExcludingThis(mallocSizeOf);
if (jitZone_) {
jitZone_->addSizeOfIncludingThis(mallocSizeOf, jitZone,
jitZone_->addSizeOfIncludingThis(mallocSizeOf, code, jitZone,
baselineStubsOptimized, cachedCFG);
}
*uniqueIdMap += uniqueIds().shallowSizeOfExcludingThis(mallocSizeOf);

Просмотреть файл

@@ -210,11 +210,12 @@ class Zone : public js::ZoneAllocator, public js::gc::GraphNodeBase<JS::Zone> {
ShouldDiscardJitScripts discardJitScripts = KeepJitScripts);
void addSizeOfIncludingThis(
mozilla::MallocSizeOf mallocSizeOf, size_t* typePool, size_t* regexpZone,
size_t* jitZone, size_t* baselineStubsOptimized, size_t* cachedCFG,
size_t* uniqueIdMap, size_t* shapeCaches, size_t* atomsMarkBitmaps,
size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
size_t* compartmentsPrivateData, size_t* scriptCountsMapArg);
mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* typePool,
size_t* regexpZone, size_t* jitZone, size_t* baselineStubsOptimized,
size_t* cachedCFG, size_t* uniqueIdMap, size_t* shapeCaches,
size_t* atomsMarkBitmaps, size_t* compartmentObjects,
size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData,
size_t* scriptCountsMapArg);
// Iterate over all cells in the zone. See the definition of ZoneCellIter
// in gc/GC-inl.h for the possible arguments and documentation.

Просмотреть файл

@@ -156,8 +156,7 @@ bool jit::InitializeJit() {
}
JitRuntime::JitRuntime()
: execAlloc_(),
nextCompilationId_(0),
: nextCompilationId_(0),
exceptionTailOffset_(0),
bailoutTailOffset_(0),
profilerExitFrameTailOffset_(0),
@@ -609,7 +608,7 @@ size_t JitRealm::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
}
void JitZone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
size_t* jitZone,
JS::CodeSizes* code, size_t* jitZone,
size_t* baselineStubsOptimized,
size_t* cachedCFG) const {
*jitZone += mallocSizeOf(this);
@@ -617,6 +616,8 @@ void JitZone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
baselineCacheIRStubCodes_.shallowSizeOfExcludingThis(mallocSizeOf);
*jitZone += ionCacheIRStubInfoSet_.shallowSizeOfExcludingThis(mallocSizeOf);
execAlloc().addSizeOfCode(code);
*baselineStubsOptimized +=
optimizedStubSpace_.sizeOfExcludingThis(mallocSizeOf);
*cachedCFG += cfgSpace_.sizeOfExcludingThis(mallocSizeOf);

Просмотреть файл

@@ -132,9 +132,6 @@ class JitRuntime {
private:
friend class JitRealm;
// Executable allocator for all code except wasm code.
MainThreadData<ExecutableAllocator> execAlloc_;
MainThreadData<uint64_t> nextCompilationId_;
// Buffer for OSR from baseline to Ion. To avoid holding on to this for too
@@ -306,8 +303,6 @@ class JitRuntime {
static MOZ_MUST_USE bool MarkJitcodeGlobalTableIteratively(GCMarker* marker);
static void TraceWeakJitcodeGlobalTable(JSRuntime* rt, JSTracer* trc);
ExecutableAllocator& execAlloc() { return execAlloc_.ref(); }
const BaselineICFallbackCode& baselineICFallbackCode() const {
return baselineICFallbackCode_.ref();
}
@@ -500,11 +495,15 @@ class JitZone {
SystemAllocPolicy, IcStubCodeMapGCPolicy<CacheIRStubKey>>;
BaselineCacheIRStubCodeMap baselineCacheIRStubCodes_;
// Executable allocator for all code except wasm code.
MainThreadData<ExecutableAllocator> execAlloc_;
public:
void traceWeak(JSTracer* trc);
void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
size_t* jitZone, size_t* baselineStubsOptimized,
JS::CodeSizes* code, size_t* jitZone,
size_t* baselineStubsOptimized,
size_t* cachedCFG) const;
OptimizedICStubSpace* optimizedStubSpace() { return &optimizedStubSpace_; }
@@ -540,6 +539,9 @@ class JitZone {
return ionCacheIRStubInfoSet_.add(p, std::move(key));
}
void purgeIonCacheIRStubInfo() { ionCacheIRStubInfoSet_.clearAndCompact(); }
ExecutableAllocator& execAlloc() { return execAlloc_.ref(); }
const ExecutableAllocator& execAlloc() const { return execAlloc_.ref(); }
};
class JitRealm {

Просмотреть файл

@@ -37,9 +37,15 @@ JitCode* Linker::newCode(JSContext* cx, CodeKind kind) {
// ExecutableAllocator requires bytesNeeded to be aligned.
bytesNeeded = AlignBytes(bytesNeeded, ExecutableAllocatorAlignment);
JitZone* jitZone = cx->zone()->getJitZone(cx);
if (!jitZone) {
// Note: don't call fail(cx) here, getJitZone reports OOM.
return nullptr;
}
ExecutablePool* pool;
uint8_t* result = (uint8_t*)cx->runtime()->jitRuntime()->execAlloc().alloc(
cx, bytesNeeded, &pool, kind);
uint8_t* result =
(uint8_t*)jitZone->execAlloc().alloc(cx, bytesNeeded, &pool, kind);
if (!result) {
return fail(cx);
}

Просмотреть файл

@@ -213,9 +213,9 @@ static void StatsZoneCallback(JSRuntime* rt, void* data, Zone* zone) {
rtStats->currZoneStats = &zStats;
zone->addSizeOfIncludingThis(
rtStats->mallocSizeOf_, &zStats.typePool, &zStats.regexpZone,
&zStats.jitZone, &zStats.baselineStubsOptimized, &zStats.cachedCFG,
&zStats.uniqueIdMap, &zStats.shapeTables,
rtStats->mallocSizeOf_, &zStats.code, &zStats.typePool,
&zStats.regexpZone, &zStats.jitZone, &zStats.baselineStubsOptimized,
&zStats.cachedCFG, &zStats.uniqueIdMap, &zStats.shapeTables,
&rtStats->runtime.atomsMarkBitmaps, &zStats.compartmentObjects,
&zStats.crossCompartmentWrappersTables, &zStats.compartmentsPrivateData,
&zStats.scriptCountsMap);

Просмотреть файл

@@ -116,15 +116,10 @@ bool JSRuntime::createJitRuntime(JSContext* cx) {
MOZ_ASSERT(!jitRuntime_);
if (!CanLikelyAllocateMoreExecutableMemory()) {
// Report OOM instead of potentially hitting the MOZ_CRASH below, but first
// try to release memory.
// Try to release memory first instead of potentially reporting OOM below.
if (OnLargeAllocationFailure) {
OnLargeAllocationFailure();
}
if (!CanLikelyAllocateMoreExecutableMemory()) {
ReportOutOfMemory(cx);
return false;
}
}
jit::JitRuntime* jrt = cx->new_<jit::JitRuntime>();
@@ -136,12 +131,10 @@ bool JSRuntime::createJitRuntime(JSContext* cx) {
// we can't just wait to assign jitRuntime_.
jitRuntime_ = jrt;
AutoEnterOOMUnsafeRegion noOOM;
if (!jitRuntime_->initialize(cx)) {
// Handling OOM here is complicated: if we delete jitRuntime_ now, we
// will destroy the ExecutableAllocator, even though there may still be
// JitCode instances holding references to ExecutablePools.
noOOM.crash("OOM in createJitRuntime");
js_delete(jitRuntime_.ref());
jitRuntime_ = nullptr;
return false;
}
return true;

Просмотреть файл

@@ -386,8 +386,6 @@ void JSRuntime::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
}
if (jitRuntime_) {
jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
// Sizes of the IonBuilders we are holding for lazy linking
for (auto builder : jitRuntime_->ionLazyLinkList(this)) {
rtSizes->jitLazyLink += builder->sizeOfExcludingThis(mallocSizeOf);

Просмотреть файл

@@ -1354,6 +1354,20 @@ NS_IMPL_ISUPPORTS(JSMainRuntimeTemporaryPeakReporter, nsIMemoryReporter)
} \
} while (0)
// Report realm/zone non-heap bytes.
#define ZRREPORT_NONHEAP_BYTES(_path, _amount, _desc) \
do { \
size_t amount = _amount; /* evaluate _amount only once */ \
if (amount >= SUNDRIES_THRESHOLD) { \
handleReport->Callback(EmptyCString(), _path, \
nsIMemoryReporter::KIND_NONHEAP, \
nsIMemoryReporter::UNITS_BYTES, amount, \
NS_LITERAL_CSTRING(_desc), data); \
} else { \
sundriesNonHeap += amount; \
} \
} while (0)
// Report runtime bytes.
#define RREPORT_BYTES(_path, _kind, _amount, _desc) \
do { \
@@ -1384,7 +1398,10 @@ static void ReportZoneStats(const JS::ZoneStats& zStats,
nsISupports* data, bool anonymize,
size_t* gcTotalOut = nullptr) {
const nsCString& pathPrefix = extras.pathPrefix;
size_t gcTotal = 0, sundriesGCHeap = 0, sundriesMallocHeap = 0;
size_t gcTotal = 0;
size_t sundriesGCHeap = 0;
size_t sundriesMallocHeap = 0;
size_t sundriesNonHeap = 0;
MOZ_ASSERT(!gcTotalOut == zStats.isTotals);
@@ -1478,6 +1495,27 @@ static void ReportZoneStats(const JS::ZoneStats& zStats,
zStats.scriptCountsMap,
"Profiling-related information for scripts.");
ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/ion"),
zStats.code.ion,
"Code generated by the IonMonkey JIT.");
ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/baseline"),
zStats.code.baseline,
"Code generated by the Baseline JIT.");
ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/regexp"),
zStats.code.regexp,
"Code generated by the regexp JIT.");
ZRREPORT_NONHEAP_BYTES(
pathPrefix + NS_LITERAL_CSTRING("code/other"), zStats.code.other,
"Code generated by the JITs for wrappers and trampolines.");
ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/unused"),
zStats.code.unused,
"Memory allocated by one of the JITs to hold code, "
"but which is currently unused.");
size_t stringsNotableAboutMemoryGCHeap = 0;
size_t stringsNotableAboutMemoryMallocHeap = 0;
@@ -1665,6 +1703,14 @@ static void ReportZoneStats(const JS::ZoneStats& zStats,
"be worth showing individually.");
}
if (sundriesNonHeap > 0) {
// We deliberately don't use ZRREPORT_NONHEAP_BYTES here.
REPORT_BYTES(pathPrefix + NS_LITERAL_CSTRING("sundries/other-heap"),
KIND_NONHEAP, sundriesNonHeap,
"The sum of non-malloc/gc measurements that are too small to "
"be worth showing individually.");
}
if (gcTotalOut) {
*gcTotalOut += gcTotal;
}
@@ -2014,28 +2060,6 @@ void ReportJSRuntimeExplicitTreeStats(const JS::RuntimeStats& rtStats,
rtTotal);
}
RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/ion"), KIND_NONHEAP,
rtStats.runtime.code.ion,
"Code generated by the IonMonkey JIT.");
RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/baseline"),
KIND_NONHEAP, rtStats.runtime.code.baseline,
"Code generated by the Baseline JIT.");
RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/regexp"),
KIND_NONHEAP, rtStats.runtime.code.regexp,
"Code generated by the regexp JIT.");
RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/other"), KIND_NONHEAP,
rtStats.runtime.code.other,
"Code generated by the JITs for wrappers and trampolines.");
RREPORT_BYTES(
rtPath + NS_LITERAL_CSTRING("runtime/code/unused"), KIND_NONHEAP,
rtStats.runtime.code.unused,
"Memory allocated by one of the JITs to hold code, but which is "
"currently unused.");
RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/gc/marker"), KIND_HEAP,
rtStats.runtime.gc.marker, "The GC mark stack and gray roots.");