Bug 1551796 part 11 - Move more JitScript code into JitScript.cpp. r=tcampbell

JitScript::initICEntriesAndBytecodeTypeMap is still in BaselineIC.cpp because
it depends on things defined there (like FallbackStubAllocator) and I think it's
not unreasonable to keep it there.

Differential Revision: https://phabricator.services.mozilla.com/D32303

--HG--
extra : moz-landing-system : lando
Jan de Mooij 2019-05-24 12:03:13 +00:00
Parent 32ffc2b901
Commit 182c086abf
5 changed files with 339 additions and 336 deletions

View file

@@ -1293,15 +1293,6 @@ void ICStubCompilerBase::PushStubPayload(MacroAssembler& masm,
   masm.adjustFrame(sizeof(intptr_t));
 }
 
-void JitScript::noteAccessedGetter(uint32_t pcOffset) {
-  ICEntry& entry = icEntryFromPCOffset(pcOffset);
-  ICFallbackStub* stub = entry.fallbackStub();
-
-  if (stub->isGetProp_Fallback()) {
-    stub->toGetProp_Fallback()->noteAccessedGetter();
-  }
-}
-
 // TypeMonitor_Fallback
 //
@@ -2489,15 +2480,6 @@ bool FallbackICCodeCompiler::emit_SetElem() {
   return tailCallVM<Fn, DoSetElemFallback>(masm);
 }
 
-void JitScript::noteHasDenseAdd(uint32_t pcOffset) {
-  ICEntry& entry = icEntryFromPCOffset(pcOffset);
-  ICFallbackStub* stub = entry.fallbackStub();
-
-  if (stub->isSetElem_Fallback()) {
-    stub->toSetElem_Fallback()->noteHasDenseAdd();
-  }
-}
-
 template <typename T>
 void StoreToTypedArray(JSContext* cx, MacroAssembler& masm, Scalar::Type type,
                        const ValueOperand& value, const T& dest,

View file

@@ -517,14 +517,6 @@ void BaselineScript::trace(JSTracer* trc) {
   TraceNullableEdge(trc, &templateEnv_, "baseline-template-environment");
 }
 
-void JitScript::trace(JSTracer* trc) {
-  // Mark all IC stub codes hanging off the IC stub entries.
-  for (size_t i = 0; i < numICEntries(); i++) {
-    ICEntry& ent = icEntry(i);
-    ent.trace(trc);
-  }
-}
-
 /* static */
 void BaselineScript::writeBarrierPre(Zone* zone, BaselineScript* script) {
   if (zone->needsIncrementalBarrier()) {
@@ -619,15 +611,6 @@ CompactBufferReader BaselineScript::pcMappingReader(size_t indexEntry) {
   return CompactBufferReader(dataStart, dataEnd);
 }
 
-struct ICEntries {
-  JitScript* const jitScript_;
-
-  explicit ICEntries(JitScript* jitScript) : jitScript_(jitScript) {}
-
-  size_t numEntries() const { return jitScript_->numICEntries(); }
-  ICEntry& operator[](size_t index) const { return jitScript_->icEntry(index); }
-};
-
 struct RetAddrEntries {
   BaselineScript* const baseline_;
@@ -667,30 +650,6 @@ RetAddrEntry& BaselineScript::retAddrEntryFromReturnOffset(
   return retAddrEntry(loc);
 }
 
-static bool ComputeBinarySearchMid(ICEntries entries, uint32_t pcOffset,
-                                   size_t* loc) {
-  return BinarySearchIf(
-      entries, 0, entries.numEntries(),
-      [pcOffset](const ICEntry& entry) {
-        uint32_t entryOffset = entry.pcOffset();
-        if (pcOffset < entryOffset) {
-          return -1;
-        }
-        if (entryOffset < pcOffset) {
-          return 1;
-        }
-        if (entry.isForPrologue()) {
-          // Prologue ICEntries are used for function argument type checks.
-          // Ignore these entries and return 1 because these entries appear in
-          // the ICEntry list before the other ICEntry (if any) at offset 0.
-          MOZ_ASSERT(entryOffset == 0);
-          return 1;
-        }
-        return 0;
-      },
-      loc);
-}
-
 static bool ComputeBinarySearchMid(RetAddrEntries entries, uint32_t pcOffset,
                                    size_t* loc) {
   return BinarySearchIf(
@@ -712,82 +671,6 @@ uint8_t* BaselineScript::returnAddressForEntry(const RetAddrEntry& ent) {
   return method()->raw() + ent.returnOffset().offset();
 }
 
-ICEntry* JitScript::maybeICEntryFromPCOffset(uint32_t pcOffset) {
-  // This method ignores prologue IC entries. There can be at most one
-  // non-prologue IC per bytecode op.
-  size_t mid;
-  if (!ComputeBinarySearchMid(ICEntries(this), pcOffset, &mid)) {
-    return nullptr;
-  }
-
-  MOZ_ASSERT(mid < numICEntries());
-
-  ICEntry& entry = icEntry(mid);
-  MOZ_ASSERT(!entry.isForPrologue());
-  MOZ_ASSERT(entry.pcOffset() == pcOffset);
-  return &entry;
-}
-
-ICEntry& JitScript::icEntryFromPCOffset(uint32_t pcOffset) {
-  ICEntry* entry = maybeICEntryFromPCOffset(pcOffset);
-  MOZ_RELEASE_ASSERT(entry);
-  return *entry;
-}
-
-ICEntry* JitScript::maybeICEntryFromPCOffset(uint32_t pcOffset,
-                                             ICEntry* prevLookedUpEntry) {
-  // Do a linear forward search from the last queried PC offset, or fallback to
-  // a binary search if the last offset is too far away.
-  if (prevLookedUpEntry && pcOffset >= prevLookedUpEntry->pcOffset() &&
-      (pcOffset - prevLookedUpEntry->pcOffset()) <= 10) {
-    ICEntry* firstEntry = &icEntry(0);
-    ICEntry* lastEntry = &icEntry(numICEntries() - 1);
-    ICEntry* curEntry = prevLookedUpEntry;
-    while (curEntry >= firstEntry && curEntry <= lastEntry) {
-      if (curEntry->pcOffset() == pcOffset && !curEntry->isForPrologue()) {
-        return curEntry;
-      }
-      curEntry++;
-    }
-    return nullptr;
-  }
-
-  return maybeICEntryFromPCOffset(pcOffset);
-}
-
-ICEntry& JitScript::icEntryFromPCOffset(uint32_t pcOffset,
-                                        ICEntry* prevLookedUpEntry) {
-  ICEntry* entry = maybeICEntryFromPCOffset(pcOffset, prevLookedUpEntry);
-  MOZ_RELEASE_ASSERT(entry);
-  return *entry;
-}
-
-ICEntry* JitScript::interpreterICEntryFromPCOffset(uint32_t pcOffset) {
-  // We have to return the entry to store in BaselineFrame::interpreterICEntry
-  // when resuming in the Baseline Interpreter at pcOffset. The bytecode op at
-  // pcOffset does not necessarily have an ICEntry, so we want to return the
-  // first ICEntry for which the following is true:
-  //
-  //    !entry.isForPrologue() && entry.pcOffset() >= pcOffset
-  //
-  // Fortunately, ComputeBinarySearchMid returns exactly this entry.
-  size_t mid;
-  ComputeBinarySearchMid(ICEntries(this), pcOffset, &mid);
-
-  if (mid < numICEntries()) {
-    ICEntry& entry = icEntry(mid);
-    MOZ_ASSERT(!entry.isForPrologue());
-    MOZ_ASSERT(entry.pcOffset() >= pcOffset);
-    return &entry;
-  }
-
-  // Resuming at a pc after the last ICEntry. Just return nullptr:
-  // BaselineFrame::interpreterICEntry will never be used in this case.
-  return nullptr;
-}
-
 RetAddrEntry& BaselineScript::retAddrEntryFromPCOffset(
     uint32_t pcOffset, RetAddrEntry::Kind kind) {
   size_t mid;
@@ -1197,150 +1080,6 @@ void BaselineInterpreter::toggleCodeCoverageInstrumentation(bool enable) {
   toggleCodeCoverageInstrumentationUnchecked(enable);
 }
 
-void JitScript::purgeOptimizedStubs(JSScript* script) {
-  MOZ_ASSERT(script->jitScript() == this);
-
-  Zone* zone = script->zone();
-  if (zone->isGCSweeping() && IsAboutToBeFinalizedDuringSweep(*script)) {
-    // We're sweeping and the script is dead. Don't purge optimized stubs
-    // because (1) accessing CacheIRStubInfo pointers in ICStubs is invalid
-    // because we may have swept them already when we started (incremental)
-    // sweeping and (2) it's unnecessary because this script will be finalized
-    // soon anyway.
-    return;
-  }
-
-  JitSpew(JitSpew_BaselineIC, "Purging optimized stubs");
-
-  for (size_t i = 0; i < numICEntries(); i++) {
-    ICEntry& entry = icEntry(i);
-    ICStub* lastStub = entry.firstStub();
-    while (lastStub->next()) {
-      lastStub = lastStub->next();
-    }
-
-    if (lastStub->isFallback()) {
-      // Unlink all stubs allocated in the optimized space.
-      ICStub* stub = entry.firstStub();
-      ICStub* prev = nullptr;
-
-      while (stub->next()) {
-        if (!stub->allocatedInFallbackSpace()) {
-          lastStub->toFallbackStub()->unlinkStub(zone, prev, stub);
-          stub = stub->next();
-          continue;
-        }
-
-        prev = stub;
-        stub = stub->next();
-      }
-
-      if (lastStub->isMonitoredFallback()) {
-        // Monitor stubs can't make calls, so are always in the
-        // optimized stub space.
-        ICTypeMonitor_Fallback* lastMonStub =
-            lastStub->toMonitoredFallbackStub()->maybeFallbackMonitorStub();
-        if (lastMonStub) {
-          lastMonStub->resetMonitorStubChain(zone);
-        }
-      }
-    } else if (lastStub->isTypeMonitor_Fallback()) {
-      lastStub->toTypeMonitor_Fallback()->resetMonitorStubChain(zone);
-    } else {
-      MOZ_CRASH("Unknown fallback stub");
-    }
-  }
-
-#ifdef DEBUG
-  // All remaining stubs must be allocated in the fallback space.
-  for (size_t i = 0; i < numICEntries(); i++) {
-    ICEntry& entry = icEntry(i);
-    ICStub* stub = entry.firstStub();
-    while (stub->next()) {
-      MOZ_ASSERT(stub->allocatedInFallbackSpace());
-      stub = stub->next();
-    }
-  }
-#endif
-}
-
-#ifdef JS_STRUCTURED_SPEW
-static bool GetStubEnteredCount(ICStub* stub, uint32_t* count) {
-  switch (stub->kind()) {
-    case ICStub::CacheIR_Regular:
-      *count = stub->toCacheIR_Regular()->enteredCount();
-      return true;
-    case ICStub::CacheIR_Updated:
-      *count = stub->toCacheIR_Updated()->enteredCount();
-      return true;
-    case ICStub::CacheIR_Monitored:
-      *count = stub->toCacheIR_Monitored()->enteredCount();
-      return true;
-    default:
-      return false;
-  }
-}
-
-bool HasEnteredCounters(ICEntry& entry) {
-  ICStub* stub = entry.firstStub();
-  while (stub && !stub->isFallback()) {
-    uint32_t count;
-    if (GetStubEnteredCount(stub, &count)) {
-      return true;
-    }
-    stub = stub->next();
-  }
-  return false;
-}
-
-void jit::JitSpewBaselineICStats(JSScript* script, const char* dumpReason) {
-  MOZ_ASSERT(script->hasJitScript());
-  JSContext* cx = TlsContext.get();
-  AutoStructuredSpewer spew(cx, SpewChannel::BaselineICStats, script);
-  if (!spew) {
-    return;
-  }
-
-  JitScript* jitScript = script->jitScript();
-  spew->property("reason", dumpReason);
-
-  spew->beginListProperty("entries");
-  for (size_t i = 0; i < jitScript->numICEntries(); i++) {
-    ICEntry& entry = jitScript->icEntry(i);
-    if (!HasEnteredCounters(entry)) {
-      continue;
-    }
-
-    uint32_t pcOffset = entry.pcOffset();
-    jsbytecode* pc = entry.pc(script);
-
-    unsigned column;
-    unsigned int line = PCToLineNumber(script, pc, &column);
-
-    spew->beginObject();
-    spew->property("op", CodeName[*pc]);
-    spew->property("pc", pcOffset);
-    spew->property("line", line);
-    spew->property("column", column);
-
-    spew->beginListProperty("counts");
-    ICStub* stub = entry.firstStub();
-    while (stub && !stub->isFallback()) {
-      uint32_t count;
-      if (GetStubEnteredCount(stub, &count)) {
-        spew->value(count);
-      } else {
-        spew->value("?");
-      }
-      stub = stub->next();
-    }
-    spew->endList();
-    spew->property("fallback_count", entry.fallbackStub()->enteredCount());
-    spew->endObject();
-  }
-  spew->endList();
-}
-#endif
-
 void jit::FinishDiscardBaselineScript(FreeOp* fop, JSScript* script) {
   MOZ_ASSERT(script->hasBaselineScript());
   MOZ_ASSERT(!script->jitScript()->active());
@@ -1410,51 +1149,6 @@ void jit::ToggleBaselineTraceLoggerEngine(JSRuntime* runtime, bool enable) {
 }
 #endif
 
-static void MarkActiveJitScripts(JSContext* cx,
-                                 const JitActivationIterator& activation) {
-  for (OnlyJSJitFrameIter iter(activation); !iter.done(); ++iter) {
-    const JSJitFrameIter& frame = iter.frame();
-    switch (frame.type()) {
-      case FrameType::BaselineJS:
-        frame.script()->jitScript()->setActive();
-        break;
-      case FrameType::Exit:
-        if (frame.exitFrame()->is<LazyLinkExitFrameLayout>()) {
-          LazyLinkExitFrameLayout* ll =
-              frame.exitFrame()->as<LazyLinkExitFrameLayout>();
-          JSScript* script =
-              ScriptFromCalleeToken(ll->jsFrame()->calleeToken());
-          script->jitScript()->setActive();
-        }
-        break;
-      case FrameType::Bailout:
-      case FrameType::IonJS: {
-        // Keep the JitScript and BaselineScript around, since bailouts from
-        // the ion jitcode need to re-enter into the Baseline code.
-        frame.script()->jitScript()->setActive();
-        for (InlineFrameIterator inlineIter(cx, &frame); inlineIter.more();
-             ++inlineIter) {
-          inlineIter.script()->jitScript()->setActive();
-        }
-        break;
-      }
-      default:;
-    }
-  }
-}
-
-void jit::MarkActiveJitScripts(Zone* zone) {
-  if (zone->isAtomsZone()) {
-    return;
-  }
-  JSContext* cx = TlsContext.get();
-  for (JitActivationIterator iter(cx); !iter.done(); ++iter) {
-    if (iter->compartment()->zone() == zone) {
-      MarkActiveJitScripts(cx, iter);
-    }
-  }
-}
-
 void BaselineInterpreter::init(JitCode* code, uint32_t interpretOpOffset,
                                uint32_t profilerEnterToggleOffset,
                                uint32_t profilerExitToggleOffset,

View file

@@ -638,17 +638,9 @@ MOZ_MUST_USE bool BailoutIonToBaseline(
     bool invalidate, BaselineBailoutInfo** bailoutInfo,
     const ExceptionBailoutInfo* exceptionInfo);
 
-// Mark JitScripts on the stack as active, so that they are not discarded
-// during GC.
-void MarkActiveJitScripts(Zone* zone);
-
 MethodStatus BaselineCompile(JSContext* cx, JSScript* script,
                              bool forceDebugInstrumentation = false);
 
-#ifdef JS_STRUCTURED_SPEW
-void JitSpewBaselineICStats(JSScript* script, const char* dumpReason);
-#endif
-
 static const unsigned BASELINE_MAX_ARGS_LENGTH = 20000;
 
 // Class storing the generated Baseline Interpreter code for the runtime.

View file

@@ -6,14 +6,17 @@
 
 #include "jit/JitScript-inl.h"
 
+#include "mozilla/BinarySearch.h"
 #include "mozilla/IntegerPrintfMacros.h"
 #include "mozilla/Move.h"
 #include "mozilla/ScopeExit.h"
 
 #include "jit/BaselineIC.h"
 #include "vm/JSScript.h"
+#include "vm/Stack.h"
 #include "vm/TypeInference.h"
 
+#include "jit/JSJitFrameIter-inl.h"
 #include "vm/JSScript-inl.h"
 #include "vm/TypeInference-inl.h"
@@ -153,10 +156,12 @@ void JSScript::maybeReleaseJitScript() {
   updateJitCodeRaw(runtimeFromMainThread());
 }
 
-/* static */
-void JitScript::Destroy(Zone* zone, JitScript* script) {
-  script->prepareForDestruction(zone);
-  js_delete(script);
+void JitScript::trace(JSTracer* trc) {
+  // Mark all IC stub codes hanging off the IC stub entries.
+  for (size_t i = 0; i < numICEntries(); i++) {
+    ICEntry& ent = icEntry(i);
+    ent.trace(trc);
+  }
 }
 
 #ifdef DEBUG
@@ -217,3 +222,325 @@ void JitScript::printTypes(JSContext* cx, HandleScript script) {
   fprintf(stderr, "\n");
 }
 #endif /* DEBUG */
+
+/* static */
+void JitScript::Destroy(Zone* zone, JitScript* script) {
+  script->prepareForDestruction(zone);
+  js_delete(script);
+}
+
+struct ICEntries {
+  JitScript* const jitScript_;
+
+  explicit ICEntries(JitScript* jitScript) : jitScript_(jitScript) {}
+
+  size_t numEntries() const { return jitScript_->numICEntries(); }
+  ICEntry& operator[](size_t index) const { return jitScript_->icEntry(index); }
+};
+
+static bool ComputeBinarySearchMid(ICEntries entries, uint32_t pcOffset,
+                                   size_t* loc) {
+  return mozilla::BinarySearchIf(
+      entries, 0, entries.numEntries(),
+      [pcOffset](const ICEntry& entry) {
+        uint32_t entryOffset = entry.pcOffset();
+        if (pcOffset < entryOffset) {
+          return -1;
+        }
+        if (entryOffset < pcOffset) {
+          return 1;
+        }
+        if (entry.isForPrologue()) {
+          // Prologue ICEntries are used for function argument type checks.
+          // Ignore these entries and return 1 because these entries appear in
+          // the ICEntry list before the other ICEntry (if any) at offset 0.
+          MOZ_ASSERT(entryOffset == 0);
+          return 1;
+        }
+        return 0;
+      },
+      loc);
+}
+
+ICEntry* JitScript::maybeICEntryFromPCOffset(uint32_t pcOffset) {
+  // This method ignores prologue IC entries. There can be at most one
+  // non-prologue IC per bytecode op.
+  size_t mid;
+  if (!ComputeBinarySearchMid(ICEntries(this), pcOffset, &mid)) {
+    return nullptr;
+  }
+
+  MOZ_ASSERT(mid < numICEntries());
+
+  ICEntry& entry = icEntry(mid);
+  MOZ_ASSERT(!entry.isForPrologue());
+  MOZ_ASSERT(entry.pcOffset() == pcOffset);
+  return &entry;
+}
+
+ICEntry& JitScript::icEntryFromPCOffset(uint32_t pcOffset) {
+  ICEntry* entry = maybeICEntryFromPCOffset(pcOffset);
+  MOZ_RELEASE_ASSERT(entry);
+  return *entry;
+}
+
+ICEntry* JitScript::maybeICEntryFromPCOffset(uint32_t pcOffset,
+                                             ICEntry* prevLookedUpEntry) {
+  // Do a linear forward search from the last queried PC offset, or fallback to
+  // a binary search if the last offset is too far away.
+  if (prevLookedUpEntry && pcOffset >= prevLookedUpEntry->pcOffset() &&
+      (pcOffset - prevLookedUpEntry->pcOffset()) <= 10) {
+    ICEntry* firstEntry = &icEntry(0);
+    ICEntry* lastEntry = &icEntry(numICEntries() - 1);
+    ICEntry* curEntry = prevLookedUpEntry;
+    while (curEntry >= firstEntry && curEntry <= lastEntry) {
+      if (curEntry->pcOffset() == pcOffset && !curEntry->isForPrologue()) {
+        return curEntry;
+      }
+      curEntry++;
+    }
+    return nullptr;
+  }
+
+  return maybeICEntryFromPCOffset(pcOffset);
+}
+
+ICEntry& JitScript::icEntryFromPCOffset(uint32_t pcOffset,
+                                        ICEntry* prevLookedUpEntry) {
+  ICEntry* entry = maybeICEntryFromPCOffset(pcOffset, prevLookedUpEntry);
+  MOZ_RELEASE_ASSERT(entry);
+  return *entry;
+}
+
+ICEntry* JitScript::interpreterICEntryFromPCOffset(uint32_t pcOffset) {
+  // We have to return the entry to store in BaselineFrame::interpreterICEntry
+  // when resuming in the Baseline Interpreter at pcOffset. The bytecode op at
+  // pcOffset does not necessarily have an ICEntry, so we want to return the
+  // first ICEntry for which the following is true:
+  //
+  //    !entry.isForPrologue() && entry.pcOffset() >= pcOffset
+  //
+  // Fortunately, ComputeBinarySearchMid returns exactly this entry.
+  size_t mid;
+  ComputeBinarySearchMid(ICEntries(this), pcOffset, &mid);
+
+  if (mid < numICEntries()) {
+    ICEntry& entry = icEntry(mid);
+    MOZ_ASSERT(!entry.isForPrologue());
+    MOZ_ASSERT(entry.pcOffset() >= pcOffset);
+    return &entry;
+  }
+
+  // Resuming at a pc after the last ICEntry. Just return nullptr:
+  // BaselineFrame::interpreterICEntry will never be used in this case.
+  return nullptr;
+}
+
+void JitScript::purgeOptimizedStubs(JSScript* script) {
+  MOZ_ASSERT(script->jitScript() == this);
+
+  Zone* zone = script->zone();
+  if (zone->isGCSweeping() && IsAboutToBeFinalizedDuringSweep(*script)) {
+    // We're sweeping and the script is dead. Don't purge optimized stubs
+    // because (1) accessing CacheIRStubInfo pointers in ICStubs is invalid
+    // because we may have swept them already when we started (incremental)
+    // sweeping and (2) it's unnecessary because this script will be finalized
+    // soon anyway.
+    return;
+  }
+
+  JitSpew(JitSpew_BaselineIC, "Purging optimized stubs");
+
+  for (size_t i = 0; i < numICEntries(); i++) {
+    ICEntry& entry = icEntry(i);
+    ICStub* lastStub = entry.firstStub();
+    while (lastStub->next()) {
+      lastStub = lastStub->next();
+    }
+
+    if (lastStub->isFallback()) {
+      // Unlink all stubs allocated in the optimized space.
+      ICStub* stub = entry.firstStub();
+      ICStub* prev = nullptr;
+
+      while (stub->next()) {
+        if (!stub->allocatedInFallbackSpace()) {
+          lastStub->toFallbackStub()->unlinkStub(zone, prev, stub);
+          stub = stub->next();
+          continue;
+        }
+
+        prev = stub;
+        stub = stub->next();
+      }
+
+      if (lastStub->isMonitoredFallback()) {
+        // Monitor stubs can't make calls, so are always in the
+        // optimized stub space.
+        ICTypeMonitor_Fallback* lastMonStub =
+            lastStub->toMonitoredFallbackStub()->maybeFallbackMonitorStub();
+        if (lastMonStub) {
+          lastMonStub->resetMonitorStubChain(zone);
+        }
+      }
+    } else if (lastStub->isTypeMonitor_Fallback()) {
+      lastStub->toTypeMonitor_Fallback()->resetMonitorStubChain(zone);
+    } else {
+      MOZ_CRASH("Unknown fallback stub");
+    }
+  }
+
+#ifdef DEBUG
+  // All remaining stubs must be allocated in the fallback space.
+  for (size_t i = 0; i < numICEntries(); i++) {
+    ICEntry& entry = icEntry(i);
+    ICStub* stub = entry.firstStub();
+    while (stub->next()) {
+      MOZ_ASSERT(stub->allocatedInFallbackSpace());
+      stub = stub->next();
+    }
+  }
+#endif
+}
+
+void JitScript::noteAccessedGetter(uint32_t pcOffset) {
+  ICEntry& entry = icEntryFromPCOffset(pcOffset);
+  ICFallbackStub* stub = entry.fallbackStub();
+
+  if (stub->isGetProp_Fallback()) {
+    stub->toGetProp_Fallback()->noteAccessedGetter();
+  }
+}
+
+void JitScript::noteHasDenseAdd(uint32_t pcOffset) {
+  ICEntry& entry = icEntryFromPCOffset(pcOffset);
+  ICFallbackStub* stub = entry.fallbackStub();
+
+  if (stub->isSetElem_Fallback()) {
+    stub->toSetElem_Fallback()->noteHasDenseAdd();
+  }
+}
+
+#ifdef JS_STRUCTURED_SPEW
+static bool GetStubEnteredCount(ICStub* stub, uint32_t* count) {
+  switch (stub->kind()) {
+    case ICStub::CacheIR_Regular:
+      *count = stub->toCacheIR_Regular()->enteredCount();
+      return true;
+    case ICStub::CacheIR_Updated:
+      *count = stub->toCacheIR_Updated()->enteredCount();
+      return true;
+    case ICStub::CacheIR_Monitored:
+      *count = stub->toCacheIR_Monitored()->enteredCount();
+      return true;
+    default:
+      return false;
+  }
+}
+
+static bool HasEnteredCounters(ICEntry& entry) {
+  ICStub* stub = entry.firstStub();
+  while (stub && !stub->isFallback()) {
+    uint32_t count;
+    if (GetStubEnteredCount(stub, &count)) {
+      return true;
+    }
+    stub = stub->next();
+  }
+  return false;
+}
+
+void jit::JitSpewBaselineICStats(JSScript* script, const char* dumpReason) {
+  MOZ_ASSERT(script->hasJitScript());
+  JSContext* cx = TlsContext.get();
+  AutoStructuredSpewer spew(cx, SpewChannel::BaselineICStats, script);
+  if (!spew) {
+    return;
+  }
+
+  JitScript* jitScript = script->jitScript();
+  spew->property("reason", dumpReason);
+
+  spew->beginListProperty("entries");
+  for (size_t i = 0; i < jitScript->numICEntries(); i++) {
+    ICEntry& entry = jitScript->icEntry(i);
+    if (!HasEnteredCounters(entry)) {
+      continue;
+    }
+
+    uint32_t pcOffset = entry.pcOffset();
+    jsbytecode* pc = entry.pc(script);
+
+    unsigned column;
+    unsigned int line = PCToLineNumber(script, pc, &column);
+
+    spew->beginObject();
+    spew->property("op", CodeName[*pc]);
+    spew->property("pc", pcOffset);
+    spew->property("line", line);
+    spew->property("column", column);
+
+    spew->beginListProperty("counts");
+    ICStub* stub = entry.firstStub();
+    while (stub && !stub->isFallback()) {
+      uint32_t count;
+      if (GetStubEnteredCount(stub, &count)) {
+        spew->value(count);
+      } else {
+        spew->value("?");
+      }
+      stub = stub->next();
+    }
+    spew->endList();
+    spew->property("fallback_count", entry.fallbackStub()->enteredCount());
+    spew->endObject();
+  }
+  spew->endList();
+}
+#endif
+
+static void MarkActiveJitScripts(JSContext* cx,
+                                 const JitActivationIterator& activation) {
+  for (OnlyJSJitFrameIter iter(activation); !iter.done(); ++iter) {
+    const JSJitFrameIter& frame = iter.frame();
+    switch (frame.type()) {
+      case FrameType::BaselineJS:
+        frame.script()->jitScript()->setActive();
+        break;
+      case FrameType::Exit:
+        if (frame.exitFrame()->is<LazyLinkExitFrameLayout>()) {
+          LazyLinkExitFrameLayout* ll =
+              frame.exitFrame()->as<LazyLinkExitFrameLayout>();
+          JSScript* script =
+              ScriptFromCalleeToken(ll->jsFrame()->calleeToken());
+          script->jitScript()->setActive();
+        }
+        break;
+      case FrameType::Bailout:
+      case FrameType::IonJS: {
+        // Keep the JitScript and BaselineScript around, since bailouts from
+        // the ion jitcode need to re-enter into the Baseline code.
+        frame.script()->jitScript()->setActive();
+        for (InlineFrameIterator inlineIter(cx, &frame); inlineIter.more();
+             ++inlineIter) {
+          inlineIter.script()->jitScript()->setActive();
+        }
+        break;
+      }
+      default:;
+    }
+  }
+}
+
+void jit::MarkActiveJitScripts(Zone* zone) {
+  if (zone->isAtomsZone()) {
+    return;
+  }
+  JSContext* cx = TlsContext.get();
+  for (JitActivationIterator iter(cx); !iter.done(); ++iter) {
+    if (iter->compartment()->zone() == zone) {
+      MarkActiveJitScripts(cx, iter);
+    }
+  }
+}

View file

@@ -314,6 +314,14 @@ class MOZ_RAII AutoKeepJitScripts {
   inline ~AutoKeepJitScripts();
 };
 
+// Mark JitScripts on the stack as active, so that they are not discarded
+// during GC.
+void MarkActiveJitScripts(Zone* zone);
+
+#ifdef JS_STRUCTURED_SPEW
+void JitSpewBaselineICStats(JSScript* script, const char* dumpReason);
+#endif
+
 }  // namespace jit
 }  // namespace js