Bug 1233818 part 4 - Make implicit interrupts work with --non-writable-jitcode. r=bhackett

Jan de Mooij 2015-12-23 11:28:54 +01:00
Parent dbc0dce92e
Commit 1f20d726b5
12 changed files with 97 additions and 40 deletions

View file

@@ -1174,7 +1174,7 @@ RedirectIonBackedgesToInterruptCheck(JSRuntime* rt)
// thus not in a JIT iloop. We assume that the interrupt flag will be
// checked at least once before entering JIT code (if not, no big deal;
// the browser will just request another interrupt in a second).
if (!jitRuntime->mutatingBackedgeList())
if (!jitRuntime->preventBackedgePatching())
jitRuntime->patchIonBackedges(rt, jit::JitRuntime::BackedgeInterruptCheck);
}
}
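
The guard added above is the requester side of a simple handshake: the flag is written only by the main thread and read asynchronously by whatever requests the interrupt, and skipping the patch is harmless because the interrupt will be re-requested later. The following standalone sketch, with hypothetical names standing in for the ones in the hunk, shows the shape of that check; it is illustrative only, not the SpiderMonkey implementation.

    // Sketch of the flag-guarded patching decision above (hypothetical names).
    #include <cstdio>

    struct FakeJitRuntime {
        // Written only by the main thread; read asynchronously by the
        // interrupt-requesting path.
        volatile bool preventBackedgePatching = false;

        void patchIonBackedgesToInterruptCheck() {
            std::puts("backedges now jump to the interrupt check");
        }
    };

    // Mirrors the shape of RedirectIonBackedgesToInterruptCheck: patch only
    // when the main thread is not in a prevent-patching scope.
    static void RedirectBackedgesSketch(FakeJitRuntime& jrt) {
        if (!jrt.preventBackedgePatching)
            jrt.patchIonBackedgesToInterruptCheck();
        // Otherwise skip: the interrupt flag checked from C++ code still
        // catches the request, just a little later.
    }

    int main() {
        FakeJitRuntime rt;
        RedirectBackedgesSketch(rt);        // safe: patches
        rt.preventBackedgePatching = true;  // main thread enters an unsafe region
        RedirectBackedgesSketch(rt);        // skipped
    }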

View file

@@ -8167,7 +8167,7 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
// read barriers which were skipped while compiling the script off thread.
Linker linker(masm);
AutoFlushICache afc("IonLink");
JitCode* code = linker.newCode<CanGC>(cx, ION_CODE);
JitCode* code = linker.newCode<CanGC>(cx, ION_CODE, !patchableBackedges_.empty());
if (!code)
return false;

View file

@@ -120,7 +120,8 @@ ExecutablePool::available() const
return m_end - m_freePtr;
}
ExecutableAllocator::ExecutableAllocator()
ExecutableAllocator::ExecutableAllocator(JitRuntime* jrt)
: jrt_(jrt)
{
MOZ_ASSERT(m_smallPools.empty());
}
@@ -212,6 +213,8 @@ ExecutableAllocator::roundUpAllocationSize(size_t request, size_t granularity)
ExecutablePool*
ExecutableAllocator::createPool(size_t n)
{
MOZ_ASSERT(jrt_->preventBackedgePatching());
size_t allocSize = roundUpAllocationSize(n, pageSize);
if (allocSize == OVERSIZE_ALLOCATION)
return nullptr;
@@ -241,6 +244,9 @@ ExecutableAllocator::createPool(size_t n)
void*
ExecutableAllocator::alloc(size_t n, ExecutablePool** poolp, CodeKind type)
{
// Don't race with reprotectAll called from the signal handler.
JitRuntime::AutoPreventBackedgePatching apbp(jrt_);
// Caller must ensure 'n' is word-size aligned. If all allocations are
// of word sized quantities, then all subsequent allocations will be
// aligned.
@@ -265,6 +271,9 @@ ExecutableAllocator::alloc(size_t n, ExecutablePool** poolp, CodeKind type)
void
ExecutableAllocator::releasePoolPages(ExecutablePool* pool)
{
// Don't race with reprotectAll called from the signal handler.
JitRuntime::AutoPreventBackedgePatching apbp(jrt_);
MOZ_ASSERT(pool->m_allocation.pages);
systemRelease(pool->m_allocation);
@@ -278,6 +287,9 @@ ExecutableAllocator::releasePoolPages(ExecutablePool* pool)
void
ExecutableAllocator::purge()
{
// Don't race with reprotectAll called from the signal handler.
JitRuntime::AutoPreventBackedgePatching apbp(jrt_);
for (size_t i = 0; i < m_smallPools.length(); i++)
m_smallPools[i]->release();
m_smallPools.clear();
@@ -323,6 +335,22 @@ ExecutableAllocator::addSizeOfCode(JS::CodeSizes* sizes) const
}
}
void
ExecutableAllocator::reprotectAll(ProtectionSetting protection)
{
if (!nonWritableJitCode)
return;
if (!m_pools.initialized())
return;
for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
ExecutablePool* pool = r.front();
char* start = pool->m_allocation.pages;
reprotectRegion(start, pool->m_freePtr - start, protection);
}
}
#if TARGET_OS_IPHONE
bool ExecutableAllocator::nonWritableJitCode = true;
#else

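Since reprotectAll is the core of this patch, it may help to see what the reprotection amounts to at the OS level. The sketch below is a plain POSIX approximation under the assumption of an mprotect-style API; the real reprotectRegion in ExecutableAllocator is platform-specific (VirtualProtect on Windows, mprotect elsewhere), and every name here is illustrative rather than the actual implementation.

    // Minimal POSIX sketch of what flipping a pool between writable and
    // executable boils down to. Illustrative only, not SpiderMonkey code.
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstdint>

    enum class Protection { Writable, Executable };

    static void ReprotectRegionSketch(void* start, size_t size, Protection prot) {
        // mprotect wants page-aligned boundaries, so round the range outwards.
        const uintptr_t pageSize = static_cast<uintptr_t>(sysconf(_SC_PAGESIZE));
        uintptr_t begin = reinterpret_cast<uintptr_t>(start) & ~(pageSize - 1);
        uintptr_t end = (reinterpret_cast<uintptr_t>(start) + size + pageSize - 1) &
                        ~(pageSize - 1);
        int flags = (prot == Protection::Writable) ? (PROT_READ | PROT_WRITE)
                                                   : (PROT_READ | PROT_EXEC);
        int rv = mprotect(reinterpret_cast<void*>(begin), end - begin, flags);
        assert(rv == 0);
        (void)rv;
    }

    int main() {
        // Stand-in for one ExecutablePool's page allocation.
        const size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
        void* pool = mmap(nullptr, pageSize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(pool != MAP_FAILED);

        ReprotectRegionSketch(pool, pageSize, Protection::Executable);  // cf. makeAllExecutable()
        ReprotectRegionSketch(pool, pageSize, Protection::Writable);    // cf. makeAllWritable()
        munmap(pool, pageSize);
    }
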
View file

@@ -79,6 +79,7 @@ namespace jit {
enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
class ExecutableAllocator;
class JitRuntime;
// These are reference-counted. A new one starts with a count of 1.
class ExecutablePool
@@ -133,11 +134,12 @@ class ExecutableAllocator
#ifdef XP_WIN
mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG> randomNumberGenerator;
#endif
JitRuntime* jrt_;
public:
enum ProtectionSetting { Writable, Executable };
ExecutableAllocator();
explicit ExecutableAllocator(JitRuntime* jrt);
~ExecutableAllocator();
void purge();
@@ -184,6 +186,13 @@ class ExecutableAllocator
reprotectRegion(start, size, Executable);
}
void makeAllWritable() {
reprotectAll(Writable);
}
void makeAllExecutable() {
reprotectAll(Executable);
}
static unsigned initialProtectionFlags(ProtectionSetting protection);
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
@@ -259,6 +268,7 @@ class ExecutableAllocator
void operator=(const ExecutableAllocator&) = delete;
static void reprotectRegion(void*, size_t, ProtectionSetting);
void reprotectAll(ProtectionSetting);
// These are strong references; they keep pools alive.
static const size_t maxSmallPools = 4;

View file

@@ -166,7 +166,8 @@ jit::InitializeIon()
}
JitRuntime::JitRuntime()
: execAlloc_(),
: execAlloc_(this),
backedgeExecAlloc_(this),
exceptionTail_(nullptr),
bailoutTail_(nullptr),
profilerExitFrameTail_(nullptr),
@@ -179,7 +180,7 @@ JitRuntime::JitRuntime()
baselineDebugModeOSRHandler_(nullptr),
functionWrappers_(nullptr),
osrTempData_(nullptr),
mutatingBackedgeList_(false),
preventBackedgePatching_(false),
ionReturnOverride_(MagicValue(JS_ARG_POISON)),
jitcodeGlobalTable_(nullptr)
{
@@ -357,8 +358,10 @@ JitRuntime::freeOsrTempData()
void
JitRuntime::patchIonBackedges(JSRuntime* rt, BackedgeTarget target)
{
MOZ_ASSERT_IF(target == BackedgeLoopHeader, mutatingBackedgeList_);
MOZ_ASSERT_IF(target == BackedgeInterruptCheck, !mutatingBackedgeList_);
MOZ_ASSERT_IF(target == BackedgeLoopHeader, preventBackedgePatching_);
MOZ_ASSERT_IF(target == BackedgeInterruptCheck, !preventBackedgePatching_);
backedgeExecAlloc_.makeAllWritable();
// Patch all loop backedges in Ion code so that they either jump to the
// normal loop header or to an interrupt handler each time they run.
@@ -372,6 +375,8 @@ JitRuntime::patchIonBackedges(JSRuntime* rt, BackedgeTarget target)
else
PatchBackedge(patchableBackedge->backedge, patchableBackedge->interruptCheck, target);
}
backedgeExecAlloc_.makeAllExecutable();
}
JitCompartment::JitCompartment()
@@ -1124,7 +1129,7 @@ IonScript::copyPatchableBackedges(JSContext* cx, JitCode* code,
MacroAssembler& masm)
{
JitRuntime* jrt = cx->runtime()->jitRuntime();
JitRuntime::AutoMutateBackedges amb(jrt);
JitRuntime::AutoPreventBackedgePatching apbp(jrt);
for (size_t i = 0; i < backedgeEntries_; i++) {
PatchableBackedgeInfo& info = backedges[i];
@@ -1358,7 +1363,7 @@ IonScript::unlinkFromRuntime(FreeOp* fop)
// make sure that those backedges are unlinked from the runtime and not
// reclobbered with garbage if an interrupt is requested.
JitRuntime* jrt = fop->runtime()->jitRuntime();
JitRuntime::AutoMutateBackedges amb(jrt);
JitRuntime::AutoPreventBackedgePatching apbp(jrt);
for (size_t i = 0; i < backedgeEntries_; i++)
jrt->removePatchableBackedge(&backedgeList()[i]);

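A point that is easy to miss in the hunks above: the writable/executable toggling in patchIonBackedges stays cheap because Ion code with patchable backedges is segregated into its own allocator, so makeAllWritable and makeAllExecutable only ever touch those pools and never the rest of the JIT code. Below is a rough standalone sketch of that split, with hypothetical names and none of the real ref-counting or error handling.

    // Illustrative only: a dedicated allocator for backedge-bearing code means
    // the patch path reprotects only its pools. Names are hypothetical.
    #include <cstdio>
    #include <vector>

    struct Pool { const char* name; };

    struct PoolAllocator {
        std::vector<Pool> pools;                       // strong refs in the real thing
        void reprotectAll(const char* how) {
            for (const Pool& p : pools)
                std::printf("reprotect %s as %s\n", p.name, how);
        }
    };

    struct TwoAllocatorRuntime {
        PoolAllocator execAlloc;          // everything else
        PoolAllocator backedgeExecAlloc;  // only Ion code with patchable backedges

        void patchIonBackedges() {
            backedgeExecAlloc.reprotectAll("writable");    // cf. makeAllWritable()
            std::puts("patch every PatchableBackedge entry");
            backedgeExecAlloc.reprotectAll("executable");  // cf. makeAllExecutable()
        }
    };

    int main() {
        TwoAllocatorRuntime rt;
        rt.execAlloc.pools.push_back({"baseline/regexp pool"});
        rt.backedgeExecAlloc.pools.push_back({"ion-with-backedges pool"});
        rt.patchIonBackedges();  // only the backedge pool changes protection
    }
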
View file

@@ -83,9 +83,13 @@ class JitRuntime
{
friend class JitCompartment;
// Executable allocator for all code except asm.js code.
// Executable allocator for all code except asm.js code and Ion code with
// patchable backedges (see below).
ExecutableAllocator execAlloc_;
// Executable allocator for Ion scripts with patchable backedges.
ExecutableAllocator backedgeExecAlloc_;
// Shared exception-handler tail.
JitCode* exceptionTail_;
@@ -145,11 +149,14 @@ class JitRuntime
// (after returning from JIT code).
uint8_t* osrTempData_;
// If true, the signal handler to interrupt Ion code should not attempt to
// patch backedges, as we're busy modifying data structures.
volatile bool preventBackedgePatching_;
// List of all backedges in all Ion code. The backedge edge list is accessed
// asynchronously when the main thread is paused and mutatingBackedgeList_
// is false. Thus, the list must only be mutated while mutatingBackedgeList_
// asynchronously when the main thread is paused and preventBackedgePatching_
// is false. Thus, the list must only be mutated while preventBackedgePatching_
// is true.
volatile bool mutatingBackedgeList_;
InlineList<PatchableBackedge> backedgeList_;
// In certain cases, we want to optimize certain opcodes to typed instructions,
@@ -202,30 +209,34 @@ class JitRuntime
ExecutableAllocator& execAlloc() {
return execAlloc_;
}
ExecutableAllocator& backedgeExecAlloc() {
return backedgeExecAlloc_;
}
class AutoMutateBackedges
class AutoPreventBackedgePatching
{
JitRuntime* jrt_;
bool prev_;
public:
explicit AutoMutateBackedges(JitRuntime* jrt) : jrt_(jrt) {
MOZ_ASSERT(!jrt->mutatingBackedgeList_);
jrt->mutatingBackedgeList_ = true;
explicit AutoPreventBackedgePatching(JitRuntime* jrt) : jrt_(jrt) {
prev_ = jrt->preventBackedgePatching_;
jrt->preventBackedgePatching_ = true;
}
~AutoMutateBackedges() {
MOZ_ASSERT(jrt_->mutatingBackedgeList_);
jrt_->mutatingBackedgeList_ = false;
~AutoPreventBackedgePatching() {
MOZ_ASSERT(jrt_->preventBackedgePatching_);
jrt_->preventBackedgePatching_ = prev_;
}
};
bool mutatingBackedgeList() const {
return mutatingBackedgeList_;
bool preventBackedgePatching() const {
return preventBackedgePatching_;
}
void addPatchableBackedge(PatchableBackedge* backedge) {
MOZ_ASSERT(mutatingBackedgeList_);
MOZ_ASSERT(preventBackedgePatching_);
backedgeList_.pushFront(backedge);
}
void removePatchableBackedge(PatchableBackedge* backedge) {
MOZ_ASSERT(mutatingBackedgeList_);
MOZ_ASSERT(preventBackedgePatching_);
backedgeList_.remove(backedge);
}
@@ -495,13 +506,16 @@ const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
// is |false|, it's a no-op.
class MOZ_STACK_CLASS AutoWritableJitCode
{
// Backedge patching from the signal handler will change memory protection
// flags, so don't allow it in an AutoWritableJitCode scope.
JitRuntime::AutoPreventBackedgePatching preventPatching_;
JSRuntime* rt_;
void* addr_;
size_t size_;
public:
AutoWritableJitCode(JSRuntime* rt, void* addr, size_t size)
: rt_(rt), addr_(addr), size_(size)
: preventPatching_(rt->jitRuntime()), rt_(rt), addr_(addr), size_(size)
{
rt_->toggleAutoWritableJitCodeActive(true);
ExecutableAllocator::makeWritable(addr_, size_);

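Two details of the new guard are worth spelling out. Unlike the old AutoMutateBackedges, which asserted the flag was clear, AutoPreventBackedgePatching saves and restores the previous value, so guarded scopes can nest; and in AutoWritableJitCode the guard is the first member, so it is constructed before the constructor body starts making code writable. The sketch below demonstrates only the nesting behaviour, using hypothetical names that mirror the pattern rather than the real classes.

    // Sketch of a nestable RAII flag guard in the style of
    // AutoPreventBackedgePatching. Hypothetical names, illustrative only.
    #include <cassert>

    struct Runtime {
        volatile bool preventPatching = false;
    };

    class AutoPreventPatching {
        Runtime& rt_;
        bool prev_;
      public:
        explicit AutoPreventPatching(Runtime& rt)
          : rt_(rt), prev_(rt.preventPatching) {
            rt_.preventPatching = true;
        }
        ~AutoPreventPatching() {
            assert(rt_.preventPatching);
            rt_.preventPatching = prev_;   // restore, so nesting is safe
        }
    };

    int main() {
        Runtime rt;
        {
            AutoPreventPatching outer(rt);
            assert(rt.preventPatching);
            {
                AutoPreventPatching inner(rt);  // the old guard would have asserted here
                assert(rt.preventPatching);
            }
            assert(rt.preventPatching);         // inner restored "true", not "false"
        }
        assert(!rt.preventPatching);            // fully unwound
    }
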
View file

@@ -37,8 +37,9 @@ class Linker
}
template <AllowGC allowGC>
JitCode* newCode(JSContext* cx, CodeKind kind) {
JitCode* newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges = false) {
MOZ_ASSERT(masm.numAsmJSAbsoluteLinks() == 0);
MOZ_ASSERT_IF(hasPatchableBackedges, kind == ION_CODE);
gc::AutoSuppressGC suppressGC(cx);
if (masm.oom())
@@ -52,7 +53,9 @@ class Linker
// ExecutableAllocator requires bytesNeeded to be word-size aligned.
bytesNeeded = AlignBytes(bytesNeeded, sizeof(void*));
ExecutableAllocator& execAlloc = cx->runtime()->jitRuntime()->execAlloc();
ExecutableAllocator& execAlloc = hasPatchableBackedges
? cx->runtime()->jitRuntime()->backedgeExecAlloc()
: cx->runtime()->jitRuntime()->execAlloc();
uint8_t* result = (uint8_t*)execAlloc.alloc(bytesNeeded, &pool, kind);
if (!result)
return fail(cx);

View file

@@ -93,15 +93,8 @@ static void
TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
{
// Implicit interrupt checks require asm.js signal handlers to be installed.
// They also require writable JIT code: reprotecting in patchIonBackedges
// would be expensive and using AutoWritableJitCode in the signal handler
// is complicated because there could be another AutoWritableJitCode on the
// stack.
if (!GetJitContext()->runtime->canUseSignalHandlers() ||
ExecutableAllocator::nonWritableJitCode)
{
if (!GetJitContext()->runtime->canUseSignalHandlers())
return;
}
// To avoid triggering expensive interrupts (backedge patching) in
// requestMajorGC and requestMinorGC, use an implicit interrupt check only

View file

@@ -480,7 +480,7 @@ InterruptCheck(JSContext* cx)
{
JitRuntime* jrt = cx->runtime()->jitRuntime();
JitRuntime::AutoMutateBackedges amb(jrt);
JitRuntime::AutoPreventBackedgePatching apbp(jrt);
jrt->patchIonBackedges(cx->runtime(), JitRuntime::BackedgeLoopHeader);
}

View file

@@ -165,7 +165,7 @@ JSRuntime::createJitRuntime(JSContext* cx)
// Protect jitRuntime_ from being observed (by InterruptRunningJitCode)
// while it is being initialized. Unfortunately, initialization depends on
// jitRuntime_ being non-null, so we can't just wait to assign jitRuntime_.
JitRuntime::AutoMutateBackedges amb(jrt);
JitRuntime::AutoPreventBackedgePatching apbp(jrt);
jitRuntime_ = jrt;
if (!jitRuntime_->initialize(cx)) {

View file

@@ -5556,8 +5556,10 @@ GCRuntime::endSweepPhase(bool destroyingRuntime)
SweepScriptData(rt);
/* Clear out any small pools that we're hanging on to. */
if (jit::JitRuntime* jitRuntime = rt->jitRuntime())
if (jit::JitRuntime* jitRuntime = rt->jitRuntime()) {
jitRuntime->execAlloc().purge();
jitRuntime->backedgeExecAlloc().purge();
}
/*
* This removes compartments from rt->compartment, so we do it last to make

View file

@@ -542,8 +542,10 @@ JSRuntime::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::Runtim
for (ScriptDataTable::Range r = scriptDataTable().all(); !r.empty(); r.popFront())
rtSizes->scriptData += mallocSizeOf(r.front());
if (jitRuntime_)
if (jitRuntime_) {
jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
jitRuntime_->backedgeExecAlloc().addSizeOfCode(&rtSizes->code);
}
rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf);
rtSizes->gc.nurseryCommitted += gc.nursery.sizeOfHeapCommitted();