Bug 1406041: Implement process-wide wasm code lookup; r=luke

--HG--
extra : rebase_source : 78f074a8aa470014276f77bda151d3165588fa4d
This commit is contained in:
Benjamin Bouvier 2017-10-11 11:40:30 +02:00
Родитель 6a2135a0bb
Коммит dbd7908b4d
14 изменённых файлов: 314 добавлений и 113 удалений

Просмотреть файл

@ -1596,7 +1596,7 @@ Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
void* pc = reinterpret_cast<void*>(get_pc());
uint8_t* fp = reinterpret_cast<uint8_t*>(get_register(r11));
const wasm::CodeSegment* segment = act->compartment()->wasm.lookupCodeSegment(pc);
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
return false;

Просмотреть файл

@ -1641,7 +1641,7 @@ Simulator::handleWasmInterrupt()
void* fp = (void*)getRegister(Register::fp);
JitActivation* activation = TlsContext.get()->activation()->asJit();
const wasm::CodeSegment* segment = activation->compartment()->wasm.lookupCodeSegment(pc);
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment || !segment->containsCodePC(pc))
return;
@ -1671,7 +1671,7 @@ Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
void* pc = reinterpret_cast<void*>(get_pc());
uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp));
const wasm::CodeSegment* segment = act->compartment()->wasm.lookupCodeSegment(pc);
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment)
return false;

Просмотреть файл

@ -379,6 +379,7 @@ UNIFIED_SOURCES += [
'wasm/WasmIonCompile.cpp',
'wasm/WasmJS.cpp',
'wasm/WasmModule.cpp',
'wasm/WasmProcess.cpp',
'wasm/WasmSignalHandlers.cpp',
'wasm/WasmStubs.cpp',
'wasm/WasmTable.cpp',

Просмотреть файл

@ -47,6 +47,7 @@
_(WasmModuleTieringLock, 500) \
_(WasmCompileTaskState, 500) \
\
_(WasmCodeSegmentMap, 600) \
_(TraceLoggerGraphState, 600) \
_(VTuneLock, 600)

Просмотреть файл

@ -1713,7 +1713,7 @@ jit::JitActivation::startWasmInterrupt(const JS::ProfilingFrameIterator::Registe
MOZ_ALWAYS_TRUE(wasm::StartUnwinding(*this, state, &unwindState, &ignoredUnwound));
void* pc = unwindState.pc;
MOZ_ASSERT(compartment()->wasm.lookupCode(pc)->lookupRange(pc)->isFunction());
MOZ_ASSERT(wasm::LookupCode(pc)->lookupRange(pc)->isFunction());
cx_->runtime()->startWasmInterrupt(state.pc, pc);
setWasmExitFP(unwindState.fp);

Просмотреть файл

@ -27,6 +27,7 @@
#endif
#include "vtune/VTuneWrapper.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmProcess.h"
#include "wasm/WasmSerialize.h"
#include "jit/MacroAssembler-inl.h"
@ -284,11 +285,21 @@ CodeSegment::initialize(Tier tier,
if (!ExecutableAllocator::makeExecutable(bytes_.get(), RoundupCodeLength(codeLength)))
return false;
if (!RegisterCodeSegment(this))
return false;
registered_ = true;
SendCodeRangesToProfiler(*this, bytecode.bytes, metadata);
return true;
}
CodeSegment::~CodeSegment()
{
    // Unregister from the process-wide pc->CodeSegment map, but only if
    // initialize() got far enough to register this segment (registered_ is
    // set right after a successful RegisterCodeSegment() call), so a
    // partially-initialized segment is not spuriously unregistered.
    if (registered_)
        UnregisterCodeSegment(this);
}
size_t
CodeSegment::serializedSize() const
{

Просмотреть файл

@ -80,6 +80,8 @@ class CodeSegment
uint8_t* outOfBoundsCode_;
uint8_t* unalignedAccessCode_;
bool registered_;
bool initialize(Tier tier,
UniqueCodeBytes bytes,
uint32_t codeLength,
@ -103,9 +105,12 @@ class CodeSegment
length_(0),
interruptCode_(nullptr),
outOfBoundsCode_(nullptr),
unalignedAccessCode_(nullptr)
unalignedAccessCode_(nullptr),
registered_(false)
{}
~CodeSegment();
static UniqueCodeSegment create(Tier tier,
jit::MacroAssembler& masm,
const ShareableBytes& bytecode,

Просмотреть файл

@ -28,13 +28,11 @@ using namespace js;
using namespace wasm;
Compartment::Compartment(Zone* zone)
: mutatingInstances_(false)
{}
Compartment::~Compartment()
{
MOZ_ASSERT(instances_.empty());
MOZ_ASSERT(!mutatingInstances_);
}
struct InstanceComparator
@ -62,20 +60,6 @@ struct InstanceComparator
}
};
struct CodeSegmentPC
{
const void* pc;
explicit CodeSegmentPC(const void* pc) : pc(pc) {}
int operator()(const CodeSegment* cs) const {
if (cs->containsCodePC(pc))
return 0;
if (pc < cs->base())
return -1;
return 1;
}
};
bool
Compartment::registerInstance(JSContext* cx, HandleWasmInstanceObject instanceObj)
{
@ -84,33 +68,16 @@ Compartment::registerInstance(JSContext* cx, HandleWasmInstanceObject instanceOb
instance.ensureProfilingLabels(cx->runtime()->geckoProfiler().enabled());
if (instance.debugEnabled() &&
instance.compartment()->debuggerObservesAllExecution())
{
if (instance.debugEnabled() && instance.compartment()->debuggerObservesAllExecution())
instance.ensureEnterFrameTrapsState(cx, true);
}
size_t index;
if (BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
MOZ_CRASH("duplicate registration");
{
AutoMutateInstances guard(*this);
if (!instances_.insert(instances_.begin() + index, &instance)) {
ReportOutOfMemory(cx);
return false;
}
const Code& code = instance.code();
for (auto t : code.tiers()) {
const CodeSegment& cs = code.segment(t);
BinarySearchIf(codeSegments_, 0, codeSegments_.length(), CodeSegmentPC(cs.base()),
&index);
if (!codeSegments_.insert(codeSegments_.begin() + index, &cs)) {
ReportOutOfMemory(cx);
return false;
}
}
if (!instances_.insert(instances_.begin() + index, &instance)) {
ReportOutOfMemory(cx);
return false;
}
Debugger::onNewWasmInstance(cx, instanceObj);
@ -123,39 +90,7 @@ Compartment::unregisterInstance(Instance& instance)
size_t index;
if (!BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
return;
AutoMutateInstances guard(*this);
instances_.erase(instances_.begin() + index);
const Code& code = instance.code();
for (auto t : code.tiers()) {
MOZ_ALWAYS_TRUE(BinarySearchIf(codeSegments_, 0, codeSegments_.length(),
CodeSegmentPC(code.segment(t).base()), &index));
codeSegments_.erase(codeSegments_.begin() + index);
}
}
const CodeSegment*
Compartment::lookupCodeSegment(const void* pc) const
{
// lookupCodeSegment() can be called asynchronously from the interrupt signal
// handler. In that case, the signal handler is just asking whether the pc
// is in wasm code. If instances_ is being mutated then we can't be
// executing wasm code so returning nullptr is fine.
if (mutatingInstances_)
return nullptr;
size_t index;
if (!BinarySearchIf(codeSegments_, 0, codeSegments_.length(), CodeSegmentPC(pc), &index))
return nullptr;
return codeSegments_[index];
}
const Code*
Compartment::lookupCode(const void* pc) const
{
const CodeSegment* found = lookupCodeSegment(pc);
return found ? found->code() : nullptr;
}
void

Просмотреть файл

@ -27,8 +27,6 @@ namespace wasm {
class CodeSegment;
typedef Vector<Instance*, 0, SystemAllocPolicy> InstanceVector;
typedef Vector<const CodeSegment*, 0, SystemAllocPolicy> CodeSegmentVector;
// wasm::Compartment lives in JSCompartment and contains the wasm-related
// per-compartment state. wasm::Compartment tracks every live instance in the
// compartment and must be notified, via registerInstance(), of any new
@ -37,20 +35,6 @@ typedef Vector<const CodeSegment*, 0, SystemAllocPolicy> CodeSegmentVector;
class Compartment
{
InstanceVector instances_;
CodeSegmentVector codeSegments_;
volatile bool mutatingInstances_;
struct AutoMutateInstances {
Compartment &c;
explicit AutoMutateInstances(Compartment& c) : c(c) {
MOZ_ASSERT(!c.mutatingInstances_);
c.mutatingInstances_ = true;
}
~AutoMutateInstances() {
MOZ_ASSERT(c.mutatingInstances_);
c.mutatingInstances_ = false;
}
};
public:
explicit Compartment(Zone* zone);
@ -72,12 +56,6 @@ class Compartment
const InstanceVector& instances() const { return instances_; }
// These methods return the wasm::CodeSegment (resp. wasm::Code) containing
// the given pc, if any exist in the compartment.
const CodeSegment* lookupCodeSegment(const void* pc) const;
const Code* lookupCode(const void* pc) const;
// Ensure all Instances in this JSCompartment have profiling labels created.
void ensureProfilingLabels(bool profilingEnabled);

Просмотреть файл

@ -62,7 +62,7 @@ WasmFrameIter::WasmFrameIter(JitActivation* activation, wasm::Frame* fp)
// instead.
code_ = &fp_->tls->instance->code();
MOZ_ASSERT(code_ == activation->compartment()->wasm.lookupCode(activation->wasmUnwindPC()));
MOZ_ASSERT(code_ == LookupCode(activation->wasmUnwindPC()));
codeRange_ = code_->lookupRange(activation->wasmUnwindPC());
MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
@ -127,7 +127,7 @@ WasmFrameIter::popFrame()
void* returnAddress = prevFP->returnAddress;
code_ = &fp_->tls->instance->code();
MOZ_ASSERT(code_ == activation_->compartment()->wasm.lookupCode(returnAddress));
MOZ_ASSERT(code_ == LookupCode(returnAddress));
codeRange_ = code_->lookupRange(returnAddress);
MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
@ -596,7 +596,7 @@ static inline void
AssertMatchesCallSite(const JitActivation& activation, void* callerPC, Frame* callerFP)
{
#ifdef DEBUG
const Code* code = activation.compartment()->wasm.lookupCode(callerPC);
const Code* code = LookupCode(callerPC);
MOZ_ASSERT(code);
const CodeRange* callerCodeRange = code->lookupRange(callerPC);
@ -626,7 +626,7 @@ ProfilingFrameIterator::initFromExitFP(const Frame* fp)
stackAddress_ = (void*)fp;
code_ = activation_->compartment()->wasm.lookupCode(pc);
code_ = LookupCode(pc);
MOZ_ASSERT(code_);
codeRange_ = code_->lookupRange(pc);
@ -686,7 +686,7 @@ js::wasm::StartUnwinding(const JitActivation& activation, const RegisterState& r
uint8_t* codeBase;
const Code* code = nullptr;
const CodeSegment* codeSegment = activation.compartment()->wasm.lookupCodeSegment(pc);
const CodeSegment* codeSegment = LookupCodeSegment(pc);
if (codeSegment) {
code = codeSegment->code();
codeRange = code->lookupRange(pc);
@ -900,7 +900,7 @@ ProfilingFrameIterator::operator++()
}
code_ = &callerFP_->tls->instance->code();
MOZ_ASSERT(code_ == activation_->compartment()->wasm.lookupCode(callerPC_));
MOZ_ASSERT(code_ == LookupCode(callerPC_));
codeRange_ = code_->lookupRange(callerPC_);
MOZ_ASSERT(codeRange_);
@ -1118,13 +1118,7 @@ wasm::LookupFaultingInstance(const CodeSegment& codeSegment, void* pc, void* fp)
bool
wasm::InCompiledCode(void* pc)
{
JSContext* cx = TlsContext.get();
if (!cx)
return false;
MOZ_RELEASE_ASSERT(!cx->handlingSegFault);
if (cx->compartment()->wasm.lookupCodeSegment(pc))
if (LookupCodeSegment(pc))
return true;
const CodeRange* codeRange;

Просмотреть файл

@ -22,6 +22,7 @@
#include "gc/Barrier.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmDebug.h"
#include "wasm/WasmProcess.h"
#include "wasm/WasmTable.h"
namespace js {

225
js/src/wasm/WasmProcess.cpp Normal file
Просмотреть файл

@ -0,0 +1,225 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2017 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmProcess.h"
#include "mozilla/BinarySearch.h"
#include "vm/MutexIDs.h"
#include "wasm/WasmCode.h"
using namespace js;
using namespace wasm;
using mozilla::BinarySearchIf;
// Per-process map from values of program-counter (pc) to CodeSegments.
//
// Whenever a new CodeSegment is ready to use, it has to be registered so that
// we can have fast lookups from pc to CodeSegments in numerous places. Since
// wasm compilation may be tiered, and the second tier doesn't have access to
// any JSContext/JSCompartment/etc lying around, we have to use a process-wide
// map instead.
typedef Vector<const CodeSegment*, 0, SystemAllocPolicy> CodeSegmentVector;
// Implements the process-wide pc->CodeSegment map with lock-free reads:
// lookup() must be callable from interrupt/profiling contexts, so readers
// never take a lock. Writers serialize on a mutex and publish their changes
// by atomically swapping between two vectors (double buffering), then
// spin-wait for in-flight readers before mutating the reclaimed vector.
class ProcessCodeSegmentMap
{
    // Since writes (insertions or removals) can happen on any background
    // thread at the same time, we need a lock here.

    Mutex mutatorsMutex_;

    // The two backing stores for the double-buffering scheme: at any given
    // time, one is published to lock-free readers via readonlyCodeSegments_
    // and the other is privately owned by the writer holding mutatorsMutex_.
    CodeSegmentVector segments1_;
    CodeSegmentVector segments2_;

    // Because of sampling/interruptions/stack iteration in general, the
    // thread running wasm might need to know to which CodeSegment the
    // current PC belongs, during a call to lookup(). A lookup is a
    // read-only operation, and we don't want to take a lock then
    // (otherwise, we could have a deadlock situation if an async lookup
    // happened on a given thread that was holding mutatorsMutex_ while getting
    // interrupted/sampled). Since the writer could be modifying the data that
    // is getting looked up, the writer functions use spin-locks to know if
    // there are any observers (i.e. calls to lookup()) of the atomic data.

    Atomic<size_t> observers_;

    // Except during swapAndWait(), there are no lookup() observers of the
    // vector pointed to by mutableCodeSegments_

    CodeSegmentVector* mutableCodeSegments_;
    Atomic<const CodeSegmentVector*> readonlyCodeSegments_;

    // BinarySearchIf comparator: matches (returns 0) when |pc| falls inside
    // a segment's code range; otherwise orders by base address, which keeps
    // the vectors sorted and makes the searches below well-defined.
    struct CodeSegmentPC
    {
        const void* pc;
        explicit CodeSegmentPC(const void* pc) : pc(pc) {}
        int operator()(const CodeSegment* cs) const {
            if (cs->containsCodePC(pc))
                return 0;
            if (pc < cs->base())
                return -1;
            return 1;
        }
    };

    // Publishes the writer's (already mutated) vector to readers, reclaims
    // the previously published vector for mutation, and waits until no
    // lookup() can still be observing the reclaimed vector. Must be called
    // with mutatorsMutex_ held.
    void swapAndWait() {
        // Both vectors are consistent for look up at this point, although their
        // contents are different: there is no way for the looked up PC to be
        // in the code segment that is getting registered, because the code
        // segment is not even fully created yet.

        // If a lookup happens before this instruction, then the
        // soon-to-become-former read-only pointer is used during the lookup,
        // which is valid.

        mutableCodeSegments_ = const_cast<CodeSegmentVector*>(
            readonlyCodeSegments_.exchange(mutableCodeSegments_)
        );

        // If a lookup happens after this instruction, then the updated vector
        // is used, which is valid:
        // - in case of insertion, it means the new vector contains more data,
        // but it's fine since the code segment is getting registered and thus
        // isn't even fully created yet, so the code can't be running.
        // - in case of removal, it means the new vector contains one less
        // entry, but it's fine since unregistering means the code segment
        // isn't used by any live instance anymore, thus PC can't be in the
        // to-be-removed code segment's range.

        // A lookup could have happened on any of the two vectors. Wait for
        // observers to be done using any vector before mutating.
        // NOTE(review): busy-wait; this assumes lookup() critical sections
        // are very short (a single binary search), so the spin is brief.
        while (observers_);
    }

  public:
    ProcessCodeSegmentMap()
      : mutatorsMutex_(mutexid::WasmCodeSegmentMap),
        observers_(0),
        mutableCodeSegments_(&segments1_),
        readonlyCodeSegments_(&segments2_)
    {
    }

    ~ProcessCodeSegmentMap()
    {
        // Every registered CodeSegment must have been unregistered before
        // the map is torn down.
        MOZ_ASSERT(segments1_.empty());
        MOZ_ASSERT(segments2_.empty());
    }

    // Inserts |cs| (which must not already be present) into both vectors,
    // keeping them sorted by base address. Returns false on OOM, in which
    // case the published state is unchanged.
    bool insert(const CodeSegment* cs) {
        LockGuard<Mutex> lock(mutatorsMutex_);

        size_t index;
        MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0, mutableCodeSegments_->length(),
                                        CodeSegmentPC(cs->base()), &index));

        if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index, cs))
            return false;

        // Publish the vector we just mutated; after the swap,
        // mutableCodeSegments_ points at the other vector, which must be
        // brought in sync by repeating the insertion below.
        swapAndWait();

#ifdef DEBUG
        size_t otherIndex;
        MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0, mutableCodeSegments_->length(),
                                        CodeSegmentPC(cs->base()), &otherIndex));
        MOZ_ASSERT(index == otherIndex);
#endif

        // Although we could simply revert the insertion in the read-only
        // vector, it is simpler to just crash and given that each CodeSegment
        // consumes multiple pages, it is unlikely this insert() would OOM in
        // practice
        AutoEnterOOMUnsafeRegion oom;
        if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index, cs))
            oom.crash("when inserting a CodeSegment in the process-wide map");

        return true;
    }

    // Removes |cs| (which must be present) from both vectors. Cannot fail:
    // erase() does not allocate.
    void remove(const CodeSegment* cs) {
        LockGuard<Mutex> lock(mutatorsMutex_);

        size_t index;
        MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0, mutableCodeSegments_->length(),
                                       CodeSegmentPC(cs->base()), &index));

        mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);

        // Publish, then repeat the erase on the other (now-mutable) vector
        // so both stay in sync.
        swapAndWait();

#ifdef DEBUG
        size_t otherIndex;
        MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0, mutableCodeSegments_->length(),
                                       CodeSegmentPC(cs->base()), &otherIndex));
        MOZ_ASSERT(index == otherIndex);
#endif

        mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);
    }

    // Lock-free lookup of the CodeSegment containing |pc|; returns nullptr
    // if |pc| is not in any registered segment. Safe to call from async
    // interrupt/profiling contexts: it takes no lock, only bumps observers_
    // so that writers in swapAndWait() wait for us.
    const CodeSegment* lookup(const void* pc) {
        auto decObserver = mozilla::MakeScopeExit([&] {
            observers_--;
        });
        observers_++;

        // Once atomically-read, the readonly vector is valid as long as
        // observers_ has been incremented (see swapAndWait()).
        const CodeSegmentVector* readonly = readonlyCodeSegments_;

        size_t index;
        if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeSegmentPC(pc), &index))
            return nullptr;

        // It is fine returning a raw CodeSegment*, because we assume we are
        // looking up a live PC in code which is on the stack, keeping the
        // CodeSegment alive.
        return (*readonly)[index];
    }
};
// The process-wide singleton map. Constructed during static initialization.
// NOTE(review): global with a non-trivial constructor/destructor; presumably
// the Mutex/mutexid machinery is usable at static-init time — confirm against
// the engine's threading initialization requirements.
static ProcessCodeSegmentMap processCodeSegmentMap;

// Registers |cs| in the process-wide map so pc-based lookups can find it.
// Returns false on OOM.
bool
wasm::RegisterCodeSegment(const CodeSegment* cs)
{
    return processCodeSegmentMap.insert(cs);
}

// Removes a previously-registered |cs| from the process-wide map. The
// segment must have been registered (remove() asserts it is found).
void
wasm::UnregisterCodeSegment(const CodeSegment* cs)
{
    processCodeSegmentMap.remove(cs);
}

// Returns the registered CodeSegment containing |pc|, or nullptr. Lock-free;
// safe to call from interrupt/profiling contexts.
const CodeSegment*
wasm::LookupCodeSegment(const void* pc)
{
    return processCodeSegmentMap.lookup(pc);
}
// Returns the wasm::Code whose registered CodeSegment contains |pc|, or
// nullptr when |pc| does not fall in any registered wasm code segment.
const Code*
wasm::LookupCode(const void* pc)
{
    const CodeSegment* segment = LookupCodeSegment(pc);
    if (!segment)
        return nullptr;
    return segment->code();
}

50
js/src/wasm/WasmProcess.h Normal file
Просмотреть файл

@ -0,0 +1,50 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2017 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef wasm_process_h
#define wasm_process_h

namespace js {
namespace wasm {

// Forward declarations of the wasm code types these lookups return.
class CodeSegment;
class Code;

// These methods return the wasm::CodeSegment (resp. wasm::Code) containing
// the given pc, if any exist in the process. These methods do not take a lock,
// and thus are safe to use in a profiling or async interrupt context.

const CodeSegment*
LookupCodeSegment(const void* pc);

const Code*
LookupCode(const void* pc);

// These methods allow to (un)register CodeSegments so they can be looked up
// via pc in the methods described above.
// RegisterCodeSegment returns false on OOM; UnregisterCodeSegment must only
// be called on a segment that was successfully registered.

bool
RegisterCodeSegment(const CodeSegment* cs);

void
UnregisterCodeSegment(const CodeSegment* cs);

} // namespace wasm
} // namespace js

#endif // wasm_process_h

Просмотреть файл

@ -990,7 +990,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
return false;
JitActivation* activation = cx->activation()->asJit();
const CodeSegment* codeSegment = activation->compartment()->wasm.lookupCodeSegment(pc);
const CodeSegment* codeSegment = LookupCodeSegment(pc);
if (!codeSegment)
return false;
@ -1125,7 +1125,7 @@ HandleMachException(JSContext* cx, const ExceptionRequest& request)
return false;
JitActivation* activation = cx->activation()->asJit();
const CodeSegment* codeSegment = activation->compartment()->wasm.lookupCodeSegment(pc);
const CodeSegment* codeSegment = LookupCodeSegment(pc);
if (!codeSegment)
return false;
@ -1336,7 +1336,7 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
return false;
JitActivation* activation = cx->activation()->asJit();
const CodeSegment* segment = activation->compartment()->wasm.lookupCodeSegment(pc);
const CodeSegment* segment = LookupCodeSegment(pc);
if (!segment)
return false;
@ -1445,7 +1445,7 @@ wasm::InInterruptibleCode(JSContext* cx, uint8_t* pc, const CodeSegment** cs)
if (!cx->compartment())
return false;
*cs = cx->compartment()->wasm.lookupCodeSegment(pc);
*cs = LookupCodeSegment(pc);
if (!*cs)
return false;