Bug 1435360 - Baldr: remove wasm async interrupt support (r=jandem)

--HG--
extra : rebase_source : af951434c93e1c946cad43aaae023b491fb58ef1
Luke Wagner 2018-03-12 11:12:54 -05:00
Parent f5962ee1a1
Commit 7093a7468e
36 changed files: 315 additions and 1130 deletions

View file

@ -47,6 +47,7 @@
#include "jit/ValueNumbering.h"
#include "jit/WasmBCE.h"
#include "js/Printf.h"
#include "util/Windows.h"
#include "vm/Debugger.h"
#include "vm/HelperThreads.h"
#include "vm/JSCompartment.h"
@ -64,6 +65,10 @@
#include "vm/JSScript-inl.h"
#include "vm/Stack-inl.h"
#if defined(ANDROID)
# include <sys/system_properties.h>
#endif
using namespace js;
using namespace js::jit;
@ -392,7 +397,7 @@ JitZoneGroup::patchIonBackedges(JSContext* cx, BackedgeTarget target)
MOZ_ASSERT(cx->runtime()->jitRuntime()->preventBackedgePatching());
MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
} else {
// We must be called from InterruptRunningJitCode, or a signal handler
// We must be called from jit::InterruptRunningCode, or a signal handler
// triggered there. rt->handlingJitInterrupt() ensures we can't reenter
// this code.
MOZ_ASSERT(!cx->runtime()->jitRuntime()->preventBackedgePatching());
@ -3352,3 +3357,169 @@ jit::JitSupportsAtomics()
// If you change these, please also change the comment in TempAllocator.
/* static */ const size_t TempAllocator::BallastSize = 16 * 1024;
/* static */ const size_t TempAllocator::PreferredLifoChunkSize = 32 * 1024;
static void
RedirectIonBackedgesToInterruptCheck(JSContext* cx)
{
// Jitcode may only be modified on the runtime's active thread.
if (cx != cx->runtime()->activeContext())
return;
// The faulting thread is suspended so we can access cx fields that can
// normally only be accessed by the cx's active thread.
AutoNoteSingleThreadedRegion anstr;
Zone* zone = cx->zoneRaw();
if (zone && !zone->isAtomsZone()) {
jit::JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
if (!jitRuntime)
return;
// If the backedge list is being mutated, the pc must be in C++ code and
// thus not in a JIT iloop. We assume that the interrupt flag will be
// checked at least once before entering JIT code (if not, no big deal;
// the browser will just request another interrupt in a second).
if (!jitRuntime->preventBackedgePatching()) {
jit::JitZoneGroup* jzg = zone->group()->jitZoneGroup;
jzg->patchIonBackedges(cx, jit::JitZoneGroup::BackedgeInterruptCheck);
}
}
}
#if !defined(XP_WIN)
// For the interrupt signal, pick a signal number that:
// - is not otherwise used by mozilla or standard libraries
// - defaults to nostop and noprint on gdb/lldb so that no one is bothered
// SIGVTALRM is a relative of SIGALRM, so it is intended for user code, but,
// unlike SIGALRM, it is not used anywhere else in Mozilla.
static const int sJitAsyncInterruptSignal = SIGVTALRM;
static void
JitAsyncInterruptHandler(int signum, siginfo_t*, void*)
{
MOZ_RELEASE_ASSERT(signum == sJitAsyncInterruptSignal);
JSContext* cx = TlsContext.get();
if (!cx)
return;
#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
SimulatorProcess::ICacheCheckingDisableCount++;
#endif
RedirectIonBackedgesToInterruptCheck(cx);
#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
SimulatorProcess::cacheInvalidatedBySignalHandler_ = true;
SimulatorProcess::ICacheCheckingDisableCount--;
#endif
cx->finishHandlingJitInterrupt();
}
#endif
static bool sTriedInstallAsyncInterrupt = false;
static bool sHaveAsyncInterrupt = false;
void
jit::EnsureAsyncInterrupt(JSContext* cx)
{
// We assume that there are no races creating the first JSRuntime of the process.
if (sTriedInstallAsyncInterrupt)
return;
sTriedInstallAsyncInterrupt = true;
#if defined(ANDROID) && !defined(__aarch64__)
// Before Android 4.4 (SDK version 19), there is a bug
// https://android-review.googlesource.com/#/c/52333
// in Bionic's pthread_join which causes pthread_join to return early when
// pthread_kill is used (on any thread). Nobody expects the pthread_cond_wait
// EINTRquisition.
char version_string[PROP_VALUE_MAX];
mozilla::PodArrayZero(version_string);
if (__system_property_get("ro.build.version.sdk", version_string) > 0) {
if (atol(version_string) < 19)
return;
}
#endif
#if defined(XP_WIN)
// Windows uses SuspendThread to stop the active thread from another thread.
#else
struct sigaction interruptHandler;
interruptHandler.sa_flags = SA_SIGINFO;
interruptHandler.sa_sigaction = &JitAsyncInterruptHandler;
sigemptyset(&interruptHandler.sa_mask);
struct sigaction prev;
if (sigaction(sJitAsyncInterruptSignal, &interruptHandler, &prev))
MOZ_CRASH("unable to install interrupt handler");
// There shouldn't be any other handlers installed for
// sJitAsyncInterruptSignal. If there are, we could always forward, but we
// need to understand what we're doing to avoid problematic interference.
if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
(prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
{
MOZ_CRASH("contention for interrupt signal");
}
#endif // defined(XP_WIN)
sHaveAsyncInterrupt = true;
}
bool
jit::HaveAsyncInterrupt()
{
MOZ_ASSERT(sTriedInstallAsyncInterrupt);
return sHaveAsyncInterrupt;
}
// JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
// C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
// checked at every Baseline and Ion JIT function prologue). The remaining
// sources of potential iloops (Ion loop backedges) are handled by this
// function: Ion loop backedges are patched to instead point to a stub that
// handles the interrupt.
void
jit::InterruptRunningCode(JSContext* cx)
{
// If signal handlers weren't installed, then Ion emits normal interrupt
// checks and doesn't need asynchronous interruption.
MOZ_ASSERT(sTriedInstallAsyncInterrupt);
if (!sHaveAsyncInterrupt)
return;
// Do nothing if we're already handling an interrupt here, to avoid races
// below and in JitRuntime::patchIonBackedges.
if (!cx->startHandlingJitInterrupt())
return;
// If we are on context's thread, then we can patch Ion backedges without
// any special synchronization.
if (cx == TlsContext.get()) {
RedirectIonBackedgesToInterruptCheck(cx);
cx->finishHandlingJitInterrupt();
return;
}
// We are not on the runtime's active thread, so we need to halt the
// runtime's active thread first.
#if defined(XP_WIN)
// On Windows, we can simply suspend the active thread. SuspendThread can
// sporadically fail if the thread is in the middle of a syscall. Rather
// than retrying in a loop, just wait for the next request for interrupt.
HANDLE thread = (HANDLE)cx->threadNative();
if (SuspendThread(thread) != (DWORD)-1) {
RedirectIonBackedgesToInterruptCheck(cx);
ResumeThread(thread);
}
cx->finishHandlingJitInterrupt();
#else
// On Unix, we instead deliver an async signal to the active thread, which
// halts the thread and calls our JitAsyncInterruptHandler (which has
// already been installed by EnsureAsyncInterrupt).
pthread_t thread = (pthread_t)cx->threadNative();
pthread_kill(thread, sJitAsyncInterruptSignal);
#endif
}
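The comment above describes the cooperative paths that remain after this patch: interrupt_ is polled at C++ and Baseline loop backedges, jitStackLimit_ is checked in JIT prologues, and Ion backedges are patched on demand. A minimal sketch of the polling contract, assuming a hypothetical work loop (only CheckForInterrupt is a real function, used elsewhere in this patch):

// Hedged sketch: a C++ loop that honors JSRuntime::requestInterrupt by polling
// on every backedge. CheckForInterrupt consults the interrupt flag and runs
// the embedding's interrupt callback, returning false to abort execution.
static bool
RunUntilDoneOrInterrupted(JSContext* cx)
{
    while (HaveMoreWork()) {          // hypothetical work predicate
        if (!CheckForInterrupt(cx))
            return false;             // propagate the interrupt as an error
        DoSomeWork();                 // hypothetical unit of work
    }
    return true;
}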

View file

@ -736,6 +736,20 @@ class MOZ_STACK_CLASS MaybeAutoWritableJitCode
}
};
// Ensure the given JSContext is set up to use async interrupts. Failure to
// enable signal handlers indicates some catastrophic failure and creation of
// the runtime must fail.
void
EnsureAsyncInterrupt(JSContext* cx);
// Return whether the async interrupt can be used to interrupt Ion code.
bool
HaveAsyncInterrupt();
// Force any currently-executing JIT code to call HandleExecutionInterrupt.
extern void
InterruptRunningCode(JSContext* cx);
} // namespace jit
} // namespace js
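A sketch of how these three entry points are wired together by the rest of this patch (see the JSContext::init and JSContext::requestInterrupt hunks below); the wrapper function itself is illustrative only:

// Hedged sketch of the intended call sites (not actual VM code):
void
WireUpJitInterrupts(JSContext* cx)
{
    // Once, while initializing a context that will run JS:
    js::jit::EnsureAsyncInterrupt(cx);

    // Compilers consult this to choose between implicit (patchable) and
    // explicit (polling) loop interrupt checks:
    bool canUseImplicitChecks = js::jit::HaveAsyncInterrupt();
    (void)canUseImplicitChecks;

    // From JSContext::requestInterrupt, to break out of hot Ion loops:
    js::jit::InterruptRunningCode(cx);
}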

View file

@ -187,10 +187,6 @@ DefaultJitOptions::DefaultJitOptions()
// pc-relative jump and call instructions.
SET_DEFAULT(jumpThreshold, UINT32_MAX);
// Whether the (ARM) simulators should always interrupt before executing any
// instruction.
SET_DEFAULT(simulatorAlwaysInterrupt, false);
// The branch pruning heuristic is based on a scoring system, which looks at
// different metrics and provides a score. The score is computed as a
// projection where each factor defines the weight of each metric. Then this

View file

@ -76,7 +76,6 @@ struct DefaultJitOptions
bool wasmFoldOffsets;
bool wasmDelayTier2;
bool ionInterruptWithoutSignals;
bool simulatorAlwaysInterrupt;
uint32_t baselineWarmUpThreshold;
uint32_t exceptionBailoutThreshold;
uint32_t frequentBailoutThreshold;

View file

@ -15,7 +15,6 @@
#include "jit/LIR.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "wasm/WasmSignalHandlers.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/BytecodeUtil-inl.h"
@ -95,8 +94,8 @@ LIRGenerator::visitIsConstructing(MIsConstructing* ins)
static void
TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
{
// Implicit interrupt checks require wasm signal handlers to be installed.
if (!wasm::HaveSignalHandlers() || JitOptions.ionInterruptWithoutSignals)
// Implicit interrupt checks require JIT async interrupt support.
if (!jit::HaveAsyncInterrupt() || JitOptions.ionInterruptWithoutSignals)
return;
// To avoid triggering expensive interrupts (backedge patching) in
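The hunk above only changes the gate to jit::HaveAsyncInterrupt(); as a conceptual sketch, the choice Ion makes at each loop backedge looks roughly like this (the helper names are hypothetical, not this patch's code):

// Hedged sketch: implicit checks rely on later backedge patching, while the
// fallback emits an explicit check that polls the interrupt flag.
static void
ChooseLoopInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
{
    if (jit::HaveAsyncInterrupt() && !JitOptions.ionInterruptWithoutSignals)
        MarkBackedgePatchable(graph, backedge);   // hypothetical helper
    else
        EmitExplicitInterruptCheck(backedge);     // hypothetical helper
}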

View file

@ -1160,7 +1160,6 @@ Simulator::Simulator(JSContext* cx)
stackLimit_ = 0;
pc_modified_ = false;
icount_ = 0L;
wasm_interrupt_ = false;
break_pc_ = nullptr;
break_instr_ = 0;
single_stepping_ = false;
@ -1594,29 +1593,6 @@ Simulator::registerState()
return state;
}
// The signal handler only redirects the PC to the interrupt stub when the PC is
// in function code. However, this guard is racy for the ARM simulator since the
// signal handler samples PC in the middle of simulating an instruction and thus
// the current PC may have advanced once since the signal handler's guard. So we
// re-check here.
void
Simulator::handleWasmInterrupt()
{
if (!wasm::CodeExists)
return;
uint8_t* pc = (uint8_t*)get_pc();
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx_, pc, &ms))
return;
if (!cx_->activation()->asJit()->startWasmInterrupt(registerState()))
return;
set_pc(int32_t(ms->interruptCode()));
}
static inline JitActivation*
GetJitActivation(JSContext* cx)
{
@ -1659,9 +1635,7 @@ Simulator::handleWasmSegFault(int32_t addr, unsigned numBytes)
const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess) {
MOZ_ALWAYS_TRUE(act->asJit()->startWasmInterrupt(registerState()));
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
act->asJit()->startWasmTrap(wasm::Trap::OutOfBounds, 0, registerState());
set_pc(int32_t(moduleSegment->outOfBoundsCode()));
return true;
}
@ -4925,19 +4899,6 @@ Simulator::disable_single_stepping()
single_step_callback_arg_ = nullptr;
}
static void
FakeInterruptHandler()
{
JSContext* cx = TlsContext.get();
uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &ms))
return;
cx->simulator()->trigger_wasm_interrupt();
}
template<bool EnableStopSimAt>
void
Simulator::execute()
@ -4957,16 +4918,9 @@ Simulator::execute()
} else {
if (single_stepping_)
single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
if (MOZ_UNLIKELY(JitOptions.simulatorAlwaysInterrupt))
FakeInterruptHandler();
SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
instructionDecode(instr);
icount_++;
if (MOZ_UNLIKELY(wasm_interrupt_)) {
handleWasmInterrupt();
wasm_interrupt_ = false;
}
}
program_counter = get_pc();
}

View file

@ -197,13 +197,6 @@ class Simulator
template <typename T>
T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
void trigger_wasm_interrupt() {
// This can be called several times if a single interrupt isn't caught
// and handled by the simulator, but this is fine; once the current
// instruction is done executing, the interrupt will be handled anyhow.
wasm_interrupt_ = true;
}
void enable_single_stepping(SingleStepCallback cb, void* arg);
void disable_single_stepping();
@ -293,7 +286,6 @@ class Simulator
void printStopInfo(uint32_t code);
// Handle a wasm interrupt triggered by an async signal handler.
void handleWasmInterrupt();
JS::ProfilingFrameIterator::RegisterState registerState();
// Handle any wasm faults, returning true if the fault was handled.
@ -426,9 +418,6 @@ class Simulator
bool pc_modified_;
int64_t icount_;
// wasm async interrupt / fault support
bool wasm_interrupt_;
// Debugger input.
char* lastDebuggerInput_;

View file

@ -86,7 +86,6 @@ void Simulator::ResetState() {
// Reset registers to 0.
pc_ = nullptr;
pc_modified_ = false;
wasm_interrupt_ = false;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
set_xreg(i, 0xbadbeef);
}
@ -195,15 +194,6 @@ void Simulator::ExecuteInstruction() {
VIXL_ASSERT(IsWordAligned(pc_));
decoder_->Decode(pc_);
increment_pc();
if (MOZ_UNLIKELY(wasm_interrupt_)) {
handle_wasm_interrupt();
// Just calling set_pc turns the pc_modified_ flag on, which means it doesn't
// auto-step after executing the next instruction. Force that to off so it
// will auto-step after executing the first instruction of the handler.
pc_modified_ = false;
wasm_interrupt_ = false;
}
}
@ -230,12 +220,6 @@ bool Simulator::overRecursedWithExtra(uint32_t extra) const {
}
void Simulator::trigger_wasm_interrupt() {
MOZ_ASSERT(!wasm_interrupt_);
wasm_interrupt_ = true;
}
static inline JitActivation*
GetJitActivation(JSContext* cx)
{
@ -257,32 +241,6 @@ Simulator::registerState()
return state;
}
// The signal handler only redirects the PC to the interrupt stub when the PC is
// in function code. However, this guard is racy for the ARM simulator since the
// signal handler samples PC in the middle of simulating an instruction and thus
// the current PC may have advanced once since the signal handler's guard. So we
// re-check here.
void Simulator::handle_wasm_interrupt()
{
if (!js::wasm::CodeExists)
return;
uint8_t* pc = (uint8_t*)get_pc();
const js::wasm::ModuleSegment* ms = nullptr;
if (!js::wasm::InInterruptibleCode(cx_, pc, &ms))
return;
JitActivation* act = GetJitActivation(cx_);
if (!act)
return;
if (!act->startWasmInterrupt(registerState()))
return;
set_pc((Instruction*)ms->interruptCode());
}
bool
Simulator::handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes)
{
@ -309,10 +267,7 @@ Simulator::handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes)
const js::wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess) {
if (!act->startWasmInterrupt(registerState()))
MOZ_CRASH("Cannot start interrupt");
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
act->startWasmTrap(js::wasm::Trap::OutOfBounds, 0, registerState());
set_pc((Instruction*)moduleSegment->outOfBoundsCode());
return true;
}

View file

@ -746,8 +746,6 @@ class Simulator : public DecoderVisitor {
pc_modified_ = true;
}
void trigger_wasm_interrupt();
void handle_wasm_interrupt();
bool handle_wasm_ill_fault();
bool handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes);
@ -2583,7 +2581,6 @@ class Simulator : public DecoderVisitor {
// automatically incremented.
bool pc_modified_;
const Instruction* pc_;
bool wasm_interrupt_;
static const char* xreg_names[];
static const char* wreg_names[];

View file

@ -1269,7 +1269,6 @@ Simulator::Simulator()
pc_modified_ = false;
icount_ = 0;
break_count_ = 0;
wasm_interrupt_ = false;
break_pc_ = nullptr;
break_instr_ = 0;
@ -1642,32 +1641,6 @@ Simulator::registerState()
return state;
}
// The signal handler only redirects the PC to the interrupt stub when the PC is
// in function code. However, this guard is racy for the simulator since the
// signal handler samples PC in the middle of simulating an instruction and thus
// the current PC may have advanced once since the signal handler's guard. So we
// re-check here.
void
Simulator::handleWasmInterrupt()
{
if (!wasm::CodeExists)
return;
void* pc = (void*)get_pc();
void* fp = (void*)getRegister(Register::fp);
JitActivation* activation = TlsContext.get()->activation()->asJit();
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment || !segment->isModule() || !segment->containsCodePC(pc))
return;
if (!activation->startWasmInterrupt(registerState()))
return;
set_pc(int32_t(segment->asModule()->interruptCode()));
}
// WebAssembly memories contain an extra region of guard pages (see
// WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
// using a signal handler that redirects PC to a stub that safely reports an
@ -1706,9 +1679,7 @@ Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess) {
MOZ_ALWAYS_TRUE(act->startWasmInterrupt(registerState()));
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
act->startWasmTrap(wasm::Trap::OutOfBounds, 0, registerState());
set_pc(int32_t(moduleSegment->outOfBoundsCode()));
return true;
}
@ -3673,19 +3644,6 @@ Simulator::branchDelayInstructionDecode(SimInstruction* instr)
instructionDecode(instr);
}
static void
FakeInterruptHandler()
{
JSContext* cx = TlsContext.get();
uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &ms))
return;
cx->simulator()->trigger_wasm_interrupt();
}
template<bool enableStopSimAt>
void
Simulator::execute()
@ -3699,16 +3657,9 @@ Simulator::execute()
MipsDebugger dbg(this);
dbg.debug();
} else {
if (MOZ_UNLIKELY(JitOptions.simulatorAlwaysInterrupt))
FakeInterruptHandler();
SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
instructionDecode(instr);
icount_++;
if (MOZ_UNLIKELY(wasm_interrupt_)) {
handleWasmInterrupt();
wasm_interrupt_ = false;
}
}
program_counter = get_pc();
}

View file

@ -202,13 +202,6 @@ class Simulator {
template <typename T>
T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
void trigger_wasm_interrupt() {
// This can be called several times if a single interrupt isn't caught
// and handled by the simulator, but this is fine; once the current
// instruction is done executing, the interrupt will be handled anyhow.
wasm_interrupt_ = true;
}
// Accessor to the internal simulator stack area.
uintptr_t stackLimit() const;
bool overRecursed(uintptr_t newsp = 0) const;
@ -304,8 +297,6 @@ class Simulator {
void increaseStopCounter(uint32_t code);
void printStopInfo(uint32_t code);
// Handle a wasm interrupt triggered by an async signal handler.
void handleWasmInterrupt();
JS::ProfilingFrameIterator::RegisterState registerState();
// Handle any wasm faults, returning true if the fault was handled.
@ -365,9 +356,6 @@ class Simulator {
int icount_;
int break_count_;
// wasm async interrupt / fault support
bool wasm_interrupt_;
// Debugger input.
char* lastDebuggerInput_;

View file

@ -1278,7 +1278,6 @@ Simulator::Simulator()
pc_modified_ = false;
icount_ = 0;
break_count_ = 0;
wasm_interrupt_ = false;
break_pc_ = nullptr;
break_instr_ = 0;
single_stepping_ = false;
@ -1645,35 +1644,6 @@ Simulator::registerState()
return state;
}
// The signal handler only redirects the PC to the interrupt stub when the PC is
// in function code. However, this guard is racy for the simulator since the
// signal handler samples PC in the middle of simulating an instruction and thus
// the current PC may have advanced once since the signal handler's guard. So we
// re-check here.
void
Simulator::handleWasmInterrupt()
{
if (!wasm::CodeExists)
return;
void* pc = (void*)get_pc();
void* fp = (void*)getRegister(Register::fp);
JitActivation* activation = TlsContext.get()->activation()->asJit();
const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
if (!segment || !segment->isModule() || !segment->containsCodePC(pc))
return;
// fp can be null during the prologue/epilogue of the entry function.
if (!fp)
return;
if (!activation->startWasmInterrupt(registerState()))
return;
set_pc(int64_t(segment->asModule()->interruptCode()));
}
// WebAssembly memories contain an extra region of guard pages (see
// WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
// using a signal handler that redirects PC to a stub that safely reports an
@ -1712,9 +1682,7 @@ Simulator::handleWasmFault(uint64_t addr, unsigned numBytes)
const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess) {
MOZ_ALWAYS_TRUE(act->startWasmInterrupt(registerState()));
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
act->startWasmTrap(wasm::Trap::OutOfBounds, 0, registerState());
set_pc(int64_t(moduleSegment->outOfBoundsCode()));
return true;
}
@ -4062,19 +4030,6 @@ Simulator::disable_single_stepping()
single_step_callback_arg_ = nullptr;
}
static void
FakeInterruptHandler()
{
JSContext* cx = TlsContext.get();
uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
const wasm::ModuleSegment* ms = nullptr;
if (!wasm::InInterruptibleCode(cx, pc, &ms))
return;
cx->simulator()->trigger_wasm_interrupt();
}
template<bool enableStopSimAt>
void
Simulator::execute()
@ -4093,16 +4048,9 @@ Simulator::execute()
} else {
if (single_stepping_)
single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
if (MOZ_UNLIKELY(JitOptions.simulatorAlwaysInterrupt))
FakeInterruptHandler();
SimInstruction* instr = reinterpret_cast<SimInstruction *>(program_counter);
instructionDecode(instr);
icount_++;
if (MOZ_UNLIKELY(wasm_interrupt_)) {
handleWasmInterrupt();
wasm_interrupt_ = false;
}
}
program_counter = get_pc();
}

View file

@ -206,13 +206,6 @@ class Simulator {
template <typename T>
T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
void trigger_wasm_interrupt() {
// This can be called several times if a single interrupt isn't caught
// and handled by the simulator, but this is fine; once the current
// instruction is done executing, the interrupt will be handled anyhow.
wasm_interrupt_ = true;
}
void enable_single_stepping(SingleStepCallback cb, void* arg);
void disable_single_stepping();
@ -319,8 +312,6 @@ class Simulator {
void increaseStopCounter(uint32_t code);
void printStopInfo(uint32_t code);
// Handle a wasm interrupt triggered by an async signal handler.
void handleWasmInterrupt();
JS::ProfilingFrameIterator::RegisterState registerState();
// Handle any wasm faults, returning true if the fault was handled.
@ -378,9 +369,6 @@ class Simulator {
int64_t icount_;
int64_t break_count_;
// wasm async interrupt support
bool wasm_interrupt_;
// Debugger input.
char* lastDebuggerInput_;

View file

@ -7272,9 +7272,6 @@ JS_SetGlobalJitCompilerOption(JSContext* cx, JSJitCompilerOption opt, uint32_t v
}
jit::JitOptions.jumpThreshold = value;
break;
case JSJITCOMPILER_SIMULATOR_ALWAYS_INTERRUPT:
jit::JitOptions.simulatorAlwaysInterrupt = !!value;
break;
case JSJITCOMPILER_SPECTRE_INDEX_MASKING:
jit::JitOptions.spectreIndexMasking = !!value;
break;

View file

@ -183,7 +183,7 @@ JSRuntime::createJitRuntime(JSContext* cx)
if (!jrt)
return nullptr;
// Protect jitRuntime_ from being observed (by InterruptRunningJitCode)
// Protect jitRuntime_ from being observed (by jit::InterruptRunningCode)
// while it is being initialized. Unfortunately, initialization depends on
// jitRuntime_ being non-null, so we can't just wait to assign jitRuntime_.
JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime(), jrt);

View file

@ -52,7 +52,6 @@
#include "vm/JSObject.h"
#include "vm/JSScript.h"
#include "vm/Shape.h"
#include "wasm/WasmSignalHandlers.h"
#include "vm/JSObject-inl.h"
#include "vm/JSScript-inl.h"
@ -102,7 +101,7 @@ JSContext::init(ContextKind kind)
{
// Skip most of the initialization if this thread will not be running JS.
if (kind == ContextKind::Cooperative) {
// Get a platform-native handle for this thread, used by js::InterruptRunningJitCode.
// Get a platform-native handle for this thread, used by jit::InterruptRunningCode.
#ifdef XP_WIN
size_t openFlags = THREAD_GET_CONTEXT | THREAD_SET_CONTEXT | THREAD_SUSPEND_RESUME |
THREAD_QUERY_INFORMATION;
@ -123,11 +122,12 @@ JSContext::init(ContextKind kind)
return false;
#ifdef JS_SIMULATOR
simulator_ = js::jit::Simulator::Create(this);
simulator_ = jit::Simulator::Create(this);
if (!simulator_)
return false;
#endif
jit::EnsureAsyncInterrupt(this);
if (!wasm::EnsureSignalHandlers(this))
return false;
}

View file

@ -44,7 +44,6 @@
#include "vm/JSScript.h"
#include "vm/TraceLogging.h"
#include "vm/TraceLoggingGraph.h"
#include "wasm/WasmSignalHandlers.h"
#include "gc/GC-inl.h"
#include "vm/JSContext-inl.h"
@ -598,7 +597,7 @@ JSContext::requestInterrupt(InterruptMode mode)
if (fx.isWaiting())
fx.wake(FutexThread::WakeForJSInterrupt);
fx.unlock();
InterruptRunningJitCode(this);
jit::InterruptRunningCode(this);
}
}

View file

@ -999,18 +999,9 @@ struct JSRuntime : public js::MallocProvider<JSRuntime>
public:
js::RuntimeCaches& caches() { return caches_.ref(); }
// When wasm traps or is interrupted, the signal handler records some data
// for unwinding purposes. Wasm code can't interrupt or trap reentrantly.
js::ActiveThreadData<
mozilla::MaybeOneOf<js::wasm::TrapData, js::wasm::InterruptData>
> wasmUnwindData;
js::wasm::TrapData& wasmTrapData() {
return wasmUnwindData.ref().ref<js::wasm::TrapData>();
}
js::wasm::InterruptData& wasmInterruptData() {
return wasmUnwindData.ref().ref<js::wasm::InterruptData>();
}
// When wasm traps, the signal handler records some data for unwinding
// purposes. Wasm code can't trap reentrantly.
js::ActiveThreadData<mozilla::Maybe<js::wasm::TrapData>> wasmTrapData;
public:
#if defined(NIGHTLY_BUILD)

View file

@ -1574,7 +1574,7 @@ jit::JitActivation::~JitActivation()
// JitActivations.
MOZ_ASSERT(!bailoutData_);
MOZ_ASSERT(!isWasmInterrupted());
// Traps get handled immediately.
MOZ_ASSERT(!isWasmTrapping());
clearRematerializedFrames();
@ -1742,86 +1742,6 @@ jit::JitActivation::traceIonRecovery(JSTracer* trc)
it->trace(trc);
}
bool
jit::JitActivation::startWasmInterrupt(const JS::ProfilingFrameIterator::RegisterState& state)
{
// fp may be null when first entering wasm code from an interpreter entry
// stub.
if (!state.fp)
return false;
MOZ_ASSERT(state.pc);
// Execution can only be interrupted in function code. Afterwards, control
// flow does not reenter function code and thus there can be no
// interrupt-during-interrupt.
bool unwound;
wasm::UnwindState unwindState;
MOZ_ALWAYS_TRUE(wasm::StartUnwinding(state, &unwindState, &unwound));
void* pc = unwindState.pc;
if (unwound) {
// In the prologue/epilogue, FP might have been fixed up to the
// caller's FP, and the caller could be the jit entry. Ignore this
// interrupt, in this case, because FP points to a jit frame and not a
// wasm one.
if (!wasm::LookupCode(pc)->lookupFuncRange(pc))
return false;
}
cx_->runtime()->wasmUnwindData.ref().construct<wasm::InterruptData>(pc, state.pc);
setWasmExitFP(unwindState.fp);
MOZ_ASSERT(compartment() == unwindState.fp->tls->instance->compartment());
MOZ_ASSERT(isWasmInterrupted());
return true;
}
void
jit::JitActivation::finishWasmInterrupt()
{
MOZ_ASSERT(isWasmInterrupted());
cx_->runtime()->wasmUnwindData.ref().destroy();
packedExitFP_ = nullptr;
}
bool
jit::JitActivation::isWasmInterrupted() const
{
JSRuntime* rt = cx_->runtime();
if (!rt->wasmUnwindData.ref().constructed<wasm::InterruptData>())
return false;
Activation* act = cx_->activation();
while (act && !act->hasWasmExitFP())
act = act->prev();
if (act != this)
return false;
DebugOnly<const wasm::Frame*> fp = wasmExitFP();
DebugOnly<void*> unwindPC = rt->wasmInterruptData().unwindPC;
MOZ_ASSERT(fp->instance()->code().containsCodePC(unwindPC));
return true;
}
void*
jit::JitActivation::wasmInterruptUnwindPC() const
{
MOZ_ASSERT(isWasmInterrupted());
return cx_->runtime()->wasmInterruptData().unwindPC;
}
void*
jit::JitActivation::wasmInterruptResumePC() const
{
MOZ_ASSERT(isWasmInterrupted());
return cx_->runtime()->wasmInterruptData().resumePC;
}
void
jit::JitActivation::startWasmTrap(wasm::Trap trap, uint32_t bytecodeOffset,
const wasm::RegisterState& state)
@ -1842,7 +1762,7 @@ jit::JitActivation::startWasmTrap(wasm::Trap trap, uint32_t bytecodeOffset,
if (unwound)
bytecodeOffset = code.lookupCallSite(pc)->lineOrBytecode();
cx_->runtime()->wasmUnwindData.ref().construct<wasm::TrapData>(pc, trap, bytecodeOffset);
cx_->runtime()->wasmTrapData.ref().emplace(pc, trap, bytecodeOffset);
setWasmExitFP(fp);
}
@ -1851,7 +1771,7 @@ jit::JitActivation::finishWasmTrap()
{
MOZ_ASSERT(isWasmTrapping());
cx_->runtime()->wasmUnwindData.ref().destroy();
cx_->runtime()->wasmTrapData.ref().reset();
packedExitFP_ = nullptr;
}
@ -1859,7 +1779,7 @@ bool
jit::JitActivation::isWasmTrapping() const
{
JSRuntime* rt = cx_->runtime();
if (!rt->wasmUnwindData.ref().constructed<wasm::TrapData>())
if (!rt->wasmTrapData.ref())
return false;
Activation* act = cx_->activation();
@ -1870,7 +1790,7 @@ jit::JitActivation::isWasmTrapping() const
return false;
DebugOnly<const wasm::Frame*> fp = wasmExitFP();
DebugOnly<void*> unwindPC = rt->wasmTrapData().pc;
DebugOnly<void*> unwindPC = rt->wasmTrapData->pc;
MOZ_ASSERT(fp->instance()->code().containsCodePC(unwindPC));
return true;
}
@ -1879,14 +1799,14 @@ void*
jit::JitActivation::wasmTrapPC() const
{
MOZ_ASSERT(isWasmTrapping());
return cx_->runtime()->wasmTrapData().pc;
return cx_->runtime()->wasmTrapData->pc;
}
uint32_t
jit::JitActivation::wasmTrapBytecodeOffset() const
{
MOZ_ASSERT(isWasmTrapping());
return cx_->runtime()->wasmTrapData().bytecodeOffset;
return cx_->runtime()->wasmTrapData->bytecodeOffset;
}
InterpreterFrameIterator&

View file

@ -1805,17 +1805,6 @@ class JitActivation : public Activation
return offsetof(JitActivation, encodedWasmExitReason_);
}
// Interrupts are started from the interrupt signal handler (or the ARM
// simulator) and cleared by WasmHandleExecutionInterrupt or WasmHandleThrow
// when the interrupt is handled.
// Returns true iff we've entered interrupted state.
bool startWasmInterrupt(const wasm::RegisterState& state);
void finishWasmInterrupt();
bool isWasmInterrupted() const;
void* wasmInterruptUnwindPC() const;
void* wasmInterruptResumePC() const;
void startWasmTrap(wasm::Trap trap, uint32_t bytecodeOffset, const wasm::RegisterState& state);
void finishWasmTrap();
bool isWasmTrapping() const;

View file

@ -3606,8 +3606,7 @@ class BaseCompiler final : public BaseCompilerInterface
void addInterruptCheck()
{
// Always use signals for interrupts with Asm.JS/Wasm
MOZ_RELEASE_ASSERT(HaveSignalHandlers());
// TODO
}
void jumpTable(const LabelVector& labels, Label* theTable) {
@ -9817,8 +9816,6 @@ BaseCompiler::init()
if (!fr.setupLocals(locals_, sig().args(), debugEnabled_, &localInfo_))
return false;
addInterruptCheck();
return true;
}

View file

@ -69,28 +69,6 @@ CallingActivation()
return act->asJit();
}
static void*
WasmHandleExecutionInterrupt()
{
JitActivation* activation = CallingActivation();
MOZ_ASSERT(activation->isWasmInterrupted());
if (!CheckForInterrupt(activation->cx())) {
// If CheckForInterrupt failed, it is time to interrupt execution.
// Returning nullptr to the caller will jump to the throw stub which
// will call HandleThrow. The JitActivation must stay in the
// interrupted state until then so that stack unwinding works in
// HandleThrow.
return nullptr;
}
// If CheckForInterrupt succeeded, then execution can proceed and the
// interrupt is over.
void* resumePC = activation->wasmInterruptResumePC();
activation->finishWasmInterrupt();
return resumePC;
}
static bool
WasmHandleDebugTrap()
{
@ -219,7 +197,6 @@ wasm::HandleThrow(JSContext* cx, WasmFrameIter& iter)
frame->leave(cx);
}
MOZ_ASSERT(!cx->activation()->asJit()->isWasmInterrupted(), "unwinding clears the interrupt");
MOZ_ASSERT(!cx->activation()->asJit()->isWasmTrapping(), "unwinding clears the trapping state");
return iter.unwoundAddressOfReturnAddress();
@ -287,7 +264,7 @@ WasmOldReportTrap(int32_t trapIndex)
static void
WasmReportTrap()
{
Trap trap = TlsContext.get()->runtime()->wasmTrapData().trap;
Trap trap = TlsContext.get()->runtime()->wasmTrapData->trap;
WasmOldReportTrap(int32_t(trap));
}
@ -511,9 +488,6 @@ void*
wasm::AddressOf(SymbolicAddress imm, ABIFunctionType* abiType)
{
switch (imm) {
case SymbolicAddress::HandleExecutionInterrupt:
*abiType = Args_General0;
return FuncCast(WasmHandleExecutionInterrupt, *abiType);
case SymbolicAddress::HandleDebugTrap:
*abiType = Args_General0;
return FuncCast(WasmHandleDebugTrap, *abiType);
@ -692,7 +666,6 @@ wasm::NeedsBuiltinThunk(SymbolicAddress sym)
// Some functions don't want a thunk, because they already have one or
// they don't have frame info.
switch (sym) {
case SymbolicAddress::HandleExecutionInterrupt: // GenerateInterruptExit
case SymbolicAddress::HandleDebugTrap: // GenerateDebugTrapStub
case SymbolicAddress::HandleThrow: // GenerateThrowStub
case SymbolicAddress::ReportTrap: // GenerateTrapExit
@ -874,8 +847,8 @@ PopulateTypedNatives(TypedNativeToFuncPtrMap* typedNatives)
// things:
// - bridging the few differences between the internal wasm ABI and the external
// native ABI (viz. float returns on x86 and soft-fp ARM)
// - executing an exit prologue/epilogue which in turn allows any asynchronous
// interrupt to see the full stack up to the wasm operation that called out
// - executing an exit prologue/epilogue which in turn allows any profiling
// iterator to see the full stack up to the wasm operation that called out
//
// Thunks are created for two kinds of C++ callees, enumerated above:
// - SymbolicAddress: for statically compiled calls in the wasm module

View file

@ -335,7 +335,6 @@ ModuleSegment::initialize(Tier tier,
const CodeRangeVector& codeRanges)
{
MOZ_ASSERT(bytes_ == nullptr);
MOZ_ASSERT(linkData.interruptOffset);
MOZ_ASSERT(linkData.outOfBoundsOffset);
MOZ_ASSERT(linkData.unalignedAccessOffset);
MOZ_ASSERT(linkData.trapOffset);
@ -343,7 +342,6 @@ ModuleSegment::initialize(Tier tier,
tier_ = tier;
bytes_ = Move(codeBytes);
length_ = codeLength;
interruptCode_ = bytes_.get() + linkData.interruptOffset;
outOfBoundsCode_ = bytes_.get() + linkData.outOfBoundsOffset;
unalignedAccessCode_ = bytes_.get() + linkData.unalignedAccessOffset;
trapCode_ = bytes_.get() + linkData.trapOffset;

View file

@ -144,9 +144,8 @@ class ModuleSegment : public CodeSegment
{
Tier tier_;
// These are pointers into code for stubs used for asynchronous
// signal-handler control-flow transfer.
uint8_t* interruptCode_;
// These are pointers into code for stubs used for signal-handler
// control-flow transfer.
uint8_t* outOfBoundsCode_;
uint8_t* unalignedAccessCode_;
uint8_t* trapCode_;
@ -173,7 +172,6 @@ class ModuleSegment : public CodeSegment
ModuleSegment()
: CodeSegment(),
tier_(Tier(-1)),
interruptCode_(nullptr),
outOfBoundsCode_(nullptr),
unalignedAccessCode_(nullptr),
trapCode_(nullptr)
@ -195,7 +193,6 @@ class ModuleSegment : public CodeSegment
Tier tier() const { return tier_; }
uint8_t* interruptCode() const { return interruptCode_; }
uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; }
uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }
uint8_t* trapCode() const { return trapCode_; }

View file

@ -61,27 +61,6 @@ WasmFrameIter::WasmFrameIter(JitActivation* activation, wasm::Frame* fp)
return;
}
// When asynchronously interrupted, exitFP is set to the interrupted frame
// itself and so we do not want to skip it. Instead, we can recover the
// Code and CodeRange from the JitActivation, which are set when control
// flow was interrupted. There is no CallSite (b/c the interrupt was
// async), but this is fine because CallSite is only used for line number
// for which we can use the beginning of the function from the CodeRange
// instead.
if (activation->isWasmInterrupted()) {
code_ = &fp_->tls->instance->code();
MOZ_ASSERT(code_ == LookupCode(activation->wasmInterruptUnwindPC()));
codeRange_ = code_->lookupFuncRange(activation->wasmInterruptUnwindPC());
MOZ_ASSERT(codeRange_);
lineOrBytecode_ = codeRange_->funcLineOrBytecode();
MOZ_ASSERT(!done());
return;
}
// Otherwise, execution exits wasm code via an exit stub which sets exitFP
// to the exit stub's frame. Thus, in this case, we want to start iteration
// at the caller of the exit frame, whose Code, CodeRange and CallSite are
@ -111,14 +90,12 @@ WasmFrameIter::operator++()
// popping each frame and, once onLeaveFrame is called for a given frame,
// that frame must not be visible to subsequent stack iteration (or it
// could be added as a "new" frame just as it becomes garbage). When the
// frame is "interrupted", then exitFP is included in the callstack
// (otherwise, it is skipped, as explained above). So to unwind the
// innermost frame, we just clear the interrupt state.
// frame is trapping, then exitFP is included in the callstack (otherwise,
// it is skipped, as explained above). So to unwind the innermost frame, we
// just clear the trapping state.
if (unwind_ == Unwind::True) {
if (activation_->isWasmInterrupted())
activation_->finishWasmInterrupt();
else if (activation_->isWasmTrapping())
if (activation_->isWasmTrapping())
activation_->finishWasmTrap();
activation_->setWasmExitFP(fp_);
}
@ -760,10 +737,8 @@ ProfilingFrameIterator::initFromExitFP(const Frame* fp)
// This means that the innermost frame is skipped. This is fine because:
// - for import exit calls, the innermost frame is a thunk, so the first
// frame that shows up is the function calling the import;
// - for Math and other builtin calls as well as interrupts, we note the
// absence of an exit reason and inject a fake "builtin" frame; and
// - for async interrupts, we just accept that we'll lose the innermost
// frame.
// - for Math and other builtin calls, we note the absence of an exit
// reason and inject a fake "builtin" frame.
switch (codeRange_->kind()) {
case CodeRange::InterpEntry:
callerPC_ = nullptr;
@ -791,7 +766,6 @@ ProfilingFrameIterator::initFromExitFP(const Frame* fp)
case CodeRange::OutOfBoundsExit:
case CodeRange::UnalignedExit:
case CodeRange::Throw:
case CodeRange::Interrupt:
case CodeRange::FarJumpIsland:
MOZ_CRASH("Unexpected CodeRange kind");
}
@ -1012,11 +986,6 @@ js::wasm::StartUnwinding(const RegisterState& registers, UnwindState* unwindStat
// the entire activation. To simplify testing, we simply pretend throw
// stubs have already popped the entire stack.
return false;
case CodeRange::Interrupt:
// When the PC is in the async interrupt stub, the fp may be garbage and
// so we cannot blindly unwind it. Since the percent of time spent in
// the interrupt stub is extremely small, just ignore the stack.
return false;
}
unwindState->code = code;
@ -1142,7 +1111,6 @@ ProfilingFrameIterator::operator++()
MOZ_CRASH("should have had null caller fp");
case CodeRange::JitEntry:
MOZ_CRASH("should have been guarded above");
case CodeRange::Interrupt:
case CodeRange::Throw:
MOZ_CRASH("code range doesn't have frame");
}
@ -1155,7 +1123,6 @@ ThunkedNativeToDescription(SymbolicAddress func)
{
MOZ_ASSERT(NeedsBuiltinThunk(func));
switch (func) {
case SymbolicAddress::HandleExecutionInterrupt:
case SymbolicAddress::HandleDebugTrap:
case SymbolicAddress::HandleThrow:
case SymbolicAddress::ReportTrap:
@ -1311,8 +1278,7 @@ ProfilingFrameIterator::label() const
case CodeRange::OutOfBoundsExit: return "out-of-bounds stub (in wasm)";
case CodeRange::UnalignedExit: return "unaligned trap stub (in wasm)";
case CodeRange::FarJumpIsland: return "interstitial (in wasm)";
case CodeRange::Throw: MOZ_FALLTHROUGH;
case CodeRange::Interrupt: MOZ_CRASH("does not have a frame");
case CodeRange::Throw: MOZ_CRASH("does not have a frame");
}
MOZ_CRASH("bad code range kind");

View file

@ -49,12 +49,6 @@ struct CallableOffsets;
//
// If you want to handle every kind of frames (including JS jit frames), use
// JitFrameIter.
//
// The one exception is that this iterator may be called from the interrupt
// callback which may be called asynchronously from asm.js code; in this case,
// the backtrace may not be correct. That being said, we try our best printing
// an informative message to the user and at least the name of the innermost
// function stack frame.
class WasmFrameIter
{
@ -158,7 +152,7 @@ class ExitReason
};
// Iterates over the frames of a single wasm JitActivation, given an
// asynchronously-interrupted thread's state.
// asynchronously-profiled thread's state.
class ProfilingFrameIterator
{
const Code* code_;

View file

@ -550,10 +550,6 @@ ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex, const CodeRange& codeRan
MOZ_ASSERT(!linkDataTier_->unalignedAccessOffset);
linkDataTier_->unalignedAccessOffset = codeRange.begin();
break;
case CodeRange::Interrupt:
MOZ_ASSERT(!linkDataTier_->interruptOffset);
linkDataTier_->interruptOffset = codeRange.begin();
break;
case CodeRange::TrapExit:
MOZ_ASSERT(!linkDataTier_->trapOffset);
linkDataTier_->trapOffset = codeRange.begin();

View file

@ -251,8 +251,6 @@ class FunctionCompiler
return false;
}
addInterruptCheck();
return true;
}
@ -1035,8 +1033,7 @@ class FunctionCompiler
void addInterruptCheck()
{
// We rely on signal handlers for interrupts on Asm.JS/Wasm
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
// TODO
}
MDefinition* extractSimdElement(unsigned lane, MDefinition* base, MIRType type, SimdSign sign)

View file

@ -42,7 +42,6 @@ struct CompileArgs;
struct LinkDataTierCacheablePod
{
uint32_t interruptOffset;
uint32_t outOfBoundsOffset;
uint32_t unalignedAccessOffset;
uint32_t trapOffset;

View file

@ -50,15 +50,14 @@ class ProcessCodeSegmentMap
CodeSegmentVector segments1_;
CodeSegmentVector segments2_;
// Because of sampling/interruptions/stack iteration in general, the
// thread running wasm might need to know to which CodeSegment the
// current PC belongs, during a call to lookup(). A lookup is a
// read-only operation, and we don't want to take a lock then
// Because of profiling, the thread running wasm might need to know to which
// CodeSegment the current PC belongs, during a call to lookup(). A lookup
// is a read-only operation, and we don't want to take a lock then
// (otherwise, we could have a deadlock situation if an async lookup
// happened on a given thread that was holding mutatorsMutex_ while getting
// interrupted/sampled). Since the writer could be modifying the data that
// is getting looked up, the writer functions use spin-locks to know if
// there are any observers (i.e. calls to lookup()) of the atomic data.
// sampled). Since the writer could be modifying the data that is getting
// looked up, the writer functions use spin-locks to know if there are any
// observers (i.e. calls to lookup()) of the atomic data.
Atomic<size_t> observers_;
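A minimal sketch of the reader/writer protocol the comment above describes, with hypothetical member and helper names; readers never block, and the writer spins until no lookup() is in flight before retiring the old vector:

// Hedged sketch (not the actual ProcessCodeSegmentMap implementation):
const CodeSegment*
lookupSketch(const void* pc)
{
    observers_++;                                   // announce the read
    const CodeSegment* cs = FindInVector(readonlySegments_, pc);  // hypothetical
    observers_--;
    return cs;
}
void
publishSketch(CodeSegmentVector* next)
{
    // Assumed to be called with mutatorsMutex_ held by the writer.
    readonlySegments_ = next;                       // swap the pointer readers see
    while (observers_ > 0) {
        // spin: lookups are short and never block
    }
}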

View file

@ -30,7 +30,7 @@ class CodeSegment;
// These methods return the wasm::CodeSegment (resp. wasm::Code) containing
// the given pc, if any exist in the process. These methods do not take a lock,
// and thus are safe to use in a profiling or async interrupt context.
// and thus are safe to use in a profiling context.
const CodeSegment*
LookupCodeSegment(const void* pc, const CodeRange** codeRange = nullptr);
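A brief usage sketch of the lock-free lookup from a profiling hook; only LookupCodeSegment and its signature come from the declaration above (assumed to live in js::wasm, as elsewhere in this patch), while the hook itself is hypothetical:

// Hedged sketch: classify a sampled PC without taking any lock.
void
OnProfilerSample(const void* samplePC)
{
    const js::wasm::CodeRange* range = nullptr;
    const js::wasm::CodeSegment* seg = js::wasm::LookupCodeSegment(samplePC, &range);
    if (seg) {
        // samplePC lies inside wasm code; 'range', when filled in, narrows it
        // down to a particular function or stub within the segment.
    }
}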

View file

@ -31,20 +31,32 @@
#include "vm/ArrayBufferObject-inl.h"
#if defined(XP_WIN)
# include "util/Windows.h"
#else
# include <signal.h>
# include <sys/mman.h>
#endif
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/ucontext.h> // for ucontext_t, mcontext_t
#endif
#if defined(__x86_64__)
# if defined(__DragonFly__)
# include <machine/npx.h> // for union savefpu
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
# include <machine/fpu.h> // for struct savefpu/fxsave64
# endif
#endif
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using JS::GenericNaN;
using mozilla::DebugOnly;
using mozilla::PodArrayZero;
#if defined(ANDROID)
# include <sys/system_properties.h>
# if defined(MOZ_LINKER)
extern "C" MFBT_API bool IsSignalHandlingBroken();
# endif
#endif
// Crashing inside the signal handler can cause the handler to be recursively
// invoked, eventually blowing the stack without actually showing a crash
@ -257,38 +269,20 @@ struct AutoSignalHandler
# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
# endif
#elif defined(XP_DARWIN)
# define EIP_sig(p) ((p)->uc_mcontext->__ss.__eip)
# define EBP_sig(p) ((p)->uc_mcontext->__ss.__ebp)
# define ESP_sig(p) ((p)->uc_mcontext->__ss.__esp)
# define RIP_sig(p) ((p)->uc_mcontext->__ss.__rip)
# define RBP_sig(p) ((p)->uc_mcontext->__ss.__rbp)
# define RSP_sig(p) ((p)->uc_mcontext->__ss.__rsp)
# define R14_sig(p) ((p)->uc_mcontext->__ss.__lr)
# define R15_sig(p) ((p)->uc_mcontext->__ss.__pc)
# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
# define RIP_sig(p) ((p)->thread.__rip)
# define RBP_sig(p) ((p)->thread.__rbp)
# define RSP_sig(p) ((p)->thread.__rsp)
# define R11_sig(p) ((p)->thread.__r[11])
# define R13_sig(p) ((p)->thread.__sp)
# define R14_sig(p) ((p)->thread.__lr)
# define R15_sig(p) ((p)->thread.__pc)
#else
# error "Don't know how to read/write to the thread state via the mcontext_t."
#endif
#if defined(XP_WIN)
# include "util/Windows.h"
#else
# include <signal.h>
# include <sys/mman.h>
#endif
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/ucontext.h> // for ucontext_t, mcontext_t
#endif
#if defined(__x86_64__)
# if defined(__DragonFly__)
# include <machine/npx.h> // for union savefpu
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
# include <machine/fpu.h> // for struct savefpu/fxsave64
# endif
#endif
#if defined(ANDROID)
// Not all versions of the Android NDK define ucontext_t or mcontext_t.
// Detect this and provide custom but compatible definitions. Note that these
@ -369,38 +363,30 @@ enum { REG_EIP = 14 };
# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
#endif // defined(ANDROID)
#if !defined(XP_WIN)
# define CONTEXT ucontext_t
#endif
// Define a context type for use in the emulator code. This is usually just
// the same as CONTEXT, but on Mac we use a different structure since we call
// into the emulator code from a Mach exception handler rather than a
// sigaction-style signal handler.
#if defined(XP_DARWIN)
# if defined(__x86_64__)
struct macos_x64_context {
x86_thread_state64_t thread;
x86_float_state64_t float_;
};
# define EMULATOR_CONTEXT macos_x64_context
# define CONTEXT macos_x64_context
# elif defined(__i386__)
struct macos_x86_context {
x86_thread_state_t thread;
x86_float_state_t float_;
};
# define EMULATOR_CONTEXT macos_x86_context
# define CONTEXT macos_x86_context
# elif defined(__arm__)
struct macos_arm_context {
arm_thread_state_t thread;
arm_neon_state_t float_;
};
# define EMULATOR_CONTEXT macos_arm_context
# define CONTEXT macos_arm_context
# else
# error Unsupported architecture
# endif
#else
# define EMULATOR_CONTEXT CONTEXT
#elif !defined(XP_WIN)
# define CONTEXT ucontext_t
#endif
#if defined(_M_X64) || defined(__x86_64__)
@ -428,14 +414,10 @@ struct macos_arm_context {
# define LR_sig(p) R31_sig(p)
#endif
#if defined(PC_sig) && defined(FP_sig) && defined(SP_sig)
# define KNOWS_MACHINE_STATE
#endif
static uint8_t**
ContextToPC(CONTEXT* context)
{
#ifdef KNOWS_MACHINE_STATE
#ifdef PC_sig
return reinterpret_cast<uint8_t**>(&PC_sig(context));
#else
MOZ_CRASH();
@ -445,116 +427,48 @@ ContextToPC(CONTEXT* context)
static uint8_t*
ContextToFP(CONTEXT* context)
{
#ifdef KNOWS_MACHINE_STATE
#ifdef FP_sig
return reinterpret_cast<uint8_t*>(FP_sig(context));
#else
MOZ_CRASH();
#endif
}
#ifdef KNOWS_MACHINE_STATE
static uint8_t*
ContextToSP(CONTEXT* context)
{
#ifdef SP_sig
return reinterpret_cast<uint8_t*>(SP_sig(context));
#else
MOZ_CRASH();
#endif
}
# if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
static uint8_t*
ContextToLR(CONTEXT* context)
{
# ifdef LR_sig
return reinterpret_cast<uint8_t*>(LR_sig(context));
}
# endif
#endif // KNOWS_MACHINE_STATE
#if defined(XP_DARWIN)
static uint8_t**
ContextToPC(EMULATOR_CONTEXT* context)
{
# if defined(__x86_64__)
static_assert(sizeof(context->thread.__rip) == sizeof(void*),
"stored IP should be compile-time pointer-sized");
return reinterpret_cast<uint8_t**>(&context->thread.__rip);
# elif defined(__i386__)
static_assert(sizeof(context->thread.uts.ts32.__eip) == sizeof(void*),
"stored IP should be compile-time pointer-sized");
return reinterpret_cast<uint8_t**>(&context->thread.uts.ts32.__eip);
# elif defined(__arm__)
static_assert(sizeof(context->thread.__pc) == sizeof(void*),
"stored IP should be compile-time pointer-sized");
return reinterpret_cast<uint8_t**>(&context->thread.__pc);
# else
# error Unsupported architecture
MOZ_CRASH();
# endif
}
static uint8_t*
ContextToFP(EMULATOR_CONTEXT* context)
{
# if defined(__x86_64__)
return (uint8_t*)context->thread.__rbp;
# elif defined(__i386__)
return (uint8_t*)context->thread.uts.ts32.__ebp;
# elif defined(__arm__)
return (uint8_t*)context->thread.__r[11];
# else
# error Unsupported architecture
# endif
}
# if defined(__arm__) || defined(__aarch64__)
static uint8_t*
ContextToLR(EMULATOR_CONTEXT* context)
{
return (uint8_t*)context->thread.__lr;
}
# endif
static uint8_t*
ContextToSP(EMULATOR_CONTEXT* context)
{
# if defined(__x86_64__)
return (uint8_t*)context->thread.__rsp;
# elif defined(__i386__)
return (uint8_t*)context->thread.uts.ts32.__esp;
# elif defined(__arm__)
return (uint8_t*)context->thread.__sp;
# else
# error Unsupported architecture
# endif
}
static JS::ProfilingFrameIterator::RegisterState
ToRegisterState(EMULATOR_CONTEXT* context)
{
JS::ProfilingFrameIterator::RegisterState state;
state.fp = ContextToFP(context);
state.pc = *ContextToPC(context);
state.sp = ContextToSP(context);
# if defined(__arm__) || defined(__aarch64__)
state.lr = ContextToLR(context);
# endif
return state;
}
#endif // XP_DARWIN
#endif
static JS::ProfilingFrameIterator::RegisterState
ToRegisterState(CONTEXT* context)
{
#ifdef KNOWS_MACHINE_STATE
JS::ProfilingFrameIterator::RegisterState state;
state.fp = ContextToFP(context);
state.pc = *ContextToPC(context);
state.sp = ContextToSP(context);
# if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
state.lr = ContextToLR(context);
# endif
return state;
#else
MOZ_CRASH();
state.lr = (void*)UINTPTR_MAX;
#endif
return state;
}
#if defined(WASM_HUGE_MEMORY)
@ -653,7 +567,7 @@ AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
}
MOZ_COLD static void*
AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
{
switch (code) {
case X86Encoding::rax: return &RAX_sig(context);
@ -678,7 +592,7 @@ AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
}
# else
MOZ_COLD static void*
AddressOfFPRegisterSlot(EMULATOR_CONTEXT* context, FloatRegisters::Encoding encoding)
AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
{
switch (encoding) {
case X86Encoding::xmm0: return &context->float_.__fpu_xmm0;
@ -703,7 +617,7 @@ AddressOfFPRegisterSlot(EMULATOR_CONTEXT* context, FloatRegisters::Encoding enco
}
MOZ_COLD static void*
AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
{
switch (code) {
case X86Encoding::rax: return &context->thread.__rax;
@ -729,20 +643,20 @@ AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
# endif // !XP_DARWIN
#elif defined(JS_CODEGEN_ARM64)
MOZ_COLD static void*
AddressOfFPRegisterSlot(EMULATOR_CONTEXT* context, FloatRegisters::Encoding encoding)
AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
{
MOZ_CRASH("NYI - asm.js not supported yet on this platform");
}
MOZ_COLD static void*
AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
{
MOZ_CRASH("NYI - asm.js not supported yet on this platform");
}
#endif
MOZ_COLD static void
SetRegisterToCoercedUndefined(EMULATOR_CONTEXT* context, size_t size,
SetRegisterToCoercedUndefined(CONTEXT* context, size_t size,
const Disassembler::OtherOperand& value)
{
if (value.kind() == Disassembler::OtherOperand::FPR)
@ -752,7 +666,7 @@ SetRegisterToCoercedUndefined(EMULATOR_CONTEXT* context, size_t size,
}
MOZ_COLD static void
SetRegisterToLoadedValue(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
SetRegisterToLoadedValue(CONTEXT* context, SharedMem<void*> addr, size_t size,
const Disassembler::OtherOperand& value)
{
if (value.kind() == Disassembler::OtherOperand::FPR)
@ -762,14 +676,14 @@ SetRegisterToLoadedValue(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_
}
MOZ_COLD static void
SetRegisterToLoadedValueSext32(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
SetRegisterToLoadedValueSext32(CONTEXT* context, SharedMem<void*> addr, size_t size,
const Disassembler::OtherOperand& value)
{
SetGPRegToLoadedValueSext32(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
}
MOZ_COLD static void
StoreValueFromRegister(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
StoreValueFromRegister(CONTEXT* context, SharedMem<void*> addr, size_t size,
const Disassembler::OtherOperand& value)
{
if (value.kind() == Disassembler::OtherOperand::FPR)
@ -781,7 +695,7 @@ StoreValueFromRegister(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t
}
MOZ_COLD static uint8_t*
ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddress& address)
ComputeAccessAddress(CONTEXT* context, const Disassembler::ComplexAddress& address)
{
MOZ_RELEASE_ASSERT(!address.isPCRelative(), "PC-relative addresses not supported yet");
@ -806,7 +720,7 @@ ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddre
}
MOZ_COLD static void
HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
HandleMemoryAccess(CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const ModuleSegment* segment, const Instance& instance, JitActivation* activation,
uint8_t** ppc)
{
@ -818,7 +732,7 @@ HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddr
// experimental SIMD.js or Atomics. When these are converted to
// non-experimental wasm features, this case, as well as outOfBoundsCode,
// can be removed.
MOZ_ALWAYS_TRUE(activation->startWasmInterrupt(ToRegisterState(context)));
activation->startWasmTrap(wasm::Trap::OutOfBounds, 0, ToRegisterState(context));
*ppc = segment->outOfBoundsCode();
return;
}
@ -962,7 +876,7 @@ HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddr
#else // WASM_HUGE_MEMORY
MOZ_COLD static void
HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
HandleMemoryAccess(CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const ModuleSegment* segment, const Instance& instance, JitActivation* activation,
uint8_t** ppc)
{
@ -971,7 +885,7 @@ HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddr
const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc);
if (!memoryAccess) {
// See explanation in the WASM_HUGE_MEMORY HandleMemoryAccess.
MOZ_ALWAYS_TRUE(activation->startWasmInterrupt(ToRegisterState(context)));
activation->startWasmTrap(wasm::Trap::OutOfBounds, 0, ToRegisterState(context));
*ppc = segment->outOfBoundsCode();
return;
}
@ -1019,33 +933,8 @@ HandleFault(PEXCEPTION_POINTERS exception)
MOZ_ASSERT(activation);
const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(context));
if (!instance) {
// On Windows, it is possible for InterruptRunningJitCode to execute
// between a faulting instruction and the handling of the fault due
// to InterruptRunningJitCode's use of SuspendThread. When this happens,
// after ResumeThread, the exception handler is called with pc equal to
// ModuleSegment.interrupt, which is logically wrong. The Right Thing would
// be for the OS to make fault-handling atomic (so that CONTEXT.pc was
// always the logically-faulting pc). Fortunately, we can detect this
// case and silence the exception ourselves (the exception will
// retrigger after the interrupt jumps back to resumePC).
return activation->isWasmInterrupted() &&
pc == moduleSegment->interruptCode() &&
moduleSegment->containsCodePC(activation->wasmInterruptResumePC());
}
// In the same race-with-interrupt situation above, it's *also* possible
// that the reported 'pc' is the pre-interrupt pc, not post-interrupt
// moduleSegment->interruptCode (this may be windows-version-specific). In
// this case, lookupTrap()/lookupMemoryAccess() will all succeed causing the
// pc to be redirected *again* (to a trap stub), leading to the interrupt
// stub never being called. Since the goal of the async interrupt is to break
// out of iloops and trapping does just that, this is fine; we just clear the
// "interrupted" state.
if (activation->isWasmInterrupted()) {
MOZ_ASSERT(activation->wasmInterruptResumePC() == pc);
activation->finishWasmInterrupt();
}
if (!instance)
return false;
if (record->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
Trap trap;
@ -1125,7 +1014,7 @@ HandleMachException(JSContext* cx, const ExceptionRequest& request)
mach_port_t cxThread = request.body.thread.name;
// Read out the JSRuntime thread's register state.
EMULATOR_CONTEXT context;
CONTEXT context;
# if defined(__x86_64__)
unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
@ -1446,7 +1335,7 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
// partly overlaps the end of the heap. In this case, it is an out-of-bounds
// error and we should signal that properly, but to do so we must inspect
// the operand of the failed access.
MOZ_ALWAYS_TRUE(activation->startWasmInterrupt(ToRegisterState(context)));
activation->startWasmTrap(wasm::Trap::UnalignedAccess, 0, ToRegisterState(context));
*ppc = moduleSegment->unalignedAccessCode();
return true;
}
@ -1495,119 +1384,8 @@ WasmFaultHandler(int signum, siginfo_t* info, void* context)
}
# endif // XP_WIN || XP_DARWIN || assume unix
static void
RedirectIonBackedgesToInterruptCheck(JSContext* cx)
{
if (!cx->runtime()->hasJitRuntime())
return;
jit::JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
Zone* zone = cx->zoneRaw();
if (zone && !zone->isAtomsZone()) {
// If the backedge list is being mutated, the pc must be in C++ code and
// thus not in a JIT iloop. We assume that the interrupt flag will be
// checked at least once before entering JIT code (if not, no big deal;
// the browser will just request another interrupt in a second).
if (!jitRuntime->preventBackedgePatching()) {
jit::JitZoneGroup* jzg = zone->group()->jitZoneGroup;
jzg->patchIonBackedges(cx, jit::JitZoneGroup::BackedgeInterruptCheck);
}
}
}
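
The comment above leans on the unpatched fallback: code that is not running a patched Ion backedge simply polls an interrupt flag at each loop backedge. A stand-alone sketch of that polling scheme (gInterruptRequested and RunLoop are illustrative names, not SpiderMonkey code):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static std::atomic<bool> gInterruptRequested(false);

static void RunLoop()
{
    long iterations = 0;
    for (;;) {
        iterations++;                                   // "loop body"
        // Loop backedge: poll the interrupt flag before jumping back.
        if (gInterruptRequested.load(std::memory_order_relaxed)) {
            std::printf("interrupted after %ld iterations\n", iterations);
            return;
        }
    }
}

int main()
{
    std::thread worker(RunLoop);
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    gInterruptRequested.store(true, std::memory_order_relaxed);
    worker.join();
    return 0;
}

Backedge patching exists precisely to avoid paying this per-iteration check in Ion-compiled loops; the explicit flag check is the slower but always-correct path.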
bool
wasm::InInterruptibleCode(JSContext* cx, uint8_t* pc, const ModuleSegment** ms)
{
// Only interrupt in function code so that the frame iterators have the
// invariant that resumePC always has a function CodeRange and we can't
// get into any weird interrupt-during-interrupt-stub cases.
if (!cx->compartment())
return false;
const CodeSegment* cs = LookupCodeSegment(pc);
if (!cs || !cs->isModule())
return false;
*ms = cs->asModule();
return !!(*ms)->code().lookupFuncRange(pc);
}
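
At bottom, InInterruptibleCode is an address-range lookup: map pc to the code segment containing it, then check what kind of range it falls in. A simplified sketch of such a lookup over sorted, non-overlapping segments (Segment and LookupSegment are illustrative stand-ins for the real LookupCodeSegment machinery):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Segment
{
    uintptr_t base;    // address of the first code byte
    uintptr_t limit;   // one past the last code byte
};

// Segments are sorted by base and never overlap, so a binary search on base
// yields the only candidate that could contain pc.
static const Segment*
LookupSegment(const std::vector<Segment>& segments, uintptr_t pc)
{
    auto it = std::upper_bound(segments.begin(), segments.end(), pc,
                               [](uintptr_t value, const Segment& s) { return value < s.base; });
    if (it == segments.begin())
        return nullptr;
    --it;
    return pc < it->limit ? &*it : nullptr;
}

int main()
{
    std::vector<Segment> segments{{0x1000, 0x2000}, {0x3000, 0x3800}};
    const Segment* hit = LookupSegment(segments, 0x3400);
    const Segment* miss = LookupSegment(segments, 0x2800);
    return (hit && hit->base == 0x3000 && !miss) ? 0 : 1;
}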
// The return value indicates whether the PC was changed, not whether there was
// a failure.
static bool
RedirectJitCodeToInterruptCheck(JSContext* cx, CONTEXT* context)
{
// Jitcode may only be modified on the runtime's active thread.
if (cx != cx->runtime()->activeContext())
return false;
// The faulting thread is suspended so we can access cx fields that can
// normally only be accessed by the cx's active thread.
AutoNoteSingleThreadedRegion anstr;
RedirectIonBackedgesToInterruptCheck(cx);
#ifdef JS_SIMULATOR
uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
#else
uint8_t* pc = *ContextToPC(context);
#endif
const ModuleSegment* moduleSegment = nullptr;
if (!InInterruptibleCode(cx, pc, &moduleSegment))
return false;
#ifdef JS_SIMULATOR
// The checks performed by the !JS_SIMULATOR path happen in
// Simulator::handleWasmInterrupt.
cx->simulator()->trigger_wasm_interrupt();
#else
// Only probe cx->activation() after we know the pc is in wasm code. This
// way we don't depend on signal-safe update of cx->activation().
JitActivation* activation = cx->activation()->asJit();
// The out-of-bounds/unaligned trap paths which call startWasmInterrupt() go
// through function code, so test if already interrupted. These paths are
// temporary though, so this case can be removed later.
if (activation->isWasmInterrupted())
return false;
if (!activation->startWasmInterrupt(ToRegisterState(context)))
return false;
*ContextToPC(context) = moduleSegment->interruptCode();
#endif
return true;
}
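
The #ifdef above hides the one genuinely platform-specific step: locating the saved program counter inside the signal context, as the ContextToPC helper in this file does. A small stand-alone approximation, showing the glibc field names for two common Linux targets (SavedPC and Handler are illustrative; other OS/architecture combinations use different fields):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1           // for REG_RIP in <sys/ucontext.h> on glibc
#endif
#include <csignal>
#include <cstdint>
#include <cstdio>
#include <ucontext.h>

// Locate the saved program counter inside the signal context. Only the field
// names below are platform-specific.
static uintptr_t
SavedPC(void* ctx)
{
    ucontext_t* uc = static_cast<ucontext_t*>(ctx);
#if defined(__x86_64__)
    return static_cast<uintptr_t>(uc->uc_mcontext.gregs[REG_RIP]);
#elif defined(__aarch64__)
    return static_cast<uintptr_t>(uc->uc_mcontext.pc);
#else
#   error "saved-pc field not filled in for this platform"
#endif
}

static void Handler(int, siginfo_t*, void* ctx)
{
    // printf is tolerable here only because the signal is raised synchronously
    // by the same thread below; it is not async-signal-safe in general.
    std::printf("signal arrived at pc=%#lx\n",
                static_cast<unsigned long>(SavedPC(ctx)));
}

int main()
{
    struct sigaction sa;
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = Handler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, nullptr);
    raise(SIGUSR1);
    return 0;
}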
#if !defined(XP_WIN)
// For the interrupt signal, pick a signal number that:
// - is not otherwise used by mozilla or standard libraries
// - defaults to nostop and noprint on gdb/lldb so that no one is bothered
// SIGVTALRM is a relative of SIGALRM, so intended for user code, but, unlike
// SIGALRM, not used anywhere else in Mozilla.
static const int sInterruptSignal = SIGVTALRM;
static void
JitInterruptHandler(int signum, siginfo_t* info, void* context)
{
if (JSContext* cx = TlsContext.get()) {
#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
SimulatorProcess::ICacheCheckingDisableCount++;
#endif
RedirectJitCodeToInterruptCheck(cx, (CONTEXT*)context);
#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
SimulatorProcess::cacheInvalidatedBySignalHandler_ = true;
SimulatorProcess::ICacheCheckingDisableCount--;
#endif
cx->finishHandlingJitInterrupt();
}
}
#if defined(ANDROID) && defined(MOZ_LINKER)
extern "C" MFBT_API bool IsSignalHandlingBroken();
#endif
static bool sTriedInstallSignalHandlers = false;
@ -1621,73 +1399,35 @@ ProcessHasSignalHandlers()
return sHaveSignalHandlers;
sTriedInstallSignalHandlers = true;
#if defined(ANDROID)
# if !defined(__aarch64__)
// Before Android 4.4 (SDK version 19), there is a bug
// https://android-review.googlesource.com/#/c/52333
// in Bionic's pthread_join which causes pthread_join to return early when
// pthread_kill is used (on any thread). Nobody expects the pthread_cond_wait
// EINTRquisition.
char version_string[PROP_VALUE_MAX];
PodArrayZero(version_string);
if (__system_property_get("ro.build.version.sdk", version_string) > 0) {
if (atol(version_string) < 19)
return false;
}
# endif
# if defined(MOZ_LINKER)
#if defined(ANDROID) && defined(MOZ_LINKER)
// Signal handling is broken on some android systems.
if (IsSignalHandlingBroken())
return false;
# endif
#endif
// The interrupt handler allows the active thread to be paused from another
// thread (see InterruptRunningJitCode).
#if defined(XP_WIN)
// Windows uses SuspendThread to stop the active thread from another thread.
#else
struct sigaction interruptHandler;
interruptHandler.sa_flags = SA_SIGINFO;
interruptHandler.sa_sigaction = &JitInterruptHandler;
sigemptyset(&interruptHandler.sa_mask);
struct sigaction prev;
if (sigaction(sInterruptSignal, &interruptHandler, &prev))
MOZ_CRASH("unable to install interrupt handler");
// There shouldn't be any other handlers installed for sInterruptSignal. If
// there are, we could always forward, but we need to understand what we're
// doing to avoid problematic interference.
if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
(prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
{
MOZ_CRASH("contention for interrupt signal");
}
#endif // defined(XP_WIN)
// Initialize the ThreadLocal flag used by WasmFaultHandler
sAlreadyInSignalHandler.infallibleInit();
// Install a SIGSEGV handler to handle safely-out-of-bounds asm.js heap
// access and/or unaligned accesses.
# if defined(XP_WIN)
# if defined(MOZ_ASAN)
#if defined(XP_WIN)
# if defined(MOZ_ASAN)
// Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
// in the first handler position. This requires some coordination with
// MemoryProtectionExceptionHandler::isDisabled().
const bool firstHandler = false;
# else
# else
// Otherwise, WasmFaultHandler needs to go first, so that we can recover
// from wasm faults and continue execution without triggering handlers
// such as MemoryProtectionExceptionHandler that assume we are crashing.
const bool firstHandler = true;
# endif
# endif
if (!AddVectoredExceptionHandler(firstHandler, WasmFaultHandler))
return false;
# elif defined(XP_DARWIN)
#elif defined(XP_DARWIN)
// OSX handles seg faults via the Mach exception handler above, so don't
// install WasmFaultHandler.
# else
#else
// SA_NODEFER allows us to reenter the signal handler if we crash while
// handling the signal, and fall through to the Breakpad handler by testing
// handlingSegFault.
@ -1700,7 +1440,7 @@ ProcessHasSignalHandlers()
if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler))
MOZ_CRASH("unable to install segv handler");
# if defined(JS_CODEGEN_ARM)
# if defined(JS_CODEGEN_ARM)
// On ARM, handle unaligned accesses.
struct sigaction busHandler;
busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
@ -1708,7 +1448,7 @@ ProcessHasSignalHandlers()
sigemptyset(&busHandler.sa_mask);
if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler))
MOZ_CRASH("unable to install sigbus handler");
# endif
# endif
// Install a handler to handle the instructions that are emitted to implement
// wasm traps.
@ -1718,7 +1458,7 @@ ProcessHasSignalHandlers()
sigemptyset(&wasmTrapHandler.sa_mask);
if (sigaction(kWasmTrapSignal, &wasmTrapHandler, &sPrevWasmTrapHandler))
MOZ_CRASH("unable to install wasm trap handler");
# endif
#endif
sHaveSignalHandlers = true;
return true;
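
The sPrev*Handler values saved above exist so that faults this code does not own can be handed to whichever handler (e.g. Breakpad) was installed first. A sketch of that forwarding pattern, independent of SpiderMonkey; IsOurFault and SegvHandler are illustrative placeholders:

#include <csignal>

static struct sigaction sPrevSegvHandler;

static bool
IsOurFault(siginfo_t* info)
{
    // Placeholder predicate: a real handler would check whether the faulting
    // address and pc belong to memory and code that it manages.
    (void)info;
    return false;
}

static void
SegvHandler(int signum, siginfo_t* info, void* ctx)
{
    if (IsOurFault(info)) {
        // ...recover as the handlers above do...
        return;
    }
    // Not ours: hand the fault to whichever handler was installed before us.
    if (sPrevSegvHandler.sa_flags & SA_SIGINFO && sPrevSegvHandler.sa_sigaction) {
        sPrevSegvHandler.sa_sigaction(signum, info, ctx);
    } else if (sPrevSegvHandler.sa_handler != SIG_DFL &&
               sPrevSegvHandler.sa_handler != SIG_IGN) {
        sPrevSegvHandler.sa_handler(signum);
    } else {
        // Reinstate the previous disposition and return; the faulting
        // instruction re-executes and the default action takes over.
        sigaction(signum, &sPrevSegvHandler, nullptr);
    }
}

int main()
{
    struct sigaction sa;
    sa.sa_flags = SA_SIGINFO | SA_NODEFER;
    sa.sa_sigaction = SegvHandler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, &sPrevSegvHandler);   // remember the previous handler
    return 0;
}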
@ -1746,61 +1486,3 @@ wasm::HaveSignalHandlers()
MOZ_ASSERT(sTriedInstallSignalHandlers);
return sHaveSignalHandlers;
}
// JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
// C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
// checked at every Baseline and Ion JIT function prologue). The remaining
// sources of potential iloops (Ion loop backedges and all wasm code) are
// handled by this function:
// 1. Ion loop backedges are patched to instead point to a stub that handles
// the interrupt;
// 2. if the active thread's pc is inside wasm code, the pc is updated to point
// to a stub that handles the interrupt.
void
js::InterruptRunningJitCode(JSContext* cx)
{
// If signal handlers weren't installed, then Ion and wasm emit normal
// interrupt checks and don't need asynchronous interruption.
if (!HaveSignalHandlers())
return;
// Do nothing if we're already handling an interrupt here, to avoid races
// below and in JitRuntime::patchIonBackedges.
if (!cx->startHandlingJitInterrupt())
return;
// If we are on the context's thread, then: pc is not in wasm code (so nothing
// to do for wasm) and we can patch Ion backedges without any special
// synchronization.
if (cx == TlsContext.get()) {
RedirectIonBackedgesToInterruptCheck(cx);
cx->finishHandlingJitInterrupt();
return;
}
// We are not on the runtime's active thread, so to do 1 and 2 above, we need
// to halt the runtime's active thread first.
#if defined(XP_WIN)
// On Windows, we can simply suspend the active thread and work directly on
// its context from this thread. SuspendThread can sporadically fail if the
// thread is in the middle of a syscall. Rather than retrying in a loop,
// just wait for the next request for interrupt.
HANDLE thread = (HANDLE)cx->threadNative();
if (SuspendThread(thread) != (DWORD)-1) {
CONTEXT context;
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(thread, &context)) {
if (RedirectJitCodeToInterruptCheck(cx, &context))
SetThreadContext(thread, &context);
}
ResumeThread(thread);
}
cx->finishHandlingJitInterrupt();
#else
// On Unix, we instead deliver an async signal to the active thread which
// halts the thread and calls our JitInterruptHandler (which has already
// been installed by EnsureSignalHandlersInstalled).
pthread_t thread = (pthread_t)cx->threadNative();
pthread_kill(thread, sInterruptSignal);
#endif
}
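
Stripped of the JIT details, the Unix path of InterruptRunningJitCode is the classic pattern of poking a specific thread with a signal and doing the interesting work in that thread's handler. A stand-alone sketch assuming POSIX, reusing SIGVTALRM as the code above does (Worker, gInterrupted and the 50 ms delay are illustrative):

#include <atomic>
#include <chrono>
#include <csignal>
#include <cstdio>
#include <pthread.h>
#include <thread>

static std::atomic<bool> gInterrupted(false);

// Runs on the *target* thread, at whatever instruction it happened to be
// executing when the signal arrived. The real handler uses this property to
// inspect and redirect that thread's saved pc; here we just set a flag so the
// demo can terminate.
static void
InterruptHandler(int)
{
    gInterrupted.store(true, std::memory_order_relaxed);
}

static void
Worker()
{
    while (!gInterrupted.load(std::memory_order_relaxed)) {
        // Simulated long-running work.
    }
    std::puts("worker observed the interrupt");
}

int main()
{
    struct sigaction sa;
    sa.sa_flags = 0;
    sa.sa_handler = InterruptHandler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGVTALRM, &sa, nullptr);

    std::thread worker(Worker);
    pthread_t target = worker.native_handle();   // pthread_t on POSIX std::thread impls

    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    pthread_kill(target, SIGVTALRM);              // deliver the signal to that one thread

    worker.join();
    return 0;
}

On Windows the same end is reached without signals: SuspendThread, GetThreadContext, modify the context, SetThreadContext, ResumeThread, as the function above shows.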

View file

@ -30,11 +30,6 @@
#include "wasm/WasmTypes.h"
namespace js {
// Force any currently-executing asm.js/ion code to call HandleExecutionInterrupt.
extern void
InterruptRunningJitCode(JSContext* cx);
namespace wasm {
// Ensure the given JSRuntime is set up to use signals. Failure to enable signal
@ -43,18 +38,11 @@ namespace wasm {
MOZ_MUST_USE bool
EnsureSignalHandlers(JSContext* cx);
// Return whether signals can be used in this process for interrupts or
// asm.js/wasm out-of-bounds.
// Return whether signals can be used in this process for asm.js/wasm
// out-of-bounds.
bool
HaveSignalHandlers();
class ModuleSegment;
// Returns true if wasm code is on top of the activation stack (and fills out
// the code segment outparam in this case), or false otherwise.
bool
InInterruptibleCode(JSContext* cx, uint8_t* pc, const ModuleSegment** ms);
#if defined(XP_DARWIN)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
// of Unix signals. Mach exceptions are not handled on the victim's stack but
@ -79,23 +67,7 @@ class MachExceptionHandler
};
#endif
// Typed wrappers encapsulating the data saved by the signal handler on async
// interrupt or trap. On interrupt, the PC at which to resume is saved. On trap,
// the bytecode offset to be reported in callstacks is saved.
struct InterruptData
{
// The pc to use for unwinding purposes which is kept consistent with fp at
// call boundaries.
void* unwindPC;
// The pc at which we should return if the interrupt doesn't stop execution.
void* resumePC;
InterruptData(void* unwindPC, void* resumePC)
: unwindPC(unwindPC), resumePC(resumePC)
{}
};
// On trap, the bytecode offset to be reported in callstacks is saved.
struct TrapData
{

View file

@ -1251,8 +1251,6 @@ GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
// need to worry about rooting anymore.
// - or the value needs to be rooted, but nothing can cause a GC between
// here and CoerceInPlace, which roots before coercing to a primitive.
// In particular, this is true because wasm::InInterruptibleCode will
// return false when PC is in the jit exit.
// The JIT callee clobbers all registers, including WasmTlsReg and
// FramePointer, so restore those here. During this sequence of
@ -1618,227 +1616,10 @@ static const LiveRegisterSet AllRegsExceptSP(
FloatRegisterSet(FloatRegisters::AllMask));
#endif
// The async interrupt-callback exit is called from arbitrarily-interrupted wasm
// code. It calls into WasmHandleExecutionInterrupt to determine whether we must
// really halt execution; that call can reenter the VM (e.g., to display the slow
// script dialog). If execution is not interrupted, this stub must carefully
// preserve *all* register state. If execution is interrupted, the entire
// activation will be popped by the throw stub, so register state does not need
// to be restored.
static bool
GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
{
AssertExpectedSP(masm);
masm.haltingAlign(CodeAlignment);
offsets->begin = masm.currentOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// Be very careful here not to perturb the machine state before saving it
// to the stack. In particular, add/sub instructions may set conditions in
// the flags register.
masm.push(Imm32(0)); // space used as return address, updated below
masm.setFramePushed(0); // set to 0 now so that framePushed is offset of return address
masm.PushFlags(); // after this we are safe to use sub
masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)
// We know that StackPointer is word-aligned, but not necessarily
// stack-aligned, so we need to align it dynamically.
masm.moveStackPtrTo(ABINonVolatileReg);
masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
if (ShadowStackSpace)
masm.subFromStackPtr(Imm32(ShadowStackSpace));
// Make the call to C++, which preserves ABINonVolatileReg.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
// HandleExecutionInterrupt returns null if execution is interrupted and
// the resumption pc otherwise.
masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
// Restore the stack pointer then store resumePC into the stack slot that
// will be popped by the 'ret' below.
masm.moveToStackPtr(ABINonVolatileReg);
masm.storePtr(ReturnReg, Address(StackPointer, masm.framePushed()));
// Restore the machine state to before the interrupt. After popping flags,
// no instructions can be executed which set flags.
masm.PopRegsInMask(AllRegsExceptSP);
masm.PopFlags();
// Return to the resumePC stored into this stack slot above.
MOZ_ASSERT(masm.framePushed() == 0);
masm.ret();
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
// Reserve space to store resumePC and HeapReg.
masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
// Set to zero so we can use masm.framePushed() below.
masm.setFramePushed(0);
// Save all registers, except sp.
masm.PushRegsInMask(AllUserRegsExceptSP);
// Save the stack pointer and FCSR in non-volatile registers.
masm.moveStackPtrTo(s0);
masm.as_cfc1(s1, Assembler::FCSR);
// Align the stack.
masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
// Store HeapReg into the reserved space.
masm.storePtr(HeapReg, Address(s0, masm.framePushed() + sizeof(intptr_t)));
# ifdef USES_O32_ABI
// MIPS ABI requires reserving stack for registers $a0 to $a3.
masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));
# endif
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
// This will restore stack to the address before the call.
masm.moveToStackPtr(s0);
// Restore FCSR.
masm.as_ctc1(s1, Assembler::FCSR);
// Store resumePC into the reserved space.
masm.storePtr(ReturnReg, Address(s0, masm.framePushed()));
masm.PopRegsInMask(AllUserRegsExceptSP);
// Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
// in the jump delay slot.
masm.loadPtr(Address(StackPointer, 0), HeapReg);
// Reclaim the reserve space.
masm.addToStackPtr(Imm32(2 * sizeof(intptr_t)));
masm.as_jr(HeapReg);
masm.loadPtr(Address(StackPointer, -int32_t(sizeof(intptr_t))), HeapReg);
#elif defined(JS_CODEGEN_ARM)
{
// Be careful not to clobber scratch registers before they are saved.
ScratchRegisterScope scratch(masm);
SecondScratchRegisterScope secondScratch(masm);
// Reserve a word to receive the return address.
masm.as_alu(StackPointer, StackPointer, Imm8(4), OpSub);
// Set framePushed to 0 now so that framePushed can be used later as the
// stack offset to the return-address space reserved above.
masm.setFramePushed(0);
// Save all GP/FP registers (except PC and SP).
masm.PushRegsInMask(AllRegsExceptPCSP);
}
// Save SP, APSR and FPSCR in non-volatile registers.
masm.as_mrs(r4);
masm.as_vmrs(r5);
masm.mov(sp, r6);
// We know that StackPointer is word-aligned, but not necessarily
// stack-aligned, so we need to align it dynamically.
masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
// Make the call to C++, which preserves the non-volatile registers.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
// HandleExecutionInterrupt returns null if execution is interrupted and
// the resumption pc otherwise.
masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
// Restore the stack pointer then store resumePC into the stack slot that
// will be popped by the 'ret' below.
masm.mov(r6, sp);
masm.storePtr(ReturnReg, Address(sp, masm.framePushed()));
// Restore the machine state to before the interrupt. After popping flags,
// no instructions can be executed which set flags.
masm.as_vmsr(r5);
masm.as_msr(r4);
masm.PopRegsInMask(AllRegsExceptPCSP);
// Return to the resumePC stored into this stack slot above.
MOZ_ASSERT(masm.framePushed() == 0);
masm.ret();
#elif defined(JS_CODEGEN_ARM64)
// Reserve aligned space to receive the saved LR and the new return address.
static constexpr unsigned SAVE_AREA = 16;
static constexpr unsigned LR_OFFSET = 0;
static constexpr unsigned PC_OFFSET = 8;
masm.subFromStackPtr(Imm32(SAVE_AREA));
uint32_t oldFramePushed = masm.framePushed();
// Set framePushed to 0 now so that framePushed can be used later as the
// stack offset to the return-address space reserved above.
masm.setFramePushed(0);
// Store LR specially so that PushRegsInMask gets an even number of words to
// store.
masm.Str(ARMRegister(lr, 64), vixl::MemOperand(sp, LR_OFFSET));
// Save all GP/FP registers (except SP and LR).
masm.PushRegsInMask(AllRegsExceptSPLR);
MOZ_ASSERT(masm.framePushed() % 16 == 0);
// Save SP, APSR and FPSCR in non-volatile registers; prior instructions
// must not update the flags.
masm.Mrs(x20, vixl::NZCV);
masm.Mrs(x21, vixl::FPCR);
masm.Mov(x22, sp);
// The stack is already aligned.
static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
// Make the call to C++, which preserves the non-volatile registers.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
// HandleExecutionInterrupt returns null if execution is interrupted and
// the resumption pc otherwise.
masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
// Restore the stack pointer then store resumePC into a stack slot where the
// return code below will find it.
masm.Mov(sp, x22);
masm.Str(ARMRegister(ReturnReg, 64), vixl::MemOperand(sp, masm.framePushed() + PC_OFFSET));
// Restore the machine state to before the interrupt. After popping flags,
// no instructions can be executed which set flags.
masm.Msr(vixl::FPCR, x21);
masm.Msr(vixl::NZCV, x20);
masm.PopRegsInMask(AllRegsExceptSPLR);
masm.Ldr(ARMRegister(lr, 64), vixl::MemOperand(sp, LR_OFFSET));
// Return to the resumePC stored into this stack slot above.
MOZ_ASSERT(masm.framePushed() == 0);
masm.setFramePushed(oldFramePushed);
// We can clobber PseudoStackPointer because wasm code does not use it.
masm.loadPtr(Address(masm.getStackPointer(), PC_OFFSET), PseudoStackPointer);
masm.addToStackPtr(Imm32(SAVE_AREA));
masm.Ret(PseudoStackPointer64);
#elif defined (JS_CODEGEN_NONE)
MOZ_CRASH();
#else
# error "Unknown architecture!"
#endif
return FinishOffsets(masm, offsets);
}
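
Register choreography aside, the removed stub implements a single contract: call the interrupt callback, then either resume at the pc it returns or unwind the whole activation via the throw stub. A hedged C++ analogue of that resume-or-unwind contract, with the callback simplified to a bool (HandleInterrupt, InterruptExit and RunActivation are illustrative; in the stub the decision is made by the callee behind SymbolicAddress::HandleExecutionInterrupt):

#include <csetjmp>
#include <cstdio>

static jmp_buf gActivationEntry;
static int gInterruptCount = 0;

// Stand-in for the interrupt callback: true means "resume execution",
// false means "abandon the whole activation".
static bool
HandleInterrupt()
{
    return ++gInterruptCount < 3;
}

// Stand-in for the interrupt exit: either return to the interrupted code
// (resume) or unwind the entire activation, as the throw stub does.
static void
InterruptExit()
{
    if (!HandleInterrupt())
        longjmp(gActivationEntry, 1);   // "throw stub": pop the whole activation
    // Otherwise fall through; in the real stub all registers are restored and
    // execution resumes at the interrupted pc.
}

static void
RunActivation()
{
    if (setjmp(gActivationEntry) != 0) {
        std::puts("activation unwound by interrupt");
        return;
    }
    for (;;) {
        // ... interrupted "wasm" work would go here ...
        InterruptExit();                // asynchronous in the real stub, synchronous here
        std::puts("resumed after interrupt");
    }
}

int main()
{
    RunActivation();
    return 0;
}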
// Generate a stub that restores the stack pointer to what it was on entry to
// the wasm activation, sets the return register to 'false' and then executes a
// return which will return from this wasm activation to the caller. This stub
// should only be called after the caller has reported an error (or, in the case
// of the interrupt stub, intends to interrupt execution).
// should only be called after the caller has reported an error.
static bool
GenerateThrowStub(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
{
@ -1849,8 +1630,7 @@ GenerateThrowStub(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
offsets->begin = masm.currentOffset();
// The throw stub can be jumped to from an async interrupt that is halting
// execution. Thus the stack pointer can be unaligned and we must align it
// Conservatively, the stack pointer can be unaligned and we must align it
// dynamically.
masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
if (ShadowStackSpace)
@ -2044,19 +1824,12 @@ wasm::GenerateStubs(const ModuleEnvironment& env, const FuncImportVector& import
if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets))
return false;
if (!GenerateInterruptExit(masm, &throwLabel, &offsets))
CallableOffsets callableOffsets;
if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets))
return false;
if (!code->codeRanges.emplaceBack(CodeRange::Interrupt, offsets))
if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets))
return false;
{
CallableOffsets offsets;
if (!GenerateDebugTrapStub(masm, &throwLabel, &offsets))
return false;
if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, offsets))
return false;
}
if (!GenerateThrowStub(masm, &throwLabel, &offsets))
return false;
if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets))

View file

@ -790,7 +790,6 @@ CodeRange::CodeRange(Kind kind, Offsets offsets)
case UnalignedExit:
case TrapExit:
case Throw:
case Interrupt:
break;
default:
MOZ_CRASH("should use more specific constructor");

View file

@ -1064,7 +1064,6 @@ class CodeRange
OutOfBoundsExit, // stub jumped to by non-standard asm.js SIMD/Atomics
UnalignedExit, // stub jumped to by wasm Atomics and non-standard
// ARM unaligned trap
Interrupt, // stub executes asynchronously to interrupt wasm
Throw // special stack-unwinding stub jumped to by other stubs
};
@ -1373,7 +1372,6 @@ enum class SymbolicAddress
LogD,
PowD,
ATan2D,
HandleExecutionInterrupt,
HandleDebugTrap,
HandleThrow,
ReportTrap,