Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1428453 - Baldr: prefix current trap mechanism names with 'Old' (r=bbouvier)
MozReview-Commit-ID: JeNcXpbKL2s

--HG--
extra : rebase_source : 452f6891e9f97ce254e6d8616595bf9d37737d8f
Parent: 05a9191b32
Commit: d99d8b74c3
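For orientation before the diff itself: the patch renames the existing ("old") trap bookkeeping types and helpers by prefixing them with Old (TrapDesc becomes OldTrapDesc, TrapSite becomes OldTrapSite, trap() becomes oldTrap(), and so on). The following is a minimal, self-contained C++ sketch of the two renamed structs; their shapes are taken from the Assembler-shared.h hunk below, but the reduced Trap enum and the main() driver are illustrative additions, not part of the patch.

// Hypothetical standalone sketch of the renamed trap bookkeeping types.
#include <cstdint>
#include <vector>

enum class Trap { StackOverflow, OutOfBounds, IntegerDivideByZero };

struct BytecodeOffset {
    static const uint32_t INVALID = UINT32_MAX;
    uint32_t offset;
    BytecodeOffset() : offset(INVALID) {}
    explicit BytecodeOffset(uint32_t offset) : offset(offset) {}
    bool isValid() const { return offset != INVALID; }
};

// Was TrapDesc: describes a wasm trap that is about to be emitted.
struct OldTrapDesc : BytecodeOffset {
    enum Kind { Jump, MemoryAccess };
    Kind kind;
    Trap trap;
    uint32_t framePushed;
    OldTrapDesc(BytecodeOffset offset, Trap trap, uint32_t framePushed, Kind kind = Jump)
      : BytecodeOffset(offset), kind(kind), trap(trap), framePushed(framePushed) {}
};

// Was TrapSite: records where the in-line trapping instruction was emitted so
// the out-of-line trap path can be generated at the end of the function.
struct OldTrapSite : OldTrapDesc {
    uint32_t codeOffset;
    OldTrapSite(OldTrapDesc trap, uint32_t codeOffset)
      : OldTrapDesc(trap), codeOffset(codeOffset) {}
};

int main() {
    std::vector<OldTrapSite> oldTrapSites;
    OldTrapDesc trap(BytecodeOffset(42), Trap::OutOfBounds, /* framePushed = */ 16);
    oldTrapSites.emplace_back(trap, /* codeOffset = */ 0x100);  // record one trap site
    return oldTrapSites.front().isValid() ? 0 : 1;
}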
@@ -9478,7 +9478,7 @@ CodeGenerator::generateWasm(wasm::SigIdDesc sigId, wasm::BytecodeOffset trapOffs
 // Since we just overflowed the stack, to be on the safe side, pop the
 // stack so that, when the trap exit stub executes, it is a safe
 // distance away from the end of the native stack.
-wasm::TrapDesc trap(trapOffset, wasm::Trap::StackOverflow, /* framePushed = */ 0);
+wasm::OldTrapDesc trap(trapOffset, wasm::Trap::StackOverflow, /* framePushed = */ 0);
 if (frameSize() > 0) {
 masm.bind(&onOverflow);
 masm.addToStackPtr(Imm32(frameSize()));
@@ -9496,7 +9496,7 @@ CodeGenerator::generateWasm(wasm::SigIdDesc sigId, wasm::BytecodeOffset trapOffs
 if (!generateOutOfLineCode())
 return false;

-masm.wasmEmitTrapOutOfLineCode();
+masm.wasmEmitOldTrapOutOfLineCode();

 masm.flush();
 if (masm.oom())
@@ -12350,7 +12350,7 @@ CodeGenerator::visitWasmTrap(LWasmTrap* lir)
 MOZ_ASSERT(gen->compilingWasm());
 const MWasmTrap* mir = lir->mir();

-masm.jump(trap(mir, mir->trap()));
+masm.jump(oldTrap(mir, mir->trap()));
 }

 void
@@ -12363,7 +12363,7 @@ CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
 Register ptr = ToRegister(ins->ptr());
 Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
 masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
-trap(mir, wasm::Trap::OutOfBounds));
+oldTrap(mir, wasm::Trap::OutOfBounds));
 #endif
 }

@@ -12373,7 +12373,7 @@ CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins)
 const MWasmAlignmentCheck* mir = ins->mir();
 Register ptr = ToRegister(ins->ptr());
 masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
-trap(mir, wasm::Trap::UnalignedAccess));
+oldTrap(mir, wasm::Trap::UnalignedAccess));
 }

 void
@@ -2844,7 +2844,7 @@ MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result, CheckUnsaf
 }

 void
-MacroAssembler::callWithABI(wasm::BytecodeOffset callOffset, wasm::SymbolicAddress imm,
+MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode, wasm::SymbolicAddress imm,
 MoveOp::Type result)
 {
 MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));
@@ -2861,7 +2861,7 @@ MacroAssembler::callWithABI(wasm::BytecodeOffset callOffset, wasm::SymbolicAddre
 // points when placing arguments.
 loadWasmTlsRegFromFrame();

-call(wasm::CallSiteDesc(callOffset.bytecodeOffset, wasm::CallSite::Symbolic), imm);
+call(wasm::CallSiteDesc(bytecode.offset, wasm::CallSite::Symbolic), imm);
 callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);

 Pop(WasmTlsReg);
@@ -3006,7 +3006,7 @@ MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::Cal
 if (needsBoundsCheck) {
 loadWasmGlobalPtr(callee.tableLengthGlobalDataOffset(), scratch);

-wasm::TrapDesc oobTrap(trapOffset, wasm::Trap::OutOfBounds, framePushed());
+wasm::OldTrapDesc oobTrap(trapOffset, wasm::Trap::OutOfBounds, framePushed());
 branch32(Assembler::Condition::AboveOrEqual, index, scratch, oobTrap);
 }

@@ -3014,7 +3014,7 @@ MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::Cal
 loadWasmGlobalPtr(callee.tableBaseGlobalDataOffset(), scratch);

 // Load the callee from the table.
-wasm::TrapDesc nullTrap(trapOffset, wasm::Trap::IndirectCallToNull, framePushed());
+wasm::OldTrapDesc nullTrap(trapOffset, wasm::Trap::IndirectCallToNull, framePushed());
 if (callee.wasmTableIsExternal()) {
 static_assert(sizeof(wasm::ExternalTableElem) == 8 || sizeof(wasm::ExternalTableElem) == 16,
 "elements of external tables are two words");
@@ -3040,21 +3040,21 @@ MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::Cal
 }

 void
-MacroAssembler::wasmEmitTrapOutOfLineCode()
+MacroAssembler::wasmEmitOldTrapOutOfLineCode()
 {
-for (const wasm::TrapSite& site : trapSites()) {
+for (const wasm::OldTrapSite& site : oldTrapSites()) {
 // Trap out-of-line codes are created for two kinds of trap sites:
 //  - jumps, which are bound directly to the trap out-of-line path
 //  - memory accesses, which can fault and then have control transferred
 //    to the out-of-line path directly via signal handler setting pc
 switch (site.kind) {
-case wasm::TrapSite::Jump: {
+case wasm::OldTrapSite::Jump: {
 RepatchLabel jump;
 jump.use(site.codeOffset);
 bind(&jump);
 break;
 }
-case wasm::TrapSite::MemoryAccess: {
+case wasm::OldTrapSite::MemoryAccess: {
 append(wasm::MemoryAccess(site.codeOffset, currentOffset()));
 break;
 }
@@ -3072,7 +3072,7 @@ MacroAssembler::wasmEmitTrapOutOfLineCode()
 // directly to the trap exit stub. This takes advantage of the fact
 // that there is already a CallSite for call_indirect and the
 // current pre-prologue stack/register state.
-append(wasm::TrapFarJump(site.trap, farJumpWithPatch()));
+append(wasm::OldTrapFarJump(site.trap, farJumpWithPatch()));
 } else {
 // Inherit the frame depth of the trap site. This value is captured
 // by the wasm::CallSite to allow unwinding this frame.
@@ -3098,7 +3098,7 @@ MacroAssembler::wasmEmitTrapOutOfLineCode()
 // trap-handling function. The frame iterator knows to skip the trap
 // exit's frame so that unwinding begins at the frame and offset of
 // the trapping instruction.
-wasm::CallSiteDesc desc(site.bytecodeOffset, wasm::CallSiteDesc::TrapExit);
+wasm::CallSiteDesc desc(site.offset, wasm::CallSiteDesc::OldTrapExit);
 call(desc, site.trap);
 }

@@ -3113,7 +3113,7 @@ MacroAssembler::wasmEmitTrapOutOfLineCode()
 // iterator to find the right CodeRange while walking the stack.
 breakpoint();

-trapSites().clear();
+oldTrapSites().clear();
 }

 void
@@ -1506,7 +1506,7 @@ class MacroAssembler : public MacroAssemblerSpecific
 // Emit the out-of-line trap code to which trapping jumps/branches are
 // bound. This should be called once per function after all other codegen,
 // including "normal" OutOfLineCode.
-void wasmEmitTrapOutOfLineCode();
+void wasmEmitOldTrapOutOfLineCode();

 // Perform a stack-overflow test, branching to the given Label on overflow.
 void wasmEmitStackCheck(Register sp, Register scratch, Label* onOverflow);
@@ -2453,7 +2453,7 @@ Assembler::as_b(Label* l, Condition c)
 }

 BufferOffset
-Assembler::as_b(wasm::TrapDesc target, Condition c)
+Assembler::as_b(wasm::OldTrapDesc target, Condition c)
 {
 Label l;
 BufferOffset ret = as_b(&l, c);
@@ -2894,12 +2894,12 @@ Assembler::bind(Label* label, BufferOffset boff)
 }

 void
-Assembler::bindLater(Label* label, wasm::TrapDesc target)
+Assembler::bindLater(Label* label, wasm::OldTrapDesc target)
 {
 if (label->used()) {
 BufferOffset b(label);
 do {
-append(wasm::TrapSite(target, b.getOffset()));
+append(wasm::OldTrapSite(target, b.getOffset()));
 } while (nextLink(b, &b));
 }
 label->reset();
@@ -1614,7 +1614,7 @@ class Assembler : public AssemblerShared
 BufferOffset as_b(BOffImm off, Condition c, Label* documentation = nullptr);

 BufferOffset as_b(Label* l, Condition c = Always);
-BufferOffset as_b(wasm::TrapDesc target, Condition c = Always);
+BufferOffset as_b(wasm::OldTrapDesc target, Condition c = Always);
 BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);

 // blx can go to either an immediate or a register. When blx'ing to a
@@ -1722,7 +1722,7 @@ class Assembler : public AssemblerShared
 bool nextLink(BufferOffset b, BufferOffset* next);
 void bind(Label* label, BufferOffset boff = BufferOffset());
 void bind(RepatchLabel* label);
-void bindLater(Label* label, wasm::TrapDesc target);
+void bindLater(Label* label, wasm::OldTrapDesc target);
 uint32_t currentOffset() {
 return nextOffset().getOffset();
 }
@@ -531,7 +531,7 @@ CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register out
 masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
 if (mir->canTruncateOverflow()) {
 if (mir->trapOnError()) {
-masm.ma_b(trap(mir, wasm::Trap::IntegerOverflow), Assembler::Equal);
+masm.ma_b(oldTrap(mir, wasm::Trap::IntegerOverflow), Assembler::Equal);
 } else {
 // (-INT32_MIN)|0 = INT32_MIN
 Label skip;
@@ -551,7 +551,7 @@ CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register out
 masm.as_cmp(rhs, Imm8(0));
 if (mir->canTruncateInfinities()) {
 if (mir->trapOnError()) {
-masm.ma_b(trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
+masm.ma_b(oldTrap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
 } else {
 // Infinity|0 == 0
 Label skip;
@@ -714,7 +714,7 @@ CodeGeneratorARM::modICommon(MMod* mir, Register lhs, Register rhs, Register out
 // wasm allows negative lhs and return 0 in this case.
 MOZ_ASSERT(mir->isTruncated());
 masm.as_cmp(rhs, Imm8(0));
-masm.ma_b(trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
+masm.ma_b(oldTrap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
 return;
 }

@@ -2120,7 +2120,7 @@ CodeGeneratorARM::visitWasmAddOffset(LWasmAddOffset* lir)
 ScratchRegisterScope scratch(masm);
 masm.ma_add(base, Imm32(mir->offset()), out, scratch, SetCC);

-masm.ma_b(trap(mir, wasm::Trap::OutOfBounds), Assembler::CarrySet);
+masm.ma_b(oldTrap(mir, wasm::Trap::OutOfBounds), Assembler::CarrySet);
 }

 template <typename T>
@@ -2417,7 +2417,7 @@ CodeGeneratorARM::generateUDivModZeroCheck(Register rhs, Register output, Label*
 masm.as_cmp(rhs, Imm8(0));
 if (mir->isTruncated()) {
 if (mir->trapOnError()) {
-masm.ma_b(trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
+masm.ma_b(oldTrap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Equal);
 } else {
 Label skip;
 masm.ma_b(&skip, Assembler::NotEqual);
@@ -2768,7 +2768,7 @@ CodeGeneratorARM::visitDivOrModI64(LDivOrModI64* lir)
 if (lir->canBeDivideByZero()) {
 Register temp = WasmGetTemporaryForDivOrMod(lhs, rhs);
 masm.branchTest64(Assembler::Zero, rhs, rhs, temp,
-trap(lir, wasm::Trap::IntegerDivideByZero));
+oldTrap(lir, wasm::Trap::IntegerDivideByZero));
 }

 auto* mir = lir->mir();
@@ -2781,7 +2781,7 @@ CodeGeneratorARM::visitDivOrModI64(LDivOrModI64* lir)
 if (mir->isMod())
 masm.xor64(output, output);
 else
-masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+masm.jump(oldTrap(lir, wasm::Trap::IntegerOverflow));
 masm.jump(&done);
 masm.bind(&notmin);
 }
@@ -2814,7 +2814,7 @@ CodeGeneratorARM::visitUDivOrModI64(LUDivOrModI64* lir)
 if (lir->canBeDivideByZero()) {
 Register temp = WasmGetTemporaryForDivOrMod(lhs, rhs);
 masm.branchTest64(Assembler::Zero, rhs, rhs, temp,
-trap(lir, wasm::Trap::IntegerDivideByZero));
+oldTrap(lir, wasm::Trap::IntegerDivideByZero));
 }

 masm.setupWasmABICall();
@@ -1390,7 +1390,7 @@ MacroAssemblerARM::ma_b(Label* dest, Assembler::Condition c)
 }

 BufferOffset
-MacroAssemblerARM::ma_b(wasm::TrapDesc target, Assembler::Condition c)
+MacroAssemblerARM::ma_b(wasm::OldTrapDesc target, Assembler::Condition c)
 {
 return as_b(target, c);
 }
@@ -5751,12 +5751,12 @@ MacroAssemblerARM::outOfLineWasmTruncateToIntCheck(FloatRegister input, MIRType

 // Handle errors.
 bind(&fail);
-asMasm().jump(wasm::TrapDesc(trapOffset, wasm::Trap::IntegerOverflow,
-asMasm().framePushed()));
+asMasm().jump(wasm::OldTrapDesc(trapOffset, wasm::Trap::IntegerOverflow,
+asMasm().framePushed()));

 bind(&inputIsNaN);
-asMasm().jump(wasm::TrapDesc(trapOffset, wasm::Trap::InvalidConversionToInteger,
-asMasm().framePushed()));
+asMasm().jump(wasm::OldTrapDesc(trapOffset, wasm::Trap::InvalidConversionToInteger,
+asMasm().framePushed()));
 }

 void
@@ -346,7 +346,7 @@ class MacroAssemblerARM : public Assembler

 // Branches when done from within arm-specific code.
 BufferOffset ma_b(Label* dest, Condition c = Always);
-BufferOffset ma_b(wasm::TrapDesc target, Condition c = Always);
+BufferOffset ma_b(wasm::OldTrapDesc target, Condition c = Always);
 void ma_b(void* target, Condition c = Always);
 void ma_bx(Register dest, Condition c = Always);

@@ -677,7 +677,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
 ma_ldr(addr, scratch, scratch2);
 ma_bx(scratch);
 }
-void jump(wasm::TrapDesc target) {
+void jump(wasm::OldTrapDesc target) {
 as_b(target);
 }

@@ -215,7 +215,7 @@ class Assembler : public vixl::Assembler
 void bind(Label* label) { bind(label, nextOffset()); }
 void bind(Label* label, BufferOffset boff);
 void bind(RepatchLabel* label);
-void bindLater(Label* label, wasm::TrapDesc target) {
+void bindLater(Label* label, wasm::OldTrapDesc target) {
 MOZ_CRASH("NYI");
 }

@@ -493,10 +493,10 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
 }

 using vixl::MacroAssembler::B;
-void B(wasm::TrapDesc) {
+void B(wasm::OldTrapDesc) {
 MOZ_CRASH("NYI");
 }
-void B(wasm::TrapDesc, Condition cond) {
+void B(wasm::OldTrapDesc, Condition cond) {
 MOZ_CRASH("NYI");
 }

@@ -673,7 +673,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
 loadPtr(addr, ip0);
 Br(vixl::ip0);
 }
-void jump(wasm::TrapDesc target) {
+void jump(wasm::OldTrapDesc target) {
 MOZ_CRASH("NYI");
 }

@@ -1809,7 +1809,7 @@ AssemblerMIPSShared::bind(Label* label, BufferOffset boff)
 }

 void
-AssemblerMIPSShared::bindLater(Label* label, wasm::TrapDesc target)
+AssemblerMIPSShared::bindLater(Label* label, wasm::OldTrapDesc target)
 {
 if (label->used()) {
 int32_t next;
@@ -1818,7 +1818,7 @@ AssemblerMIPSShared::bindLater(Label* label, wasm::TrapDesc target)
 do {
 Instruction* inst = editSrc(b);

-append(wasm::TrapSite(target, b.getOffset()));
+append(wasm::OldTrapSite(target, b.getOffset()));
 next = inst[1].encode();
 inst[1].makeNop();

@@ -1231,7 +1231,7 @@ class AssemblerMIPSShared : public AssemblerShared

 // label operations
 void bind(Label* label, BufferOffset boff = BufferOffset());
-void bindLater(Label* label, wasm::TrapDesc target);
+void bindLater(Label* label, wasm::OldTrapDesc target);
 virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
 void bind(CodeOffset* label) {
 label->bind(currentOffset());
@@ -568,7 +568,7 @@ CodeGeneratorMIPSShared::visitDivI(LDivI* ins)
 // Handle divide by zero.
 if (mir->canBeDivideByZero()) {
 if (mir->trapOnError()) {
-masm.ma_b(rhs, rhs, trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+masm.ma_b(rhs, rhs, oldTrap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
 } else if (mir->canTruncateInfinities()) {
 // Truncated division by zero is zero (Infinity|0 == 0)
 Label notzero;
@@ -590,7 +590,7 @@ CodeGeneratorMIPSShared::visitDivI(LDivI* ins)

 masm.move32(Imm32(-1), temp);
 if (mir->trapOnError()) {
-masm.ma_b(rhs, temp, trap(mir, wasm::Trap::IntegerOverflow), Assembler::Equal);
+masm.ma_b(rhs, temp, oldTrap(mir, wasm::Trap::IntegerOverflow), Assembler::Equal);
 } else if (mir->canTruncateOverflow()) {
 // (-INT32_MIN)|0 == INT32_MIN
 Label skip;
@@ -718,7 +718,7 @@ CodeGeneratorMIPSShared::visitModI(LModI* ins)
 if (mir->canBeDivideByZero()) {
 if (mir->isTruncated()) {
 if (mir->trapOnError()) {
-masm.ma_b(rhs, rhs, trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+masm.ma_b(rhs, rhs, oldTrap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
 } else {
 Label skip;
 masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
@@ -1559,10 +1559,10 @@ CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCh

 // Handle errors.
 masm.bind(&fail);
-masm.jump(trap(ool, wasm::Trap::IntegerOverflow));
+masm.jump(oldTrap(ool, wasm::Trap::IntegerOverflow));

 masm.bind(&inputIsNaN);
-masm.jump(trap(ool, wasm::Trap::InvalidConversionToInteger));
+masm.jump(oldTrap(ool, wasm::Trap::InvalidConversionToInteger));
 }

 void
@@ -2415,7 +2415,7 @@ CodeGeneratorMIPSShared::visitUDivOrMod(LUDivOrMod* ins)
 if (ins->canBeDivideByZero()) {
 if (ins->mir()->isTruncated()) {
 if (ins->trapOnError()) {
-masm.ma_b(rhs, rhs, trap(ins, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+masm.ma_b(rhs, rhs, oldTrap(ins, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
 } else {
 // Infinity|0 == 0
 Label notzero;
@@ -2767,7 +2767,7 @@ CodeGeneratorMIPSShared::visitWasmAddOffset(LWasmAddOffset* lir)
 Register base = ToRegister(lir->base());
 Register out = ToRegister(lir->output());

-masm.ma_addTestCarry(out, base, Imm32(mir->offset()), trap(mir, wasm::Trap::OutOfBounds));
+masm.ma_addTestCarry(out, base, Imm32(mir->offset()), oldTrap(mir, wasm::Trap::OutOfBounds));
 }

 template <typename T>
@@ -337,8 +337,8 @@ template void
 MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
 Register rt, Label* overflow);
 template void
-MacroAssemblerMIPSShared::ma_addTestCarry<wasm::TrapDesc>(Register rd, Register rs, Register rt,
-wasm::TrapDesc overflow);
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::OldTrapDesc>(Register rd, Register rs, Register rt,
+wasm::OldTrapDesc overflow);

 template <typename L>
 void
@@ -352,8 +352,8 @@ template void
 MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
 Imm32 imm, Label* overflow);
 template void
-MacroAssemblerMIPSShared::ma_addTestCarry<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
-wasm::TrapDesc overflow);
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::OldTrapDesc>(Register rd, Register rs, Imm32 imm,
+wasm::OldTrapDesc overflow);

 // Subtract.
 void
@@ -796,7 +796,7 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,

 template <typename T>
 void
-MacroAssemblerMIPSShared::ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
+MacroAssemblerMIPSShared::ma_b(Register lhs, T rhs, wasm::OldTrapDesc target, Condition c,
 JumpKind jumpKind)
 {
 Label label;
@@ -805,13 +805,13 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condi
 }

 template void MacroAssemblerMIPSShared::ma_b<Register>(Register lhs, Register rhs,
-wasm::TrapDesc target, Condition c,
+wasm::OldTrapDesc target, Condition c,
 JumpKind jumpKind);
 template void MacroAssemblerMIPSShared::ma_b<Imm32>(Register lhs, Imm32 rhs,
-wasm::TrapDesc target, Condition c,
+wasm::OldTrapDesc target, Condition c,
 JumpKind jumpKind);
 template void MacroAssemblerMIPSShared::ma_b<ImmTag>(Register lhs, ImmTag rhs,
-wasm::TrapDesc target, Condition c,
+wasm::OldTrapDesc target, Condition c,
 JumpKind jumpKind);

 void
@@ -821,7 +821,7 @@ MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind)
 }

 void
-MacroAssemblerMIPSShared::ma_b(wasm::TrapDesc target, JumpKind jumpKind)
+MacroAssemblerMIPSShared::ma_b(wasm::OldTrapDesc target, JumpKind jumpKind)
 {
 Label label;
 asMasm().branchWithCode(getBranchCode(BranchIsJump), &label, jumpKind);
@@ -164,11 +164,11 @@ class MacroAssemblerMIPSShared : public Assembler
 ma_b(lhs, ScratchRegister, l, c, jumpKind);
 }
 template <typename T>
-void ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
+void ma_b(Register lhs, T rhs, wasm::OldTrapDesc target, Condition c,
 JumpKind jumpKind = LongJump);

 void ma_b(Label* l, JumpKind jumpKind = LongJump);
-void ma_b(wasm::TrapDesc target, JumpKind jumpKind = LongJump);
+void ma_b(wasm::OldTrapDesc target, JumpKind jumpKind = LongJump);

 // fp instructions
 void ma_lis(FloatRegister dest, float value);
@@ -380,7 +380,7 @@ CodeGeneratorMIPS::visitDivOrModI64(LDivOrModI64* lir)

 // Handle divide by zero.
 if (lir->canBeDivideByZero())
-masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+masm.branchTest64(Assembler::Zero, rhs, rhs, temp, oldTrap(lir, wasm::Trap::IntegerDivideByZero));

 // Handle an integer overflow exception from INT64_MIN / -1.
 if (lir->canBeNegativeOverflow()) {
@@ -390,7 +390,7 @@ CodeGeneratorMIPS::visitDivOrModI64(LDivOrModI64* lir)
 if (lir->mir()->isMod()) {
 masm.xor64(output, output);
 } else {
-masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+masm.jump(oldTrap(lir, wasm::Trap::IntegerOverflow));
 }
 masm.jump(&done);
 masm.bind(&notmin);
@@ -433,7 +433,7 @@ CodeGeneratorMIPS::visitUDivOrModI64(LUDivOrModI64* lir)

 // Prevent divide by zero.
 if (lir->canBeDivideByZero())
-masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+masm.branchTest64(Assembler::Zero, rhs, rhs, temp, oldTrap(lir, wasm::Trap::IntegerDivideByZero));

 masm.setupWasmABICall();
 masm.passABIArg(lhs.high);
@@ -238,8 +238,8 @@ template void
 MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
 Register rt, Label* overflow);
 template void
-MacroAssemblerMIPS::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Register rt,
-wasm::TrapDesc overflow);
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::OldTrapDesc>(Register rd, Register rs, Register rt,
+wasm::OldTrapDesc overflow);

 template <typename L>
 void
@@ -270,8 +270,8 @@ template void
 MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
 Imm32 imm, Label* overflow);
 template void
-MacroAssemblerMIPS::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
-wasm::TrapDesc overflow);
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::OldTrapDesc>(Register rd, Register rs, Imm32 imm,
+wasm::OldTrapDesc overflow);

 // Subtract.
 void
@@ -306,7 +306,7 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
 branch(code);
 }

-void jump(wasm::TrapDesc target) {
+void jump(wasm::OldTrapDesc target) {
 ma_b(target);
 }

@@ -362,7 +362,7 @@ CodeGeneratorMIPS64::visitDivOrModI64(LDivOrModI64* lir)

 // Handle divide by zero.
 if (lir->canBeDivideByZero())
-masm.ma_b(rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+masm.ma_b(rhs, rhs, oldTrap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);

 // Handle an integer overflow exception from INT64_MIN / -1.
 if (lir->canBeNegativeOverflow()) {
@@ -372,7 +372,7 @@ CodeGeneratorMIPS64::visitDivOrModI64(LDivOrModI64* lir)
 if (lir->mir()->isMod()) {
 masm.ma_xor(output, output);
 } else {
-masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+masm.jump(oldTrap(lir, wasm::Trap::IntegerOverflow));
 }
 masm.jump(&done);
 masm.bind(&notmin);
@@ -399,7 +399,7 @@ CodeGeneratorMIPS64::visitUDivOrModI64(LUDivOrModI64* lir)

 // Prevent divide by zero.
 if (lir->canBeDivideByZero())
-masm.ma_b(rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+masm.ma_b(rhs, rhs, oldTrap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);

 masm.as_ddivu(lhs, rhs);

@@ -489,8 +489,8 @@ template void
 MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs,
 Register rt, Label* overflow);
 template void
-MacroAssemblerMIPS64::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Register rt,
-wasm::TrapDesc overflow);
+MacroAssemblerMIPS64::ma_addTestOverflow<wasm::OldTrapDesc>(Register rd, Register rs, Register rt,
+wasm::OldTrapDesc overflow);

 template <typename L>
 void
@@ -511,8 +511,8 @@ template void
 MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs,
 Imm32 imm, Label* overflow);
 template void
-MacroAssemblerMIPS64::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
-wasm::TrapDesc overflow);
+MacroAssemblerMIPS64::ma_addTestOverflow<wasm::OldTrapDesc>(Register rd, Register rs, Imm32 imm,
+wasm::OldTrapDesc overflow);

 // Subtract.
 void
@@ -335,7 +335,7 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
 branch(code);
 }

-void jump(wasm::TrapDesc target) {
+void jump(wasm::OldTrapDesc target) {
 ma_b(target);
 }

@@ -210,7 +210,7 @@ class MacroAssemblerNone : public Assembler
 void flushBuffer() { MOZ_CRASH(); }

 template <typename T> void bind(T) { MOZ_CRASH(); }
-void bindLater(Label*, wasm::TrapDesc) { MOZ_CRASH(); }
+void bindLater(Label*, wasm::OldTrapDesc) { MOZ_CRASH(); }
 template <typename T> void j(Condition, T) { MOZ_CRASH(); }
 template <typename T> void jump(T) { MOZ_CRASH(); }
 void writeCodePointer(CodeOffset* label) { MOZ_CRASH(); }
@@ -811,47 +811,47 @@ struct CallFarJump

 typedef Vector<CallFarJump, 0, SystemAllocPolicy> CallFarJumpVector;

-// The TrapDesc struct describes a wasm trap that is about to be emitted. This
+// The OldTrapDesc struct describes a wasm trap that is about to be emitted. This
 // includes the logical wasm bytecode offset to report, the kind of instruction
 // causing the trap, and the stack depth right before control is transferred to
 // the trap out-of-line path.

-struct TrapDesc : BytecodeOffset
+struct OldTrapDesc : BytecodeOffset
 {
 enum Kind { Jump, MemoryAccess };
 Kind kind;
 Trap trap;
 uint32_t framePushed;

-TrapDesc(BytecodeOffset offset, Trap trap, uint32_t framePushed, Kind kind = Jump)
+OldTrapDesc(BytecodeOffset offset, Trap trap, uint32_t framePushed, Kind kind = Jump)
 : BytecodeOffset(offset), kind(kind), trap(trap), framePushed(framePushed)
 {}
 };

-// A TrapSite captures all relevant information at the point of emitting the
+// An OldTrapSite captures all relevant information at the point of emitting the
 // in-line trapping instruction for the purpose of generating the out-of-line
 // trap code (at the end of the function).

-struct TrapSite : TrapDesc
+struct OldTrapSite : OldTrapDesc
 {
 uint32_t codeOffset;

-TrapSite(TrapDesc trap, uint32_t codeOffset)
-: TrapDesc(trap), codeOffset(codeOffset)
+OldTrapSite(OldTrapDesc trap, uint32_t codeOffset)
+: OldTrapDesc(trap), codeOffset(codeOffset)
 {}
 };

-typedef Vector<TrapSite, 0, SystemAllocPolicy> TrapSiteVector;
+typedef Vector<OldTrapSite, 0, SystemAllocPolicy> OldTrapSiteVector;

-// A TrapFarJump records the offset of a jump that needs to be patched to a trap
+// An OldTrapFarJump records the offset of a jump that needs to be patched to a trap
 // exit at the end of the module when trap exits are emitted.

-struct TrapFarJump
+struct OldTrapFarJump
 {
 Trap trap;
 jit::CodeOffset jump;

-TrapFarJump(Trap trap, jit::CodeOffset jump)
+OldTrapFarJump(Trap trap, jit::CodeOffset jump)
 : trap(trap), jump(jump)
 {}

@@ -860,7 +860,7 @@ struct TrapFarJump
 }
 };

-typedef Vector<TrapFarJump, 0, SystemAllocPolicy> TrapFarJumpVector;
+typedef Vector<OldTrapFarJump, 0, SystemAllocPolicy> OldTrapFarJumpVector;

 } // namespace wasm

@@ -871,8 +871,8 @@ class AssemblerShared
 {
 wasm::CallSiteVector callSites_;
 wasm::CallSiteTargetVector callSiteTargets_;
-wasm::TrapSiteVector trapSites_;
-wasm::TrapFarJumpVector trapFarJumps_;
+wasm::OldTrapSiteVector oldTrapSites_;
+wasm::OldTrapFarJumpVector oldTrapFarJumps_;
 wasm::CallFarJumpVector callFarJumps_;
 wasm::MemoryAccessVector memoryAccesses_;
 wasm::SymbolicAccessVector symbolicAccesses_;
@@ -930,11 +930,11 @@ class AssemblerShared
 enoughMemory_ &= callSites_.emplaceBack(desc, retAddr.offset());
 enoughMemory_ &= callSiteTargets_.emplaceBack(mozilla::Forward<Args>(args)...);
 }
-void append(wasm::TrapSite trapSite) {
-enoughMemory_ &= trapSites_.append(trapSite);
+void append(wasm::OldTrapSite trapSite) {
+enoughMemory_ &= oldTrapSites_.append(trapSite);
 }
-void append(wasm::TrapFarJump jmp) {
-enoughMemory_ &= trapFarJumps_.append(jmp);
+void append(wasm::OldTrapFarJump jmp) {
+enoughMemory_ &= oldTrapFarJumps_.append(jmp);
 }
 void append(wasm::CallFarJump jmp) {
 enoughMemory_ &= callFarJumps_.append(jmp);
@@ -945,11 +945,11 @@ class AssemblerShared
 void append(const wasm::MemoryAccessDesc& access, size_t codeOffset, size_t framePushed) {
 if (access.hasTrap()) {
 // If a memory access is trapping (wasm, SIMD.js, Atomics), create a
-// TrapSite now which will generate a trap out-of-line path at the end
+// OldTrapSite now which will generate a trap out-of-line path at the end
 // of the function which will *then* append a MemoryAccess.
-wasm::TrapDesc trap(access.trapOffset(), wasm::Trap::OutOfBounds, framePushed,
-wasm::TrapSite::MemoryAccess);
-append(wasm::TrapSite(trap, codeOffset));
+wasm::OldTrapDesc trap(access.trapOffset(), wasm::Trap::OutOfBounds, framePushed,
+wasm::OldTrapSite::MemoryAccess);
+append(wasm::OldTrapSite(trap, codeOffset));
 } else {
 // Otherwise, this is a plain asm.js access. On WASM_HUGE_MEMORY
 // platforms, asm.js uses signal handlers to remove bounds checks
@@ -966,8 +966,8 @@ class AssemblerShared

 wasm::CallSiteVector& callSites() { return callSites_; }
 wasm::CallSiteTargetVector& callSiteTargets() { return callSiteTargets_; }
-wasm::TrapSiteVector& trapSites() { return trapSites_; }
-wasm::TrapFarJumpVector& trapFarJumps() { return trapFarJumps_; }
+wasm::OldTrapSiteVector& oldTrapSites() { return oldTrapSites_; }
+wasm::OldTrapFarJumpVector& oldTrapFarJumps() { return oldTrapFarJumps_; }
 wasm::CallFarJumpVector& callFarJumps() { return callFarJumps_; }
 wasm::MemoryAccessVector& memoryAccesses() { return memoryAccesses_; }
 wasm::SymbolicAccessVector& symbolicAccesses() { return symbolicAccesses_; }
@@ -503,8 +503,8 @@ class CodeGeneratorShared : public LElementVisitor
 #endif

 template <class T>
-wasm::TrapDesc trap(T* mir, wasm::Trap trap) {
-return wasm::TrapDesc(mir->bytecodeOffset(), trap, masm.framePushed());
+wasm::OldTrapDesc oldTrap(T* mir, wasm::Trap trap) {
+return wasm::OldTrapDesc(mir->bytecodeOffset(), trap, masm.framePushed());
 }

 private:
@@ -285,7 +285,7 @@ CodeGeneratorX64::visitDivOrModI64(LDivOrModI64* lir)

 // Handle divide by zero.
 if (lir->canBeDivideByZero()) {
-masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));
+masm.branchTestPtr(Assembler::Zero, rhs, rhs, oldTrap(lir, wasm::Trap::IntegerDivideByZero));
 }

 // Handle an integer overflow exception from INT64_MIN / -1.
@@ -296,7 +296,7 @@ CodeGeneratorX64::visitDivOrModI64(LDivOrModI64* lir)
 if (lir->mir()->isMod())
 masm.xorl(output, output);
 else
-masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+masm.jump(oldTrap(lir, wasm::Trap::IntegerOverflow));
 masm.jump(&done);
 masm.bind(&notmin);
 }
@@ -328,7 +328,7 @@ CodeGeneratorX64::visitUDivOrModI64(LUDivOrModI64* lir)

 // Prevent divide by zero.
 if (lir->canBeDivideByZero())
-masm.branchTestPtr(Assembler::Zero, rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero));
+masm.branchTestPtr(Assembler::Zero, rhs, rhs, oldTrap(lir, wasm::Trap::IntegerDivideByZero));

 // Zero extend the lhs into rdx to make (rdx:rax).
 masm.xorl(rdx, rdx);
@@ -922,12 +922,12 @@ class AssemblerX86Shared : public AssemblerShared
 void j(Condition cond, RepatchLabel* label) { jSrc(cond, label); }
 void jmp(RepatchLabel* label) { jmpSrc(label); }

-void j(Condition cond, wasm::TrapDesc target) {
+void j(Condition cond, wasm::OldTrapDesc target) {
 Label l;
 j(cond, &l);
 bindLater(&l, target);
 }
-void jmp(wasm::TrapDesc target) {
+void jmp(wasm::OldTrapDesc target) {
 Label l;
 jmp(&l);
 bindLater(&l, target);
@@ -963,11 +963,11 @@ class AssemblerX86Shared : public AssemblerShared
 }
 label->bind(dst.offset());
 }
-void bindLater(Label* label, wasm::TrapDesc target) {
+void bindLater(Label* label, wasm::OldTrapDesc target) {
 if (label->used()) {
 JmpSrc jmp(label->offset());
 do {
-append(wasm::TrapSite(target, jmp.offset()));
+append(wasm::OldTrapSite(target, jmp.offset()));
 } while (masm.nextJump(jmp, &jmp));
 }
 label->reset();
@@ -435,7 +435,7 @@ CodeGeneratorX86Shared::visitWasmAddOffset(LWasmAddOffset* lir)
 masm.move32(base, out);
 masm.add32(Imm32(mir->offset()), out);

-masm.j(Assembler::CarrySet, trap(mir, wasm::Trap::OutOfBounds));
+masm.j(Assembler::CarrySet, oldTrap(mir, wasm::Trap::OutOfBounds));
 }

 void
@@ -1026,7 +1026,7 @@ CodeGeneratorX86Shared::visitUDivOrMod(LUDivOrMod* ins)
 masm.test32(rhs, rhs);
 if (ins->mir()->isTruncated()) {
 if (ins->trapOnError()) {
-masm.j(Assembler::Zero, trap(ins, wasm::Trap::IntegerDivideByZero));
+masm.j(Assembler::Zero, oldTrap(ins, wasm::Trap::IntegerDivideByZero));
 } else {
 ool = new(alloc()) ReturnZero(output);
 masm.j(Assembler::Zero, ool->entry());
@@ -1074,7 +1074,7 @@ CodeGeneratorX86Shared::visitUDivOrModConstant(LUDivOrModConstant *ins) {
 if (d == 0) {
 if (ins->mir()->isTruncated()) {
 if (ins->trapOnError())
-masm.jump(trap(ins, wasm::Trap::IntegerDivideByZero));
+masm.jump(oldTrap(ins, wasm::Trap::IntegerDivideByZero));
 else
 masm.xorl(output, output);
 } else {
@@ -1213,7 +1213,7 @@ CodeGeneratorX86Shared::visitDivPowTwoI(LDivPowTwoI* ins)
 if (!mir->isTruncated())
 bailoutIf(Assembler::Overflow, ins->snapshot());
 else if (mir->trapOnError())
-masm.j(Assembler::Overflow, trap(mir, wasm::Trap::IntegerOverflow));
+masm.j(Assembler::Overflow, oldTrap(mir, wasm::Trap::IntegerOverflow));
 } else if (mir->isUnsigned() && !mir->isTruncated()) {
 // Unsigned division by 1 can overflow if output is not
 // truncated.
@@ -1332,7 +1332,7 @@ CodeGeneratorX86Shared::visitDivI(LDivI* ins)
 if (mir->canBeDivideByZero()) {
 masm.test32(rhs, rhs);
 if (mir->trapOnError()) {
-masm.j(Assembler::Zero, trap(mir, wasm::Trap::IntegerDivideByZero));
+masm.j(Assembler::Zero, oldTrap(mir, wasm::Trap::IntegerDivideByZero));
 } else if (mir->canTruncateInfinities()) {
 // Truncated division by zero is zero (Infinity|0 == 0)
 if (!ool)
@@ -1351,7 +1351,7 @@ CodeGeneratorX86Shared::visitDivI(LDivI* ins)
 masm.j(Assembler::NotEqual, &notmin);
 masm.cmp32(rhs, Imm32(-1));
 if (mir->trapOnError()) {
-masm.j(Assembler::Equal, trap(mir, wasm::Trap::IntegerOverflow));
+masm.j(Assembler::Equal, oldTrap(mir, wasm::Trap::IntegerOverflow));
 } else if (mir->canTruncateOverflow()) {
 // (-INT32_MIN)|0 == INT32_MIN and INT32_MIN is already in the
 // output register (lhs == eax).
@@ -1501,7 +1501,7 @@ CodeGeneratorX86Shared::visitModI(LModI* ins)
 masm.test32(rhs, rhs);
 if (mir->isTruncated()) {
 if (mir->trapOnError()) {
-masm.j(Assembler::Zero, trap(mir, wasm::Trap::IntegerDivideByZero));
+masm.j(Assembler::Zero, oldTrap(mir, wasm::Trap::IntegerDivideByZero));
 } else {
 if (!ool)
 ool = new(alloc()) ReturnZero(edx);
@@ -2523,7 +2523,7 @@ CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIn
 masm.jump(ool->rejoin());

 if (gen->compilingWasm()) {
-masm.bindLater(&onConversionError, trap(ool, wasm::Trap::ImpreciseSimdConversion));
+masm.bindLater(&onConversionError, oldTrap(ool, wasm::Trap::ImpreciseSimdConversion));
 } else {
 masm.bind(&onConversionError);
 bailout(ool->ins()->snapshot());
@@ -2603,7 +2603,7 @@ CodeGeneratorX86Shared::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins)
 masm.cmp32(temp, Imm32(0));

 if (gen->compilingWasm())
-masm.j(Assembler::NotEqual, trap(mir, wasm::Trap::ImpreciseSimdConversion));
+masm.j(Assembler::NotEqual, oldTrap(mir, wasm::Trap::ImpreciseSimdConversion));
 else
 bailoutIf(Assembler::NotEqual, ins->snapshot());
 }
@@ -686,10 +686,10 @@ struct MOZ_RAII AutoHandleWasmTruncateToIntErrors
 ~AutoHandleWasmTruncateToIntErrors() {
 // Handle errors.
 masm.bind(&fail);
-masm.jump(wasm::TrapDesc(off, wasm::Trap::IntegerOverflow, masm.framePushed()));
+masm.jump(wasm::OldTrapDesc(off, wasm::Trap::IntegerOverflow, masm.framePushed()));

 masm.bind(&inputIsNaN);
-masm.jump(wasm::TrapDesc(off, wasm::Trap::InvalidConversionToInteger, masm.framePushed()));
+masm.jump(wasm::OldTrapDesc(off, wasm::Trap::InvalidConversionToInteger, masm.framePushed()));
 }
 };

@@ -202,7 +202,7 @@ class MacroAssemblerX86Shared : public Assembler
 void jump(const Address& addr) {
 jmp(Operand(addr));
 }
-void jump(wasm::TrapDesc target) {
+void jump(wasm::OldTrapDesc target) {
 jmp(target);
 }

@@ -1031,7 +1031,7 @@ CodeGeneratorX86::visitDivOrModI64(LDivOrModI64* lir)

 // Handle divide by zero.
 if (lir->canBeDivideByZero())
-masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+masm.branchTest64(Assembler::Zero, rhs, rhs, temp, oldTrap(lir, wasm::Trap::IntegerDivideByZero));

 MDefinition* mir = lir->mir();

@@ -1043,7 +1043,7 @@ CodeGeneratorX86::visitDivOrModI64(LDivOrModI64* lir)
 if (mir->isMod())
 masm.xor64(output, output);
 else
-masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+masm.jump(oldTrap(lir, wasm::Trap::IntegerOverflow));
 masm.jump(&done);
 masm.bind(&notmin);
 }
@@ -1079,7 +1079,7 @@ CodeGeneratorX86::visitUDivOrModI64(LUDivOrModI64* lir)

 // Prevent divide by zero.
 if (lir->canBeDivideByZero())
-masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+masm.branchTest64(Assembler::Zero, rhs, rhs, temp, oldTrap(lir, wasm::Trap::IntegerDivideByZero));

 masm.setupWasmABICall();
 masm.passABIArg(lhs.high);
@@ -3020,7 +3020,7 @@ class BaseCompiler final : public BaseCompilerInterface
 if (fr.initialSize() > debugFrameReserved)
 masm.addToStackPtr(Imm32(fr.initialSize() - debugFrameReserved));
 BytecodeOffset prologueTrapOffset(func_.lineOrBytecode);
-masm.jump(TrapDesc(prologueTrapOffset, Trap::StackOverflow, debugFrameReserved));
+masm.jump(OldTrapDesc(prologueTrapOffset, Trap::StackOverflow, debugFrameReserved));

 masm.bind(&returnLabel_);

@@ -3045,7 +3045,7 @@ class BaseCompiler final : public BaseCompilerInterface
 if (!generateOutOfLineCode())
 return false;

-masm.wasmEmitTrapOutOfLineCode();
+masm.wasmEmitOldTrapOutOfLineCode();

 offsets_.end = masm.currentOffset();

@@ -3453,12 +3453,12 @@ class BaseCompiler final : public BaseCompilerInterface
 }

 void checkDivideByZeroI32(RegI32 rhs, RegI32 srcDest, Label* done) {
-masm.branchTest32(Assembler::Zero, rhs, rhs, trap(Trap::IntegerDivideByZero));
+masm.branchTest32(Assembler::Zero, rhs, rhs, oldTrap(Trap::IntegerDivideByZero));
 }

 void checkDivideByZeroI64(RegI64 r) {
 ScratchI32 scratch(*this);
-masm.branchTest64(Assembler::Zero, r, r, scratch, trap(Trap::IntegerDivideByZero));
+masm.branchTest64(Assembler::Zero, r, r, scratch, oldTrap(Trap::IntegerDivideByZero));
 }

 void checkDivideSignedOverflowI32(RegI32 rhs, RegI32 srcDest, Label* done, bool zeroOnOverflow) {
@@ -3469,7 +3469,7 @@ class BaseCompiler final : public BaseCompilerInterface
 moveImm32(0, srcDest);
 masm.jump(done);
 } else {
-masm.branch32(Assembler::Equal, rhs, Imm32(-1), trap(Trap::IntegerOverflow));
+masm.branch32(Assembler::Equal, rhs, Imm32(-1), oldTrap(Trap::IntegerOverflow));
 }
 masm.bind(&notMin);
 }
@@ -3482,7 +3482,7 @@ class BaseCompiler final : public BaseCompilerInterface
 masm.xor64(srcDest, srcDest);
 masm.jump(done);
 } else {
-masm.jump(trap(Trap::IntegerOverflow));
+masm.jump(oldTrap(Trap::IntegerOverflow));
 }
 masm.bind(&notmin);
 }
@@ -3809,7 +3809,7 @@ class BaseCompiler final : public BaseCompilerInterface

 void unreachableTrap()
 {
-masm.jump(trap(Trap::Unreachable));
+masm.jump(oldTrap(Trap::Unreachable));
 #ifdef DEBUG
 masm.breakpoint();
 #endif
@@ -3940,7 +3940,7 @@ class BaseCompiler final : public BaseCompilerInterface
 (access->isAtomic() && !check->omitAlignmentCheck && !check->onlyPointerAlignment))
 {
 masm.branchAdd32(Assembler::CarrySet, Imm32(access->offset()), ptr,
-trap(Trap::OutOfBounds));
+oldTrap(Trap::OutOfBounds));
 access->clearOffset();
 check->onlyPointerAlignment = true;
 }
@@ -3951,7 +3951,7 @@ class BaseCompiler final : public BaseCompilerInterface
 MOZ_ASSERT(check->onlyPointerAlignment);
 // We only care about the low pointer bits here.
 masm.branchTest32(Assembler::NonZero, ptr, Imm32(access->byteSize() - 1),
-trap(Trap::UnalignedAccess));
+oldTrap(Trap::UnalignedAccess));
 }

 // Ensure no tls if we don't need it.
@@ -3972,7 +3972,7 @@ class BaseCompiler final : public BaseCompilerInterface
 if (!check->omitBoundsCheck) {
 masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr,
 Address(tls, offsetof(TlsData, boundsCheckLimit)),
-trap(Trap::OutOfBounds));
+oldTrap(Trap::OutOfBounds));
 }
 #endif
 }
@@ -4905,11 +4905,11 @@ class BaseCompiler final : public BaseCompilerInterface
 return iter_.bytecodeOffset();
 }

-TrapDesc trap(Trap t) const {
+OldTrapDesc oldTrap(Trap t) const {
 // Use masm.framePushed() because the value needed by the trap machinery
 // is the size of the frame overall, not the height of the stack area of
 // the frame.
-return TrapDesc(bytecodeOffset(), t, masm.framePushed());
+return OldTrapDesc(bytecodeOffset(), t, masm.framePushed());
 }

 ////////////////////////////////////////////////////////////
@@ -8395,7 +8395,7 @@ BaseCompiler::emitWait(ValType type, uint32_t byteSize)
 default:
 MOZ_CRASH();
 }
-masm.branchTest32(Assembler::Signed, ReturnReg, ReturnReg, trap(Trap::ThrowReported));
+masm.branchTest32(Assembler::Signed, ReturnReg, ReturnReg, oldTrap(Trap::ThrowReported));

 return true;
 }
@@ -8414,7 +8414,7 @@ BaseCompiler::emitWake()
 return true;

 emitInstanceCall(lineOrBytecode, SigPII_, ExprType::I32, SymbolicAddress::Wake);
-masm.branchTest32(Assembler::Signed, ReturnReg, ReturnReg, trap(Trap::ThrowReported));
+masm.branchTest32(Assembler::Signed, ReturnReg, ReturnReg, oldTrap(Trap::ThrowReported));

 return true;
 }
@@ -234,7 +234,7 @@ WasmHandleThrow()
 }

 static void
-WasmReportTrap(int32_t trapIndex)
+WasmOldReportTrap(int32_t trapIndex)
 {
 JSContext* cx = TlsContext.get();

@@ -439,9 +439,9 @@ AddressOf(SymbolicAddress imm, ABIFunctionType* abiType)
 case SymbolicAddress::HandleThrow:
 *abiType = Args_General0;
 return FuncCast(WasmHandleThrow, *abiType);
-case SymbolicAddress::ReportTrap:
+case SymbolicAddress::OldReportTrap:
 *abiType = Args_General1;
-return FuncCast(WasmReportTrap, *abiType);
+return FuncCast(WasmOldReportTrap, *abiType);
 case SymbolicAddress::ReportOutOfBounds:
 *abiType = Args_General0;
 return FuncCast(WasmReportOutOfBounds, *abiType);
@@ -595,7 +595,7 @@ wasm::NeedsBuiltinThunk(SymbolicAddress sym)
 case SymbolicAddress::HandleExecutionInterrupt: // GenerateInterruptExit
 case SymbolicAddress::HandleDebugTrap:          // GenerateDebugTrapStub
 case SymbolicAddress::HandleThrow:              // GenerateThrowStub
-case SymbolicAddress::ReportTrap:               // GenerateTrapExit
+case SymbolicAddress::OldReportTrap:            // GenerateOldTrapExit
 case SymbolicAddress::ReportOutOfBounds:        // GenerateOutOfBoundsExit
 case SymbolicAddress::ReportUnalignedAccess:    // GeneratesUnalignedExit
 case SymbolicAddress::CallImport_Void:          // GenerateImportInterpExit
@@ -891,8 +891,8 @@ wasm::EnsureBuiltinThunksInitialized()
 MOZ_ASSERT(masm.callSites().empty());
 MOZ_ASSERT(masm.callSiteTargets().empty());
 MOZ_ASSERT(masm.callFarJumps().empty());
-MOZ_ASSERT(masm.trapSites().empty());
-MOZ_ASSERT(masm.trapFarJumps().empty());
+MOZ_ASSERT(masm.oldTrapSites().empty());
+MOZ_ASSERT(masm.oldTrapFarJumps().empty());
 MOZ_ASSERT(masm.callFarJumps().empty());
 MOZ_ASSERT(masm.memoryAccesses().empty());
 MOZ_ASSERT(masm.symbolicAccesses().empty());
@@ -446,7 +446,7 @@ wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, const
 // Generate table entry:
 offsets->begin = masm.currentOffset();
 BytecodeOffset trapOffset(0); // ignored by masm.wasmEmitTrapOutOfLineCode
-TrapDesc trap(trapOffset, Trap::IndirectCallBadSig, masm.framePushed());
+OldTrapDesc trap(trapOffset, Trap::IndirectCallBadSig, masm.framePushed());
 switch (sigId.kind()) {
 case SigIdDesc::Kind::Global: {
 Register scratch = WasmTableCallScratchReg;
@@ -635,7 +635,7 @@ ProfilingFrameIterator::initFromExitFP(const Frame* fp)
 case CodeRange::ImportJitExit:
 case CodeRange::ImportInterpExit:
 case CodeRange::BuiltinThunk:
-case CodeRange::TrapExit:
+case CodeRange::OldTrapExit:
 case CodeRange::DebugTrap:
 case CodeRange::OutOfBoundsExit:
 case CodeRange::UnalignedExit:
@@ -714,7 +714,7 @@ js::wasm::StartUnwinding(const RegisterState& registers, UnwindState* unwindStat
 case CodeRange::ImportJitExit:
 case CodeRange::ImportInterpExit:
 case CodeRange::BuiltinThunk:
-case CodeRange::TrapExit:
+case CodeRange::OldTrapExit:
 case CodeRange::DebugTrap:
 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
 if ((offsetFromEntry >= BeforePushRetAddr && offsetFromEntry < PushedFP) || codeRange->isThunk()) {
@@ -913,7 +913,7 @@ ProfilingFrameIterator::operator++()
 case CodeRange::ImportJitExit:
 case CodeRange::ImportInterpExit:
 case CodeRange::BuiltinThunk:
-case CodeRange::TrapExit:
+case CodeRange::OldTrapExit:
 case CodeRange::DebugTrap:
 case CodeRange::OutOfBoundsExit:
 case CodeRange::UnalignedExit:
@@ -941,7 +941,7 @@ ThunkedNativeToDescription(SymbolicAddress func)
 case SymbolicAddress::HandleExecutionInterrupt:
 case SymbolicAddress::HandleDebugTrap:
 case SymbolicAddress::HandleThrow:
-case SymbolicAddress::ReportTrap:
+case SymbolicAddress::OldReportTrap:
 case SymbolicAddress::ReportOutOfBounds:
 case SymbolicAddress::ReportUnalignedAccess:
 case SymbolicAddress::CallImport_Void:
@@ -1071,7 +1071,7 @@ ProfilingFrameIterator::label() const
 case CodeRange::ImportJitExit: return importJitDescription;
 case CodeRange::BuiltinThunk: return builtinNativeDescription;
 case CodeRange::ImportInterpExit: return importInterpDescription;
-case CodeRange::TrapExit: return trapDescription;
+case CodeRange::OldTrapExit: return trapDescription;
 case CodeRange::DebugTrap: return debugTrapDescription;
 case CodeRange::OutOfBoundsExit: return "out-of-bounds stub (in wasm)";
 case CodeRange::UnalignedExit: return "unaligned trap stub (in wasm)";
@@ -47,9 +47,9 @@ CompiledCode::swap(MacroAssembler& masm)

 callSites.swap(masm.callSites());
 callSiteTargets.swap(masm.callSiteTargets());
-trapSites.swap(masm.trapSites());
+oldTrapSites.swap(masm.oldTrapSites());
 callFarJumps.swap(masm.callFarJumps());
-trapFarJumps.swap(masm.trapFarJumps());
+oldTrapFarJumps.swap(masm.oldTrapFarJumps());
 memoryAccesses.swap(masm.memoryAccesses());
 symbolicAccesses.swap(masm.symbolicAccesses());
 codeLabels.swap(masm.codeLabels());
@@ -75,7 +75,7 @@ ModuleGenerator::ModuleGenerator(const CompileArgs& args, ModuleEnvironment* env
 lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
 masmAlloc_(&lifo_),
 masm_(MacroAssembler::WasmToken(), masmAlloc_),
-trapCodeOffsets_(),
+oldTrapCodeOffsets_(),
 debugTrapCodeOffset_(),
 lastPatchedCallSite_(0),
 startOfUnpatchedCallsites_(0),
@@ -86,7 +86,7 @@ ModuleGenerator::ModuleGenerator(const CompileArgs& args, ModuleEnvironment* env
 finishedFuncDefs_(false)
 {
 MOZ_ASSERT(IsCompilingWasm());
-std::fill(trapCodeOffsets_.begin(), trapCodeOffsets_.end(), 0);
+std::fill(oldTrapCodeOffsets_.begin(), oldTrapCodeOffsets_.end(), 0);
 }

 ModuleGenerator::~ModuleGenerator()
@@ -436,14 +436,14 @@ ModuleGenerator::linkCallSites()
 masm_.patchCall(callerOffset, p->value());
 break;
 }
-case CallSiteDesc::TrapExit: {
+case CallSiteDesc::OldTrapExit: {
 if (!existingTrapFarJumps[target.trap()]) {
-// See MacroAssembler::wasmEmitTrapOutOfLineCode for why we must
+// See MacroAssembler::wasmEmitOldTrapOutOfLineCode for why we must
 // reload the TLS register on this path.
 Offsets offsets;
 offsets.begin = masm_.currentOffset();
 masm_.loadPtr(Address(FramePointer, offsetof(Frame, tls)), WasmTlsReg);
-if (!trapFarJumps_.emplaceBack(target.trap(), masm_.farJumpWithPatch()))
+if (!oldTrapFarJumps_.emplaceBack(target.trap(), masm_.farJumpWithPatch()))
 return false;
 offsets.end = masm_.currentOffset();
 if (masm_.oom())
@@ -503,9 +503,9 @@ ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex, const CodeRange& codeRan
 case CodeRange::ImportInterpExit:
 metadataTier_->funcImports[codeRange.funcIndex()].initInterpExitOffset(codeRange.begin());
 break;
-case CodeRange::TrapExit:
-MOZ_ASSERT(!trapCodeOffsets_[codeRange.trap()]);
-trapCodeOffsets_[codeRange.trap()] = codeRange.begin();
+case CodeRange::OldTrapExit:
+MOZ_ASSERT(!oldTrapCodeOffsets_[codeRange.trap()]);
+oldTrapCodeOffsets_[codeRange.trap()] = codeRange.begin();
 break;
 case CodeRange::DebugTrap:
 MOZ_ASSERT(!debugTrapCodeOffset_);
@@ -580,10 +580,10 @@ ModuleGenerator::linkCompiledCode(const CompiledCode& code)
 if (!callSiteTargets_.appendAll(code.callSiteTargets))
 return false;

-MOZ_ASSERT(code.trapSites.empty());
+MOZ_ASSERT(code.oldTrapSites.empty());

-auto trapFarJumpOp = [=](uint32_t, TrapFarJump* tfj) { tfj->offsetBy(offsetInModule); };
-if (!AppendForEach(&trapFarJumps_, code.trapFarJumps, trapFarJumpOp))
+auto trapFarJumpOp = [=](uint32_t, OldTrapFarJump* tfj) { tfj->offsetBy(offsetInModule); };
+if (!AppendForEach(&oldTrapFarJumps_, code.oldTrapFarJumps, trapFarJumpOp))
 return false;

 auto callFarJumpOp = [=](uint32_t, CallFarJump* cfj) { cfj->offsetBy(offsetInModule); };
@@ -788,8 +788,8 @@ ModuleGenerator::finishCode()
 for (CallFarJump far : callFarJumps_)
 masm_.patchFarJump(far.jump, funcCodeRange(far.funcIndex).funcNormalEntry());

-for (TrapFarJump far : trapFarJumps_)
-masm_.patchFarJump(far.jump, trapCodeOffsets_[far.trap]);
+for (OldTrapFarJump far : oldTrapFarJumps_)
+masm_.patchFarJump(far.jump, oldTrapCodeOffsets_[far.trap]);

 for (CodeOffset farJump : debugTrapFarJumps_)
 masm_.patchFarJump(farJump, debugTrapCodeOffset_);
@@ -798,8 +798,8 @@ ModuleGenerator::finishCode()

 MOZ_ASSERT(masm_.callSites().empty());
 MOZ_ASSERT(masm_.callSiteTargets().empty());
-MOZ_ASSERT(masm_.trapSites().empty());
-MOZ_ASSERT(masm_.trapFarJumps().empty());
+MOZ_ASSERT(masm_.oldTrapSites().empty());
+MOZ_ASSERT(masm_.oldTrapFarJumps().empty());
 MOZ_ASSERT(masm_.callFarJumps().empty());
 MOZ_ASSERT(masm_.memoryAccesses().empty());
 MOZ_ASSERT(masm_.symbolicAccesses().empty());
@@ -64,8 +64,8 @@ struct CompiledCode
 CodeRangeVector codeRanges;
 CallSiteVector callSites;
 CallSiteTargetVector callSiteTargets;
-TrapSiteVector trapSites;
-TrapFarJumpVector trapFarJumps;
+OldTrapSiteVector oldTrapSites;
+OldTrapFarJumpVector oldTrapFarJumps;
 CallFarJumpVector callFarJumps;
 MemoryAccessVector memoryAccesses;
 SymbolicAccessVector symbolicAccesses;
@@ -78,8 +78,8 @@ struct CompiledCode
 codeRanges.clear();
 callSites.clear();
 callSiteTargets.clear();
-trapSites.clear();
-trapFarJumps.clear();
+oldTrapSites.clear();
+oldTrapFarJumps.clear();
 callFarJumps.clear();
 memoryAccesses.clear();
 symbolicAccesses.clear();
@@ -92,8 +92,8 @@ struct CompiledCode
 codeRanges.empty() &&
 callSites.empty() &&
 callSiteTargets.empty() &&
-trapSites.empty() &&
-trapFarJumps.empty() &&
+oldTrapSites.empty() &&
+oldTrapFarJumps.empty() &&
 callFarJumps.empty() &&
 memoryAccesses.empty() &&
 symbolicAccesses.empty() &&
@@ -145,7 +145,7 @@ struct CompileTask
 class MOZ_STACK_CLASS ModuleGenerator
 {
 typedef Vector<CompileTask, 0, SystemAllocPolicy> CompileTaskVector;
-typedef EnumeratedArray<Trap, Trap::Limit, uint32_t> Uint32TrapArray;
+typedef EnumeratedArray<Trap, Trap::Limit, uint32_t> OldTrapOffsetArray;
 typedef Vector<jit::CodeOffset, 0, SystemAllocPolicy> CodeOffsetVector;

 // Constant parameters
@@ -168,9 +168,9 @@ class MOZ_STACK_CLASS ModuleGenerator
 jit::TempAllocator masmAlloc_;
 jit::MacroAssembler masm_;
 Uint32Vector funcToCodeRange_;
-Uint32TrapArray trapCodeOffsets_;
+OldTrapOffsetArray oldTrapCodeOffsets_;
 uint32_t debugTrapCodeOffset_;
-TrapFarJumpVector trapFarJumps_;
+OldTrapFarJumpVector oldTrapFarJumps_;
 CallFarJumpVector callFarJumps_;
 CallSiteTargetVector callSiteTargets_;
 uint32_t lastPatchedCallSite_;
@@ -533,7 +533,7 @@ GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, SigIdDes

 GenerateFunctionEpilogue(masm, framePushed, offsets);

-masm.wasmEmitTrapOutOfLineCode();
+masm.wasmEmitOldTrapOutOfLineCode();

 return FinishOffsets(masm, offsets);
 }
@@ -1001,12 +1001,12 @@ wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType, ExitRe
 return FinishOffsets(masm, offsets);
 }

-// Generate a stub that calls into ReportTrap with the right trap reason.
+// Generate a stub that calls into WasmOldReportTrap with the right trap reason.
 // This stub is called with ABIStackAlignment by a trap out-of-line path. An
 // exit prologue/epilogue is used so that stack unwinding picks up the
 // current JitActivation. Unwinding will begin at the caller of this trap exit.
 static bool
-GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel, CallableOffsets* offsets)
+GenerateOldTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel, CallableOffsets* offsets)
 {
 masm.haltingAlign(CodeAlignment);

@@ -1028,7 +1028,7 @@ GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel, CallableOff
 MOZ_ASSERT(i.done());

 masm.assertStackAlignment(ABIStackAlignment);
-masm.call(SymbolicAddress::ReportTrap);
+masm.call(SymbolicAddress::OldReportTrap);

 masm.jump(throwLabel);

@@ -1369,7 +1369,7 @@ wasm::GenerateStubs(const ModuleEnvironment& env, const FuncImportVector& import

 for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
 CallableOffsets offsets;
-if (!GenerateTrapExit(masm, trap, &throwLabel, &offsets))
+if (!GenerateOldTrapExit(masm, trap, &throwLabel, &offsets))
 return false;
 if (!code->codeRanges.emplaceBack(trap, offsets))
 return false;
@@ -728,7 +728,7 @@ CodeRange::CodeRange(Kind kind, CallableOffsets offsets)
 PodZero(&u);
 #ifdef DEBUG
 switch (kind_) {
-case TrapExit:
+case OldTrapExit:
 case DebugTrap:
 case BuiltinThunk:
 break;
@@ -773,7 +773,7 @@ CodeRange::CodeRange(Trap trap, CallableOffsets offsets)
 : begin_(offsets.begin),
 ret_(offsets.ret),
 end_(offsets.end),
-kind_(TrapExit)
+kind_(OldTrapExit)
 {
 MOZ_ASSERT(begin_ < ret_);
 MOZ_ASSERT(ret_ < end_);
@@ -1011,7 +1011,7 @@ class CodeRange
 ImportJitExit,    // fast-path calling from wasm into JIT code
 ImportInterpExit, // slow-path calling from wasm into C++ interp
 BuiltinThunk,     // fast-path calling from wasm into a C++ native
-TrapExit,         // calls C++ to report and jumps to throw stub
+OldTrapExit,      // calls C++ to report and jumps to throw stub
 DebugTrap,        // calls C++ to handle debug event
 FarJumpIsland,    // inserted to connect otherwise out-of-range insns
 OutOfBoundsExit,  // stub jumped to by non-standard asm.js SIMD/Atomics
@@ -1087,7 +1087,7 @@ class CodeRange
 return kind() == ImportJitExit;
 }
 bool isTrapExit() const {
-return kind() == TrapExit;
+return kind() == OldTrapExit;
 }
 bool isDebugTrap() const {
 return kind() == DebugTrap;
@@ -1187,12 +1187,12 @@ LookupInSorted(const CodeRangeVector& codeRanges, CodeRange::OffsetInCode target
 struct BytecodeOffset
 {
 static const uint32_t INVALID = -1;
-uint32_t bytecodeOffset;
+uint32_t offset;

-BytecodeOffset() : bytecodeOffset(INVALID) {}
-explicit BytecodeOffset(uint32_t bytecodeOffset) : bytecodeOffset(bytecodeOffset) {}
+BytecodeOffset() : offset(INVALID) {}
+explicit BytecodeOffset(uint32_t offset) : offset(offset) {}

-bool isValid() const { return bytecodeOffset != INVALID; }
+bool isValid() const { return offset != INVALID; }
 };

 // While the frame-pointer chain allows the stack to be unwound without
@@ -1210,7 +1210,7 @@ class CallSiteDesc
 Func,       // pc-relative call to a specific function
 Dynamic,    // dynamic callee called via register
 Symbolic,   // call to a single symbolic callee
-TrapExit,   // call to a trap exit
+OldTrapExit,// call to a trap exit (being removed)
 EnterFrame, // call to a enter frame handler
 LeaveFrame, // call to a leave frame handler
 Breakpoint  // call to instruction breakpoint
@@ -1333,7 +1333,7 @@ enum class SymbolicAddress
 HandleExecutionInterrupt,
 HandleDebugTrap,
 HandleThrow,
-ReportTrap,
+OldReportTrap,
 ReportOutOfBounds,
 ReportUnalignedAccess,
 CallImport_Void,