Bug 1639153 - Part 6.3: Establish dependency on tls for arm callWithABI div/mod i64. r=lth

To call the C++ runtime via a builtin thunk we need WasmTlsReg to be set up.
This patch threads a dependency from the MIR level down through codegen so that
WasmTlsReg is known to be live when we call the runtime for i64 div/mod on ARM.

Differential Revision: https://phabricator.services.mozilla.com/D88762
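
As background for the codegen changes below, here is a minimal, self-contained sketch of the frame-offset bookkeeping involved; FakeMasm and all values are invented for illustration and this is not SpiderMonkey code. The idea is that WasmTlsReg is pushed before the ABI call is set up, and callWithABI receives how far the stack has grown since that push, so the saved tls value can be found relative to the stack pointer and reloaded into WasmTlsReg before the builtin thunk is entered.

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in for MacroAssembler: it only tracks how many bytes have been
// pushed, which is all the tlsOffset computation below relies on.
struct FakeMasm {
  std::vector<uintptr_t> stack;
  int32_t framePushed() const {
    return static_cast<int32_t>(stack.size() * sizeof(uintptr_t));
  }
  void Push(uintptr_t value) { stack.push_back(value); }
};

int main() {
  const uintptr_t fakeTls = 0x1234;  // pretend content of WasmTlsReg
  FakeMasm masm;

  masm.Push(fakeTls);  // corresponds to masm.Push(WasmTlsReg)
  int32_t framePushedAfterTls = masm.framePushed();

  masm.Push(0);  // pretend words pushed while setting up the ABI call
  masm.Push(0);

  // Same arithmetic as in visitDivOrModI64 / visitUDivOrModI64 below.
  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;

  // The saved tls sits tlsOffset bytes above the current top of the stack,
  // so it can be located relative to the stack pointer at the call.
  size_t slotsFromTop = tlsOffset / sizeof(uintptr_t);
  assert(masm.stack[masm.stack.size() - 1 - slotsFromTop] == fakeTls);
  return 0;
}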
Dmitry Bezhetskov 2020-09-14 04:00:18 +00:00
Parent 4f480c9072
Commit b72efe024e
4 changed files with 101 additions and 95 deletions

View file

@@ -2557,26 +2557,12 @@ void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
   masm.ma_asr(Imm32(31), output.low, output.high);
 }
-static Register WasmGetTemporaryForDivOrMod(Register64 lhs, Register64 rhs) {
-  MOZ_ASSERT(IsCompilingWasm());
-  // All inputs are useAtStart for a call instruction. As a result we cannot
-  // ask the register allocator for a non-aliasing temp.
-  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
-  regs.take(lhs.low);
-  regs.take(lhs.high);
-  // The FramePointer shouldn't be clobbered for profiling.
-  regs.take(FramePointer);
-  if (lhs != rhs) {
-    regs.take(rhs.low);
-    regs.take(rhs.high);
-  }
-  return regs.takeAny();
-}
 void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
   MOZ_ASSERT(gen->compilingWasm());
+  MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Tls)) == WasmTlsReg);
+  masm.Push(WasmTlsReg);
+  int32_t framePushedAfterTls = masm.framePushed();
   Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
   Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
   Register64 output = ToOutRegister64(lir);
@@ -2587,9 +2573,9 @@ void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
   // Handle divide by zero.
   if (lir->canBeDivideByZero()) {
-    Register temp = WasmGetTemporaryForDivOrMod(lhs, rhs);
     Label nonZero;
-    masm.branchTest64(Assembler::NonZero, rhs, rhs, temp, &nonZero);
+    // We can use WasmTlsReg as temp register because we preserved it before.
+    masm.branchTest64(Assembler::NonZero, rhs, rhs, WasmTlsReg, &nonZero);
     masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
     masm.bind(&nonZero);
   }
@@ -2601,7 +2587,7 @@ void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
     Label notmin;
     masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
     masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
-    if (mir->isMod()) {
+    if (mir->isWasmBuiltinModI64()) {
       masm.xor64(output, output);
     } else {
       masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
@@ -2616,20 +2602,27 @@ void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
   masm.passABIArg(rhs.high);
   masm.passABIArg(rhs.low);
-  if (mir->isMod()) {
+  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
+  if (mir->isWasmBuiltinModI64()) {
     masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64,
-                     mozilla::Nothing());
+                     mozilla::Some(tlsOffset));
   } else {
     masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64,
-                     mozilla::Nothing());
+                     mozilla::Some(tlsOffset));
   }
   MOZ_ASSERT(ReturnReg64 == output);
   masm.bind(&done);
+  masm.Pop(WasmTlsReg);
 }
 void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
   MOZ_ASSERT(gen->compilingWasm());
+  MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Tls)) == WasmTlsReg);
+  masm.Push(WasmTlsReg);
+  int32_t framePushedAfterTls = masm.framePushed();
   Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
   Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
@@ -2637,9 +2630,9 @@ void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
   // Prevent divide by zero.
   if (lir->canBeDivideByZero()) {
-    Register temp = WasmGetTemporaryForDivOrMod(lhs, rhs);
     Label nonZero;
-    masm.branchTest64(Assembler::NonZero, rhs, rhs, temp, &nonZero);
+    // We can use WasmTlsReg as temp register because we preserved it before.
+    masm.branchTest64(Assembler::NonZero, rhs, rhs, WasmTlsReg, &nonZero);
     masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
     masm.bind(&nonZero);
   }
@@ -2650,15 +2643,16 @@ void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
   masm.passABIArg(rhs.high);
   masm.passABIArg(rhs.low);
   MOZ_ASSERT(gen->compilingWasm());
   MDefinition* mir = lir->mir();
-  if (mir->isMod()) {
+  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
+  if (mir->isWasmBuiltinModI64()) {
     masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64,
-                     mozilla::Nothing());
+                     mozilla::Some(tlsOffset));
   } else {
     masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64,
-                     mozilla::Nothing());
+                     mozilla::Some(tlsOffset));
   }
+  masm.Pop(WasmTlsReg);
 }
 void CodeGenerator::visitCompareI64(LCompareI64* lir) {

View file

@@ -95,80 +95,86 @@ class LDivI : public LBinaryMath<1> {
 };
 class LDivOrModI64
-    : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2, 0> {
+    : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
  public:
   LIR_HEADER(DivOrModI64)
   static const size_t Lhs = 0;
   static const size_t Rhs = INT64_PIECES;
+  static const size_t Tls = 2 * INT64_PIECES;
-  LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+  LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+               const LAllocation& tls)
       : LCallInstructionHelper(classOpcode) {
     setInt64Operand(Lhs, lhs);
     setInt64Operand(Rhs, rhs);
+    setOperand(Tls, tls);
   }
-  MBinaryArithInstruction* mir() const {
-    MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
-    return static_cast<MBinaryArithInstruction*>(mir_);
+  MDefinition* mir() const {
+    MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+    return mir_;
   }
   bool canBeDivideByZero() const {
-    if (mir_->isMod()) {
-      return mir_->toMod()->canBeDivideByZero();
+    if (mir_->isWasmBuiltinModI64()) {
+      return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
     }
-    return mir_->toDiv()->canBeDivideByZero();
+    return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
   }
   bool canBeNegativeOverflow() const {
-    if (mir_->isMod()) {
-      return mir_->toMod()->canBeNegativeDividend();
+    if (mir_->isWasmBuiltinModI64()) {
+      return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
     }
-    return mir_->toDiv()->canBeNegativeOverflow();
+    return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
   }
   wasm::BytecodeOffset bytecodeOffset() const {
-    MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
-    if (mir_->isMod()) {
-      return mir_->toMod()->bytecodeOffset();
+    MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+    if (mir_->isWasmBuiltinModI64()) {
+      return mir_->toWasmBuiltinModI64()->bytecodeOffset();
     }
-    return mir_->toDiv()->bytecodeOffset();
+    return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
   }
 };
 class LUDivOrModI64
-    : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2, 0> {
+    : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
  public:
   LIR_HEADER(UDivOrModI64)
   static const size_t Lhs = 0;
   static const size_t Rhs = INT64_PIECES;
+  static const size_t Tls = 2 * INT64_PIECES;
-  LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+  LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+                const LAllocation& tls)
      : LCallInstructionHelper(classOpcode) {
     setInt64Operand(Lhs, lhs);
     setInt64Operand(Rhs, rhs);
+    setOperand(Tls, tls);
   }
-  MBinaryArithInstruction* mir() const {
-    MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
-    return static_cast<MBinaryArithInstruction*>(mir_);
+  MDefinition* mir() const {
+    MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+    return mir_;
   }
   bool canBeDivideByZero() const {
-    if (mir_->isMod()) {
-      return mir_->toMod()->canBeDivideByZero();
+    if (mir_->isWasmBuiltinModI64()) {
+      return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
     }
-    return mir_->toDiv()->canBeDivideByZero();
+    return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
  }
   bool canBeNegativeOverflow() const {
-    if (mir_->isMod()) {
-      return mir_->toMod()->canBeNegativeDividend();
+    if (mir_->isWasmBuiltinModI64()) {
+      return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
    }
-    return mir_->toDiv()->canBeNegativeOverflow();
+    return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
   }
   wasm::BytecodeOffset bytecodeOffset() const {
-    MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
-    if (mir_->isMod()) {
-      return mir_->toMod()->bytecodeOffset();
+    MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+    if (mir_->isWasmBuiltinModI64()) {
+      return mir_->toWasmBuiltinModI64()->bytecodeOffset();
     }
-    return mir_->toDiv()->bytecodeOffset();
+    return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
   }
 };

View file

@@ -409,47 +409,53 @@ void LIRGeneratorARM::lowerModI(MMod* mod) {
 }
 void LIRGeneratorARM::lowerDivI64(MDiv* div) {
-  if (div->isUnsigned()) {
-    lowerUDivI64(div);
-    return;
-  }
-  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
-      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
-  defineReturn(lir, div);
+  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
 }
 void LIRGeneratorARM::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
-  MOZ_CRASH("We don't use runtime div for this architecture");
-}
-void LIRGeneratorARM::lowerModI64(MMod* mod) {
-  if (mod->isUnsigned()) {
-    lowerUModI64(mod);
+  if (div->isUnsigned()) {
+    LUDivOrModI64* lir =
+        new (alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+                                    useInt64RegisterAtStart(div->rhs()),
+                                    useFixedAtStart(div->tls(), WasmTlsReg));
+    defineReturn(lir, div);
     return;
   }
   LDivOrModI64* lir = new (alloc()) LDivOrModI64(
-      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
-  defineReturn(lir, mod);
-}
-void LIRGeneratorARM::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
-  MOZ_CRASH("We don't use runtime mod for this architecture");
-}
-void LIRGeneratorARM::lowerUDivI64(MDiv* div) {
-  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
-      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
+      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()),
+      useFixedAtStart(div->tls(), WasmTlsReg));
   defineReturn(lir, div);
 }
-void LIRGeneratorARM::lowerUModI64(MMod* mod) {
-  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
-      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
+void LIRGeneratorARM::lowerModI64(MMod* mod) {
+  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+void LIRGeneratorARM::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+  if (mod->isUnsigned()) {
+    LUDivOrModI64* lir =
+        new (alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+                                    useInt64RegisterAtStart(mod->rhs()),
+                                    useFixedAtStart(mod->tls(), WasmTlsReg));
+    defineReturn(lir, mod);
+    return;
+  }
+  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()),
+      useFixedAtStart(mod->tls(), WasmTlsReg));
   defineReturn(lir, mod);
 }
+void LIRGeneratorARM::lowerUDivI64(MDiv* div) {
+  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
+}
+void LIRGeneratorARM::lowerUModI64(MMod* mod) {
+  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
 void LIRGenerator::visitPowHalf(MPowHalf* ins) {
   MDefinition* input = ins->input();
   MOZ_ASSERT(input->type() == MIRType::Double);

View file

@@ -461,9 +461,9 @@ class FunctionCompiler {
       rhs = rhs2;
     }
-    // For x86 we implement i64 div via c++ runtime.
-    // A call to c++ runtime requires tls pointer.
-#ifdef JS_CODEGEN_X86
+    // For x86 and arm we implement i64 div via c++ builtin.
+    // A call to c++ builtin requires tls pointer.
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
     if (type == MIRType::Int64) {
       auto* ins =
           MWasmBuiltinDivI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,
@@ -495,9 +495,9 @@ class FunctionCompiler {
       rhs = rhs2;
     }
-    // This is because x86 codegen calls runtime via BuiltinThunk and so it
-    // needs WasmTlsReg to be live.
-#ifdef JS_CODEGEN_X86
+    // For x86 and arm we implement i64 mod via c++ builtin.
+    // A call to c++ builtin requires tls pointer.
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
     if (type == MIRType::Int64) {
       auto* ins =
           MWasmBuiltinModI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,