Bug 1490993 part 1 - Always use braces for if/for/while statements in js/src/jit/x64. r=mgaudet

Differential Revision: https://phabricator.services.mozilla.com/D5762

--HG--
extra : moz-landing-system : lando
Jan de Mooij 2018-09-13 13:51:46 +00:00
Parent 1a37ecbbc1
Commit ea80c8629f
10 changed files with 237 additions and 143 deletions
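For reference, the rule enforced throughout this patch turns every single-statement if/for/while body into a braced block. A minimal before/after sketch (hypothetical example, not code taken from the patch):

// Before: unbraced single-statement body.
static int clampToZero(int v)
{
    if (v < 0)
        return 0;
    return v;
}

// After: every if/for/while body is braced, even one-liners.
static int clampToZero(int v)
{
    if (v < 0) {
        return 0;
    }
    return v;
}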

View File

@ -90,10 +90,11 @@ ABIArgGenerator::next(MIRType type)
stackOffset_ += sizeof(uint64_t);
break;
}
if (type == MIRType::Float32)
if (type == MIRType::Float32) {
current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
else
} else {
current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
}
break;
case MIRType::Int8x16:
case MIRType::Int16x8:
@ -140,8 +141,9 @@ Assembler::addPendingJump(JmpSrc src, ImmPtr target, RelocationKind reloc)
// Emit reloc before modifying the jump table, since it computes a 0-based
// index. This jump is not patchable at runtime.
if (reloc == RelocationKind::JITCODE)
if (reloc == RelocationKind::JITCODE) {
writeRelocation(src, reloc);
}
enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
}
@ -181,8 +183,9 @@ Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target)
void
Assembler::finish()
{
if (oom())
if (oom()) {
return;
}
if (!jumps_.length()) {
// Since we may be followed by non-executable data, eagerly insert an
@ -200,8 +203,9 @@ Assembler::finish()
// jump relocation buffer if any JitCode references exist and must be
// tracked for GC.
MOZ_ASSERT_IF(jumpRelocations_.length(), jumpRelocations_.length() >= sizeof(uint32_t));
if (jumpRelocations_.length())
if (jumpRelocations_.length()) {
*(uint32_t*)jumpRelocations_.buffer() = extendedJumpTable_;
}
// Zero the extended jumps table.
for (size_t i = 0; i < jumps_.length(); i++) {
@ -270,8 +274,9 @@ class RelocationIterator
}
bool read() {
if (!reader_.more())
if (!reader_.more()) {
return false;
}
offset_ = reader_.readUnsigned();
extOffset_ = reader_.readUnsigned();
return true;

View File

@ -903,10 +903,11 @@ class Assembler : public AssemblerX86Shared
// though. Use xorl instead of xorq since they are functionally
// equivalent (32-bit instructions zero-extend their results to 64 bits)
// and xorl has a smaller encoding.
if (word.value == 0)
if (word.value == 0) {
xorl(dest, dest);
else
} else {
movq(word, dest);
}
}
void mov(ImmPtr imm, Register dest) {
movq(imm, dest);
@ -1138,8 +1139,9 @@ GetIntArgReg(uint32_t intArg, uint32_t floatArg, Register* out)
#else
uint32_t arg = intArg;
#endif
if (arg >= NumIntArgRegs)
if (arg >= NumIntArgRegs) {
return false;
}
*out = IntArgRegs[arg];
return true;
}
@ -1152,8 +1154,9 @@ GetIntArgReg(uint32_t intArg, uint32_t floatArg, Register* out)
static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
{
if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) {
return true;
}
// Unfortunately, we have to assume things about the point at which
// GetIntArgReg returns false, because we need to know how many registers it
// can allocate.
@ -1163,8 +1166,9 @@ GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
uint32_t arg = usedIntArgs;
#endif
arg -= NumIntArgRegs;
if (arg >= NumCallTempNonArgRegs)
if (arg >= NumCallTempNonArgRegs) {
return false;
}
*out = CallTempNonArgRegs[arg];
return true;
}
@ -1177,8 +1181,9 @@ GetFloatArgReg(uint32_t intArg, uint32_t floatArg, FloatRegister* out)
#else
uint32_t arg = floatArg;
#endif
if (floatArg >= NumFloatArgRegs)
if (floatArg >= NumFloatArgRegs) {
return false;
}
*out = FloatArgRegs[arg];
return true;
}

View File

@ -57,10 +57,11 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
m_formatter.immediate8s(imm);
} else {
if (dst == rax)
if (dst == rax) {
m_formatter.oneByteOp64(OP_ADD_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
}
m_formatter.immediate32(imm);
}
}
@ -69,10 +70,11 @@ class BaseAssemblerX64 : public BaseAssembler
{
// 32-bit immediate always, for patching.
spew("addq $0x%04x, %s", imm, GPReg64Name(dst));
if (dst == rax)
if (dst == rax) {
m_formatter.oneByteOp64(OP_ADD_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
}
m_formatter.immediate32(imm);
}
@ -216,10 +218,11 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
m_formatter.immediate8s(imm);
} else {
if (dst == rax)
if (dst == rax) {
m_formatter.oneByteOp64(OP_AND_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
}
m_formatter.immediate32(imm);
}
}
@ -243,10 +246,11 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
m_formatter.immediate8s(imm);
} else {
if (dst == rax)
if (dst == rax) {
m_formatter.oneByteOp64(OP_OR_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
}
m_formatter.immediate32(imm);
}
}
@ -294,10 +298,11 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
m_formatter.immediate8s(imm);
} else {
if (dst == rax)
if (dst == rax) {
m_formatter.oneByteOp64(OP_SUB_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
}
m_formatter.immediate32(imm);
}
}
@ -315,10 +320,11 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
m_formatter.immediate8s(imm);
} else {
if (dst == rax)
if (dst == rax) {
m_formatter.oneByteOp64(OP_XOR_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
}
m_formatter.immediate32(imm);
}
}
@ -345,9 +351,9 @@ class BaseAssemblerX64 : public BaseAssembler
{
MOZ_ASSERT(imm < 64);
spew("sarq $%d, %s", imm, GPReg64Name(dst));
if (imm == 1)
if (imm == 1) {
m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
else {
} else {
m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
m_formatter.immediate8u(imm);
}
@ -357,9 +363,9 @@ class BaseAssemblerX64 : public BaseAssembler
{
MOZ_ASSERT(imm < 64);
spew("shlq $%d, %s", imm, GPReg64Name(dst));
if (imm == 1)
if (imm == 1) {
m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
else {
} else {
m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
m_formatter.immediate8u(imm);
}
@ -369,9 +375,9 @@ class BaseAssemblerX64 : public BaseAssembler
{
MOZ_ASSERT(imm < 64);
spew("shrq $%d, %s", imm, GPReg64Name(dst));
if (imm == 1)
if (imm == 1) {
m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
else {
} else {
m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
m_formatter.immediate8u(imm);
}
@ -381,9 +387,9 @@ class BaseAssemblerX64 : public BaseAssembler
{
MOZ_ASSERT(imm < 64);
spew("rolq $%d, %s", imm, GPReg64Name(dst));
if (imm == 1)
if (imm == 1) {
m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
else {
} else {
m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
m_formatter.immediate8u(imm);
}
@ -398,9 +404,9 @@ class BaseAssemblerX64 : public BaseAssembler
{
MOZ_ASSERT(imm < 64);
spew("rorq $%d, %s", imm, GPReg64Name(dst));
if (imm == 1)
if (imm == 1) {
m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
else {
} else {
m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
m_formatter.immediate8u(imm);
}
@ -473,10 +479,11 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.oneByteOp64(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
m_formatter.immediate8s(rhs);
} else {
if (lhs == rax)
if (lhs == rax) {
m_formatter.oneByteOp64(OP_CMP_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
}
m_formatter.immediate32(rhs);
}
}
@ -536,10 +543,11 @@ class BaseAssemblerX64 : public BaseAssembler
return;
}
spew("testq $0x%" PRIx64 ", %s", int64_t(rhs), GPReg64Name(lhs));
if (lhs == rax)
if (lhs == rax) {
m_formatter.oneByteOp64(OP_TEST_EAXIv);
else
} else {
m_formatter.oneByteOp64(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
}
m_formatter.immediate32(rhs);
}
@ -958,20 +966,22 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.legacySSEPrefix(ty);
m_formatter.twoByteRipOp(opcode, 0, dst);
JmpSrc label(m_formatter.size());
if (IsXMMReversedOperands(opcode))
if (IsXMMReversedOperands(opcode)) {
spew("%-11s%s, " MEM_o32r "", legacySSEOpName(name), XMMRegName(dst), ADDR_o32r(label.offset()));
else
} else {
spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name), ADDR_o32r(label.offset()), XMMRegName(dst));
}
return label;
}
m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
JmpSrc label(m_formatter.size());
if (src0 == invalid_xmm) {
if (IsXMMReversedOperands(opcode))
if (IsXMMReversedOperands(opcode)) {
spew("%-11s%s, " MEM_o32r "", name, XMMRegName(dst), ADDR_o32r(label.offset()));
else
} else {
spew("%-11s" MEM_o32r ", %s", name, ADDR_o32r(label.offset()), XMMRegName(dst));
}
} else {
spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()), XMMRegName(src0), XMMRegName(dst));
}
@ -982,20 +992,22 @@ class BaseAssemblerX64 : public BaseAssembler
RegisterID rm, XMMRegisterID src0, XMMRegisterID dst)
{
if (useLegacySSEEncoding(src0, dst)) {
if (IsXMMReversedOperands(opcode))
if (IsXMMReversedOperands(opcode)) {
spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst), GPRegName(rm));
else
} else {
spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(rm), XMMRegName(dst));
}
m_formatter.legacySSEPrefix(ty);
m_formatter.twoByteOp64(opcode, rm, dst);
return;
}
if (src0 == invalid_xmm) {
if (IsXMMReversedOperands(opcode))
if (IsXMMReversedOperands(opcode)) {
spew("%-11s%s, %s", name, XMMRegName(dst), GPRegName(rm));
else
} else {
spew("%-11s%s, %s", name, GPRegName(rm), XMMRegName(dst));
}
} else {
spew("%-11s%s, %s, %s", name, GPRegName(rm), XMMRegName(src0), XMMRegName(dst));
}
@ -1006,23 +1018,25 @@ class BaseAssemblerX64 : public BaseAssembler
XMMRegisterID rm, RegisterID dst)
{
if (useLegacySSEEncodingForOtherOutput()) {
if (IsXMMReversedOperands(opcode))
if (IsXMMReversedOperands(opcode)) {
spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(dst), XMMRegName(rm));
else if (opcode == OP2_MOVD_EdVd)
} else if (opcode == OP2_MOVD_EdVd) {
spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
else
} else {
spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), GPRegName(dst));
}
m_formatter.legacySSEPrefix(ty);
m_formatter.twoByteOp64(opcode, (RegisterID)rm, dst);
return;
}
if (IsXMMReversedOperands(opcode))
if (IsXMMReversedOperands(opcode)) {
spew("%-11s%s, %s", name, GPRegName(dst), XMMRegName(rm));
else if (opcode == OP2_MOVD_EdVd)
} else if (opcode == OP2_MOVD_EdVd) {
spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
else
} else {
spew("%-11s%s, %s", name, XMMRegName(rm), GPRegName(dst));
}
m_formatter.twoByteOpVex64(ty, opcode, (RegisterID)rm, invalid_xmm, (XMMRegisterID)dst);
}
};

View File

@ -42,8 +42,9 @@ CodeGeneratorX64::ToOperand64(const LInt64Allocation& a64)
{
const LAllocation& a = a64.value();
MOZ_ASSERT(!a.isFloatReg());
if (a.isGeneralReg())
if (a.isGeneralReg()) {
return Operand(a.toGeneralReg()->reg());
}
return Operand(masm.getStackPointer(), ToStackOffset(a));
}
@ -157,10 +158,11 @@ CodeGenerator::visitCompareB(LCompareB* lir)
// Load boxed boolean in ScratchReg.
ScratchRegisterScope scratch(masm);
if (rhs->isConstant())
if (rhs->isConstant()) {
masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(scratch));
else
} else {
masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
}
// Perform the comparison.
masm.cmpPtr(lhs.valueReg(), scratch);
@ -179,10 +181,11 @@ CodeGenerator::visitCompareBAndBranch(LCompareBAndBranch* lir)
// Load boxed boolean in ScratchReg.
ScratchRegisterScope scratch(masm);
if (rhs->isConstant())
if (rhs->isConstant()) {
masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(scratch));
else
} else {
masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
}
// Perform the comparison.
masm.cmpPtr(lhs.valueReg(), scratch);
@ -230,10 +233,11 @@ CodeGenerator::visitCompareI64(LCompareI64* lir)
Register lhsReg = ToRegister64(lhs).reg;
Register output = ToRegister(lir->output());
if (IsConstant(rhs))
if (IsConstant(rhs)) {
masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
else
} else {
masm.cmpPtr(lhsReg, ToOperand64(rhs));
}
bool isSigned = mir->compareType() == MCompare::Compare_Int64;
masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), output);
@ -250,10 +254,11 @@ CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
Register lhsReg = ToRegister64(lhs).reg;
if (IsConstant(rhs))
if (IsConstant(rhs)) {
masm.cmpPtr(lhsReg, ImmWord(ToInt64(rhs)));
else
} else {
masm.cmpPtr(lhsReg, ToOperand64(rhs));
}
bool isSigned = mir->compareType() == MCompare::Compare_Int64;
emitBranch(JSOpToCondition(lir->jsop(), isSigned), lir->ifTrue(), lir->ifFalse());
@ -274,8 +279,9 @@ CodeGenerator::visitDivOrModI64(LDivOrModI64* lir)
Label done;
// Put the lhs in rax.
if (lhs != rax)
if (lhs != rax) {
masm.mov(lhs, rax);
}
// Handle divide by zero.
if (lir->canBeDivideByZero()) {
@ -290,10 +296,11 @@ CodeGenerator::visitDivOrModI64(LDivOrModI64* lir)
Label notOverflow;
masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
if (lir->mir()->isMod())
if (lir->mir()->isMod()) {
masm.xorl(output, output);
else
} else {
masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
}
masm.jump(&done);
masm.bind(&notOverflow);
}
@ -318,8 +325,9 @@ CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir)
MOZ_ASSERT_IF(output.value == rdx, ToRegister(lir->remainder()) == rax);
// Put the lhs in rax.
if (lhs != rax)
if (lhs != rax) {
masm.mov(lhs, rax);
}
Label done;
@ -434,10 +442,11 @@ CodeGeneratorX64::emitWasmLoad(T* ins)
? Operand(HeapReg, offset)
: Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
if (mir->type() == MIRType::Int64)
if (mir->type() == MIRType::Int64) {
masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
else
} else {
masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
}
}
void
@ -583,8 +592,9 @@ CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
Scalar::Type accessType = mir->access().type();
if (accessType == Scalar::Uint32)
if (accessType == Scalar::Uint32) {
accessType = Scalar::Int32;
}
AtomicOp op = mir->operation();
BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->access().offset());
@ -621,10 +631,11 @@ CodeGenerator::visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffect*
masm.wasmAtomicEffectOp64(mir->access(), op, val, srcAddr);
} else if (value->isConstant()) {
Imm32 c(0);
if (value->toConstant()->type() == MIRType::Int64)
if (value->toConstant()->type() == MIRType::Int64) {
c = Imm32(ToInt64(value));
else
} else {
c = Imm32(ToInt32(value));
}
masm.wasmAtomicEffectOp(mir->access(), op, c, srcAddr, InvalidReg);
} else {
masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), srcAddr, InvalidReg);
@ -661,10 +672,11 @@ CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
const LAllocation* input = lir->getOperand(0);
Register output = ToRegister(lir->output());
if (lir->mir()->bottomHalf())
if (lir->mir()->bottomHalf()) {
masm.movl(ToOperand(input), output);
else
} else {
MOZ_CRASH("Not implemented.");
}
}
void
@ -673,10 +685,11 @@ CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
const LAllocation* input = lir->getOperand(0);
Register output = ToRegister(lir->output());
if (lir->mir()->isUnsigned())
if (lir->mir()->isUnsigned()) {
masm.movl(ToOperand(input), output);
else
} else {
masm.movslq(ToOperand(input), output);
}
}
void
@ -717,19 +730,21 @@ CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
Label* oolRejoin = ool->rejoin();
bool isSaturating = mir->isSaturating();
if (inputType == MIRType::Double) {
if (mir->isUnsigned())
if (mir->isUnsigned()) {
masm.wasmTruncateDoubleToUInt64(input, output, isSaturating,
oolEntry, oolRejoin, temp);
else
} else {
masm.wasmTruncateDoubleToInt64(input, output, isSaturating,
oolEntry, oolRejoin, temp);
}
} else {
if (mir->isUnsigned())
if (mir->isUnsigned()) {
masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating,
oolEntry, oolRejoin, temp);
else
} else {
masm.wasmTruncateFloat32ToInt64(input, output, isSaturating,
oolEntry, oolRejoin, temp);
}
}
}
@ -747,15 +762,17 @@ CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
MOZ_ASSERT(isUnsigned == !lir->getTemp(0)->isBogusTemp());
if (outputType == MIRType::Double) {
if (isUnsigned)
if (isUnsigned) {
masm.convertUInt64ToDouble(input, output, ToRegister(lir->getTemp(0)));
else
} else {
masm.convertInt64ToDouble(input, output);
}
} else {
if (isUnsigned)
if (isUnsigned) {
masm.convertUInt64ToFloat32(input, output, ToRegister(lir->getTemp(0)));
else
} else {
masm.convertInt64ToFloat32(input, output);
}
}
}

View File

@ -108,19 +108,22 @@ class LDivOrModI64 : public LBinaryMath<1>
return static_cast<MBinaryArithInstruction*>(mir_);
}
bool canBeDivideByZero() const {
if (mir_->isMod())
if (mir_->isMod()) {
return mir_->toMod()->canBeDivideByZero();
}
return mir_->toDiv()->canBeDivideByZero();
}
bool canBeNegativeOverflow() const {
if (mir_->isMod())
if (mir_->isMod()) {
return mir_->toMod()->canBeNegativeDividend();
}
return mir_->toDiv()->canBeNegativeOverflow();
}
wasm::BytecodeOffset bytecodeOffset() const {
MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
if (mir_->isMod())
if (mir_->isMod()) {
return mir_->toMod()->bytecodeOffset();
}
return mir_->toDiv()->bytecodeOffset();
}
};
@ -155,15 +158,17 @@ class LUDivOrModI64 : public LBinaryMath<1>
}
bool canBeDivideByZero() const {
if (mir_->isMod())
if (mir_->isMod()) {
return mir_->toMod()->canBeDivideByZero();
}
return mir_->toDiv()->canBeDivideByZero();
}
wasm::BytecodeOffset bytecodeOffset() const {
MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
if (mir_->isMod())
if (mir_->isMod()) {
return mir_->toMod()->bytecodeOffset();
}
return mir_->toDiv()->bytecodeOffset();
}
};

View File

@ -100,8 +100,9 @@ LIRGenerator::visitUnbox(MUnbox* unbox)
if (box->type() == MIRType::ObjectOrNull) {
LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
if (unbox->fallible())
if (unbox->fallible()) {
assignSnapshot(lir, unbox->bailoutKind());
}
defineReuseInput(lir, unbox, 0);
return;
}
@ -119,8 +120,9 @@ LIRGenerator::visitUnbox(MUnbox* unbox)
lir = new(alloc()) LUnbox(useAtStart(box));
}
if (unbox->fallible())
if (unbox->fallible()) {
assignSnapshot(lir, unbox->bailoutKind());
}
define(lir, unbox);
}
@ -229,10 +231,11 @@ LIRGenerator::visitWasmStore(MWasmStore* ins)
break;
case Scalar::Int64:
// No way to encode an int64-to-memory move on x64.
if (value->isConstant() && value->type() != MIRType::Int64)
if (value->isConstant() && value->type() != MIRType::Int64) {
valueAlloc = useOrConstantAtStart(value);
else
} else {
valueAlloc = useRegisterAtStart(value);
}
break;
case Scalar::Float32:
case Scalar::Float64:
@ -387,12 +390,13 @@ LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins)
value,
bitOp ? temp() : LDefinition::BogusTemp());
if (reuseInput)
if (reuseInput) {
defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
else if (bitOp)
} else if (bitOp) {
defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
else
} else {
define(lir, ins);
}
}
void

View File

@ -561,8 +561,9 @@ MacroAssembler::popcnt64(Register64 src64, Register64 dest64, Register tmp)
return;
}
if (src != dest)
if (src != dest) {
movq(src, dest);
}
MOZ_ASSERT(tmp != dest);
@ -642,8 +643,9 @@ MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* succe
"other condition codes not supported");
branchPtr(cond, lhs.reg, ImmWord(val.value), success);
if (fail)
if (fail) {
jump(fail);
}
}
void
@ -657,8 +659,9 @@ MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label*
"other condition codes not supported");
branchPtr(cond, lhs.reg, rhs.reg, success);
if (fail)
if (fail) {
jump(fail);
}
}
void
@ -721,8 +724,9 @@ void
MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
{
ScratchRegisterScope scratch(*this);
if (rhs != scratch)
if (rhs != scratch) {
movePtr(rhs, scratch);
}
// Instead of unboxing lhs, box rhs and do direct comparison with lhs.
rshiftPtr(Imm32(1), scratch);
branchPtr(cond, lhs, scratch, label);
@ -863,14 +867,16 @@ MacroAssembler::spectreBoundsCheck32(Register index, Register length, Register m
MOZ_ASSERT(index != scratch);
MOZ_ASSERT(length != scratch);
if (JitOptions.spectreIndexMasking)
if (JitOptions.spectreIndexMasking) {
move32(Imm32(0), scratch);
}
cmp32(index, length);
j(Assembler::AboveOrEqual, failure);
if (JitOptions.spectreIndexMasking)
if (JitOptions.spectreIndexMasking) {
cmovCCl(Assembler::AboveOrEqual, scratch, index);
}
}
void
@ -885,14 +891,16 @@ MacroAssembler::spectreBoundsCheck32(Register index, const Address& length, Regi
MOZ_ASSERT(index != scratch);
MOZ_ASSERT(length.base != scratch);
if (JitOptions.spectreIndexMasking)
if (JitOptions.spectreIndexMasking) {
move32(Imm32(0), scratch);
}
cmp32(index, Operand(length));
j(Assembler::AboveOrEqual, failure);
if (JitOptions.spectreIndexMasking)
if (JitOptions.spectreIndexMasking) {
cmovCCl(Assembler::AboveOrEqual, scratch, index);
}
}
// ========================================================================

View File

@ -21,11 +21,13 @@ using namespace js::jit;
void
MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
{
if (maybeInlineDouble(d, dest))
if (maybeInlineDouble(d, dest)) {
return;
}
Double* dbl = getDouble(d);
if (!dbl)
if (!dbl) {
return;
}
// The constants will be stored in a pool appended to the text (see
// finish()), so they will always be a fixed distance from the
// instructions which reference them. This allows the instructions to use
@ -38,11 +40,13 @@ MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
void
MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
if (maybeInlineFloat(f, dest))
if (maybeInlineFloat(f, dest)) {
return;
}
Float* flt = getFloat(f);
if (!flt)
if (!flt) {
return;
}
// See comment in loadConstantDouble
JmpSrc j = masm.vmovss_ripr(dest.encoding());
propagateOOM(flt->uses.append(CodeOffset(j.offset())));
@ -51,11 +55,13 @@ MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
void
MacroAssemblerX64::loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest)
{
if (maybeInlineSimd128Int(v, dest))
if (maybeInlineSimd128Int(v, dest)) {
return;
}
SimdData* val = getSimdData(v);
if (!val)
if (!val) {
return;
}
JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
@ -63,11 +69,13 @@ MacroAssemblerX64::loadConstantSimd128Int(const SimdConstant& v, FloatRegister d
void
MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
{
if (maybeInlineSimd128Float(v, dest))
if (maybeInlineSimd128Float(v, dest)) {
return;
}
SimdData* val = getSimdData(v);
if (!val)
if (!val) {
return;
}
JmpSrc j = masm.vmovaps_ripr(dest.encoding());
propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
@ -87,23 +95,26 @@ MacroAssemblerX64::bindOffsets(const MacroAssemblerX86Shared::UsesVector& uses)
void
MacroAssemblerX64::finish()
{
if (!doubles_.empty())
if (!doubles_.empty()) {
masm.haltingAlign(sizeof(double));
}
for (const Double& d : doubles_) {
bindOffsets(d.uses);
masm.doubleConstant(d.value);
}
if (!floats_.empty())
if (!floats_.empty()) {
masm.haltingAlign(sizeof(float));
}
for (const Float& f : floats_) {
bindOffsets(f.uses);
masm.floatConstant(f.value);
}
// SIMD memory values must be suitably aligned.
if (!simds_.empty())
if (!simds_.empty()) {
masm.haltingAlign(SimdMemoryAlignment);
}
for (const SimdData& v : simds_) {
bindOffsets(v.uses);
masm.simd128Constant(v.value.bytes());
@ -285,8 +296,9 @@ MacroAssembler::subFromStackPtr(Imm32 imm32)
subl(Imm32(1), scratch);
j(Assembler::NonZero, &top);
amountLeft -= fullPages * 4096;
if (amountLeft)
if (amountLeft) {
subq(Imm32(amountLeft), StackPointer);
}
}
}
}
@ -330,8 +342,9 @@ MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
// Position all arguments.
{
enoughMemory_ &= moveResolver_.resolve();
if (!enoughMemory_)
if (!enoughMemory_) {
return;
}
MoveEmitter emitter(*this);
emitter.emit(moveResolver_);
@ -345,8 +358,9 @@ void
MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result, bool cleanupArg)
{
freeStack(stackAdjust);
if (dynamicAlignment_)
if (dynamicAlignment_) {
pop(rsp);
}
#ifdef DEBUG
MOZ_ASSERT(inCall_);
@ -358,8 +372,9 @@ static bool
IsIntArgReg(Register reg)
{
for (uint32_t i = 0; i < NumIntArgRegs; i++) {
if (IntArgRegs[i] == reg)
if (IntArgRegs[i] == reg) {
return true;
}
}
return false;
@ -435,8 +450,9 @@ MacroAssembler::moveValue(const TypedOrValueRegister& src, const ValueOperand& d
void
MacroAssembler::moveValue(const ValueOperand& src, const ValueOperand& dest)
{
if (src == dest)
if (src == dest) {
return;
}
movq(src.valueReg(), dest.valueReg());
}
@ -453,8 +469,9 @@ MacroAssembler::moveValue(const Value& src, const ValueOperand& dest)
void
MacroAssembler::loadStoreBuffer(Register ptr, Register buffer)
{
if (ptr != buffer)
if (ptr != buffer) {
movePtr(ptr, buffer);
}
orPtr(Imm32(gc::ChunkMask), buffer);
loadPtr(Address(buffer, gc::ChunkStoreBufferOffsetFromLastByte), buffer);
}
@ -563,20 +580,22 @@ MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType value
if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) {
if (value.constant()) {
Value val = value.value();
if (valueType == MIRType::Int32)
if (valueType == MIRType::Int32) {
store32(Imm32(val.toInt32()), dest);
else
} else {
store32(Imm32(val.toBoolean() ? 1 : 0), dest);
}
} else {
store32(value.reg().typedReg().gpr(), dest);
}
return;
}
if (value.constant())
if (value.constant()) {
storeValue(value.value(), dest);
else
} else {
storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
}
}
template void
@ -909,8 +928,9 @@ MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access, cons
Register64 output)
{
MOZ_ASSERT(output.reg == rax);
if (expected != output)
if (expected != output) {
movq(expected.reg, output.reg);
}
append(access, size());
lock_cmpxchgq(replacement.reg, Operand(mem));
}
@ -921,8 +941,9 @@ MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access, cons
Register64 output)
{
MOZ_ASSERT(output.reg == rax);
if (expected != output)
if (expected != output) {
movq(expected.reg, output.reg);
}
append(access, size());
lock_cmpxchgq(replacement.reg, Operand(mem));
}
@ -931,8 +952,9 @@ void
MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access, const Address& mem,
Register64 value, Register64 output)
{
if (value != output)
if (value != output) {
movq(value.reg, output.reg);
}
append(access, masm.size());
xchgq(output.reg, Operand(mem));
}
@ -941,8 +963,9 @@ void
MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access, const BaseIndex& mem,
Register64 value, Register64 output)
{
if (value != output)
if (value != output) {
movq(value.reg, output.reg);
}
append(access, masm.size());
xchgq(output.reg, Operand(mem));
}
@ -953,13 +976,15 @@ WasmAtomicFetchOp64(MacroAssembler& masm, const wasm::MemoryAccessDesc access, A
Register value, const T& mem, Register temp, Register output)
{
if (op == AtomicFetchAddOp) {
if (value != output)
if (value != output) {
masm.movq(value, output);
}
masm.append(access, masm.size());
masm.lock_xaddq(output, Operand(mem));
} else if (op == AtomicFetchSubOp) {
if (value != output)
if (value != output) {
masm.movq(value, output);
}
masm.negq(output);
masm.append(access, masm.size());
masm.lock_xaddq(output, Operand(mem));

View File

@ -99,8 +99,9 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
void writeDataRelocation(const Value& val) {
if (val.isGCThing()) {
gc::Cell* cell = val.toGCThing();
if (cell && gc::IsInsideNursery(cell))
if (cell && gc::IsInsideNursery(cell)) {
embedsNurseryPointers_ = true;
}
dataRelocations_.writeUnsigned(masm.currentOffset());
}
}
@ -212,8 +213,9 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
void tagValue(JSValueType type, Register payload, ValueOperand dest) {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(dest.valueReg() != scratch);
if (payload != dest.valueReg())
if (payload != dest.valueReg()) {
movq(payload, dest.valueReg());
}
mov(ImmShiftedTag(type), scratch);
orq(scratch, dest.valueReg());
}
@ -686,8 +688,9 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
}
void splitTag(Register src, Register dest) {
if (src != dest)
if (src != dest) {
movq(src, dest);
}
shrq(Imm32(JSVAL_TAG_SHIFT), dest);
}
void splitTag(const ValueOperand& operand, Register dest) {
@ -825,8 +828,9 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
// If src is already a register, then src and dest are the same
// thing and we don't need to move anything into dest.
if (src.kind() != Operand::REG)
if (src.kind() != Operand::REG) {
movq(src, dest);
}
xorq(scratch, dest);
} else {
mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), dest);
@ -974,12 +978,13 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
template <typename T>
void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest) {
if (dest.isFloat())
if (dest.isFloat()) {
loadInt32OrDouble(src, dest.fpu());
else if (type == MIRType::ObjectOrNull)
} else if (type == MIRType::ObjectOrNull) {
unboxObjectOrNull(src, dest.gpr());
else
} else {
unboxNonDouble(Operand(src), dest.gpr(), ValueTypeFromMIRType(type));
}
}
template <typename T>

View File

@ -537,8 +537,9 @@ PushBailoutFrame(MacroAssembler& masm, Register spArg)
// the float registers to have the maximal possible size
// (Simd128DataSize). To work around this, we just spill the double
// registers by hand here, using the register dump offset directly.
for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more(); ++iter)
for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more(); ++iter) {
masm.Push(*iter);
}
masm.reserveStack(sizeof(RegisterDump::FPUArray));
for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more(); ++iter) {
@ -675,8 +676,9 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
break;
}
if (!generateTLEnterVM(masm, f))
if (!generateTLEnterVM(masm, f)) {
return false;
}
masm.setupUnalignedABICall(regs.getAny());
masm.passABIArg(cxreg);
@ -687,10 +689,11 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
switch (f.argProperties(explicitArg)) {
case VMFunction::WordByValue:
if (f.argPassedInFloatReg(explicitArg))
if (f.argPassedInFloatReg(explicitArg)) {
masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
else
} else {
masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
}
argDisp += sizeof(void*);
break;
case VMFunction::WordByRef:
@ -705,13 +708,15 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
}
// Copy the implicit outparam, if any.
if (outReg != InvalidReg)
if (outReg != InvalidReg) {
masm.passABIArg(outReg);
}
masm.callWithABI(f.wrapped, MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
if (!generateTLExitVM(masm, f))
if (!generateTLExitVM(masm, f)) {
return false;
}
// Test for failure.
switch (f.failType()) {
@ -767,8 +772,9 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
// Until C++ code is instrumented against Spectre, prevent speculative
// execution from returning any private data.
if (f.returnsData() && JitOptions.spectreJitToCxxCalls)
if (f.returnsData() && JitOptions.spectreJitToCxxCalls) {
masm.speculationBarrier();
}
masm.leaveExitFrame();
masm.retn(Imm32(sizeof(ExitFrameLayout) +