Bug 1436691 : [MIPS32] Use ldc1 and sdc1 for double loads and stores; r=bbouvier

--HG--
extra : rebase_source : e72b96ab09141e204c48c184d8e80ab89f168246
Dragan Mladjenovic 2018-02-06 14:35:49 +01:00
Parent c7bdffd735
Commit 642340d528
16 changed files with 275 additions and 322 deletions
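
In short, the patch replaces the old MIPS32 lowering, which split every double load and store into two word-sized lwc1/swc1 accesses on the even/odd halves of the register pair, with single ldc1/sdc1 instructions whenever the offset fits a signed 16-bit immediate, keeping explicit word-aligned helpers for callers that may only have word alignment. A condensed before/after sketch using the assembler helpers from the hunks below; the wrapper names storeDoubleOld/storeDoubleNew are illustrative only and not part of the patch:

// Sketch only; assumes it sits inside MacroAssemblerMIPS and the offset is in range.
void storeDoubleOld(FloatRegister ft, Register base, int32_t off) {
    // Pre-patch: two 32-bit stores covering the even/odd halves of the
    // register pair; tolerates word alignment but costs two instructions.
    as_swc1(ft, base, off + PAYLOAD_OFFSET);
    as_swc1(getOddPair(ft), base, off + TAG_OFFSET);
}
void storeDoubleNew(FloatRegister ft, Register base, int32_t off) {
    // Post-patch: one 64-bit store; ldc1/sdc1 need a naturally (8-byte)
    // aligned address, which is why the *WordAligned helpers still exist.
    as_sdc1(ft, base, off);
}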


@ -1217,35 +1217,35 @@ AssemblerMIPSShared::as_dextu(Register rt, Register rs, uint16_t pos, uint16_t s
// FP instructions
BufferOffset
AssemblerMIPSShared::as_ld(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_ldc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("ldc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode());
spew("ldc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_ldc1, base, ft, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_sd(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_sdc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("sdc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode());
spew("sdc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_sdc1, base, ft, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_ls(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_lwc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("lwc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode());
spew("lwc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_lwc1, base, ft, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_ss(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_swc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("swc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode());
spew("swc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_swc1, base, ft, Imm16(off)).encode());
}
BufferOffset


@ -1139,13 +1139,11 @@ class AssemblerMIPSShared : public AssemblerShared
// FP instructions
// Use these two functions only when you are sure address is aligned.
// Otherwise, use ma_ld and ma_sd.
BufferOffset as_ld(FloatRegister fd, Register base, int32_t off);
BufferOffset as_sd(FloatRegister fd, Register base, int32_t off);
BufferOffset as_ldc1(FloatRegister ft, Register base, int32_t off);
BufferOffset as_sdc1(FloatRegister ft, Register base, int32_t off);
BufferOffset as_ls(FloatRegister fd, Register base, int32_t off);
BufferOffset as_ss(FloatRegister fd, Register base, int32_t off);
BufferOffset as_lwc1(FloatRegister ft, Register base, int32_t off);
BufferOffset as_swc1(FloatRegister ft, Register base, int32_t off);
// Loongson-specific FP load and store instructions
BufferOffset as_gsldl(FloatRegister fd, Register base, int32_t off);


@ -1037,6 +1037,28 @@ MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
MOZ_CRASH("NYI");
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
{


@ -1118,14 +1118,6 @@ MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value)
}
}
void
MacroAssemblerMIPSShared::ma_liNegZero(FloatRegister dest)
{
moveToDoubleLo(zero, dest);
ma_li(ScratchRegister, Imm32(INT_MIN));
asMasm().moveToDoubleHi(ScratchRegister, dest);
}
void
MacroAssemblerMIPSShared::ma_sd(FloatRegister ft, BaseIndex address)
{
@ -1178,6 +1170,20 @@ MacroAssemblerMIPSShared::ma_ss(FloatRegister ft, BaseIndex address)
asMasm().ma_ss(ft, Address(SecondScratchReg, address.offset));
}
void
MacroAssemblerMIPSShared::ma_ld(FloatRegister ft, const BaseIndex& src)
{
asMasm().computeScaledAddress(src, SecondScratchReg);
asMasm().ma_ld(ft, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSShared::ma_ls(FloatRegister ft, const BaseIndex& src)
{
asMasm().computeScaledAddress(src, SecondScratchReg);
asMasm().ma_ls(ft, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label,
DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
@ -1290,6 +1296,44 @@ MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest, FloatRegister sec
bind(&done);
}
void
MacroAssemblerMIPSShared::loadDouble(const Address& address, FloatRegister dest)
{
asMasm().ma_ld(dest, address);
}
void
MacroAssemblerMIPSShared::loadDouble(const BaseIndex& src, FloatRegister dest)
{
asMasm().ma_ld(dest, src);
}
void
MacroAssemblerMIPSShared::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
asMasm().ma_ls(dest, address);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSShared::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
asMasm().loadFloat32(src, dest);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSShared::loadFloat32(const Address& address, FloatRegister dest)
{
asMasm().ma_ls(dest, address);
}
void
MacroAssemblerMIPSShared::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
asMasm().ma_ls(dest, src);
}
void
MacroAssemblerMIPSShared::ma_call(ImmPtr dest)
{
@ -1371,7 +1415,7 @@ void
MacroAssembler::Push(FloatRegister f)
{
ma_push(f);
adjustFrame(int32_t(sizeof(double)));
adjustFrame(int32_t(f.pushSize()));
}
void
@ -1385,21 +1429,21 @@ void
MacroAssembler::Pop(FloatRegister f)
{
ma_pop(f);
adjustFrame(-int32_t(sizeof(double)));
adjustFrame(-int32_t(f.pushSize()));
}
void
MacroAssembler::Pop(const ValueOperand& val)
{
popValue(val);
framePushed_ -= sizeof(Value);
adjustFrame(-int32_t(sizeof(Value)));
}
void
MacroAssembler::PopStackPtr()
{
loadPtr(Address(StackPointer, 0), StackPointer);
framePushed_ -= sizeof(intptr_t);
adjustFrame(-int32_t(sizeof(intptr_t)));
}
@ -1972,12 +2016,10 @@ MacroAssemblerMIPSShared::wasmLoadImpl(const wasm::MemoryAccessDesc& access, Reg
asMasm().memoryBarrierBefore(access.sync());
if (isFloat) {
if (byteSize == 4) {
asMasm().loadFloat32(address, output.fpu());
} else {
asMasm().computeScaledAddress(address, SecondScratchReg);
asMasm().as_ld(output.fpu(), SecondScratchReg, 0);
}
if (byteSize == 4)
asMasm().ma_ls(output.fpu(), address);
else
asMasm().ma_ld(output.fpu(), address);
} else {
asMasm().ma_load(output.gpr(), address, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
@ -2036,15 +2078,10 @@ MacroAssemblerMIPSShared::wasmStoreImpl(const wasm::MemoryAccessDesc& access, An
asMasm().memoryBarrierBefore(access.sync());
if (isFloat) {
if (byteSize == 4) {
asMasm().storeFloat32(value.fpu(), address);
} else {
//asMasm().storeDouble(value.fpu(), address);
// For time being storeDouble for mips32 uses two store instructions,
// so we emit only one to get correct behavior in case of OOB access.
asMasm().computeScaledAddress(address, SecondScratchReg);
asMasm().as_sd(value.fpu(), SecondScratchReg, 0);
}
if (byteSize == 4)
asMasm().ma_ss(value.fpu(), address);
else
asMasm().ma_sd(value.fpu(), address);
} else {
asMasm().ma_store(value.gpr(), address,
static_cast<LoadStoreSize>(8 * byteSize),


@ -172,10 +172,12 @@ class MacroAssemblerMIPSShared : public Assembler
// fp instructions
void ma_lis(FloatRegister dest, float value);
void ma_liNegZero(FloatRegister dest);
void ma_sd(FloatRegister fd, BaseIndex address);
void ma_ss(FloatRegister fd, BaseIndex address);
void ma_sd(FloatRegister src, BaseIndex address);
void ma_ss(FloatRegister src, BaseIndex address);
void ma_ld(FloatRegister dest, const BaseIndex& src);
void ma_ls(FloatRegister dest, const BaseIndex& src);
//FP branches
void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
@ -192,12 +194,6 @@ class MacroAssemblerMIPSShared : public Assembler
void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
BufferOffset ma_BoundsCheck(Register bounded) {
BufferOffset bo = m_buffer.nextOffset();
ma_liPatchable(bounded, Imm32(0));
return bo;
}
void moveToDoubleLo(Register src, FloatRegister dest) {
as_mtc1(src, dest);
}
@ -217,6 +213,16 @@ class MacroAssemblerMIPSShared : public Assembler
void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output, MIRType fromType,
TruncFlags flags, Label* rejoin,
wasm::BytecodeOffset trapOffset);


@ -131,6 +131,7 @@ class FloatRegister : public FloatRegisterMIPSShared
}
bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
size_t size() const { return (kind_ == Double) ? 8 : 4; }
size_t pushSize() const { return size(); }
bool isInvalid() const {
return code_ == FloatRegisters::invalid_freg;
}


@ -1016,32 +1016,6 @@ MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register
ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}
// ========================================================================
// Memory access primitives.
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_ss(src, addr);
}
// ========================================================================
// wasm support


@ -870,7 +870,7 @@ void
MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ls(ft, address.base, address.offset);
as_lwc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -878,7 +878,7 @@ MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
as_gslsx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ls(ft, ScratchRegister, 0);
as_lwc1(ft, ScratchRegister, 0);
}
}
}
@ -886,37 +886,34 @@ MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
void
MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
{
// Use single precision load instructions so we don't have to worry about
// alignment.
int32_t off = address.offset + PAYLOAD_OFFSET;
int32_t off2 = address.offset + TAG_OFFSET;
if (Imm16::IsInSignedRange(off) && Imm16::IsInSignedRange(off2)) {
as_ls(ft, address.base, off);
as_ls(getOddPair(ft), address.base, off2);
if (Imm16::IsInSignedRange(address.offset)) {
as_ldc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(off));
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
as_ls(getOddPair(ft), ScratchRegister, TAG_OFFSET);
ma_li(ScratchRegister, Imm32(address.offset));
if (isLoongson()) {
as_gsldx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ldc1(ft, ScratchRegister, 0);
}
}
}
void
MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
{
int32_t off = address.offset + PAYLOAD_OFFSET;
int32_t off2 = address.offset + TAG_OFFSET;
if (Imm16::IsInSignedRange(off) && Imm16::IsInSignedRange(off2)) {
as_ss(ft, address.base, off);
as_ss(getOddPair(ft), address.base, off2);
if (Imm16::IsInSignedRange(address.offset)) {
as_sdc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(off));
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
as_ss(getOddPair(ft), ScratchRegister, TAG_OFFSET);
ma_li(ScratchRegister, Imm32(address.offset));
if (isLoongson()) {
as_gssdx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_sdc1(ft, ScratchRegister, 0);
}
}
}
@ -924,7 +921,7 @@ void
MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ss(ft, address.base, address.offset);
as_swc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -932,23 +929,51 @@ MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
as_gsssx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ss(ft, ScratchRegister, 0);
as_swc1(ft, ScratchRegister, 0);
}
}
}
void
MacroAssemblerMIPS::ma_pop(FloatRegister fs)
MacroAssemblerMIPS::ma_ldc1WordAligned(FloatRegister ft, Register base, int32_t off)
{
ma_ld(fs.doubleOverlay(), Address(StackPointer, 0));
as_addiu(StackPointer, StackPointer, sizeof(double));
MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
Imm16::IsInSignedRange(off + TAG_OFFSET));
as_lwc1(ft, base, off + PAYLOAD_OFFSET);
as_lwc1(getOddPair(ft), base, off + TAG_OFFSET);
}
void
MacroAssemblerMIPS::ma_push(FloatRegister fs)
MacroAssemblerMIPS::ma_sdc1WordAligned(FloatRegister ft, Register base, int32_t off)
{
as_addiu(StackPointer, StackPointer, -sizeof(double));
ma_sd(fs.doubleOverlay(), Address(StackPointer, 0));
MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
Imm16::IsInSignedRange(off + TAG_OFFSET));
as_swc1(ft, base, off + PAYLOAD_OFFSET);
as_swc1(getOddPair(ft), base, off + TAG_OFFSET);
}
void
MacroAssemblerMIPS::ma_pop(FloatRegister f)
{
if (f.isDouble())
ma_ldc1WordAligned(f, StackPointer, 0);
else
as_lwc1(f, StackPointer, 0);
as_addiu(StackPointer, StackPointer, f.size());
}
void
MacroAssemblerMIPS::ma_push(FloatRegister f)
{
as_addiu(StackPointer, StackPointer, -f.size());
if(f.isDouble())
ma_sdc1WordAligned(f, StackPointer, 0);
else
as_swc1(f, StackPointer, 0);
}
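For orientation (my gloss, not text from the patch): pushes and pops of doubles keep going through the word-aligned helpers above rather than a single sdc1/ldc1, presumably because the stack pointer is not guaranteed to be 8-byte aligned at every push site on MIPS32. Roughly, ma_push(f) expands to the following sketch, with f a FloatRegister as above:

// Sketch of the MIPS32 ma_push(f) expansion shown above.
as_addiu(StackPointer, StackPointer, -f.size());   // 8 bytes for a double, 4 for a single
if (f.isDouble()) {
    // ma_sdc1WordAligned: two word stores on the even/odd register pair.
    as_swc1(f, StackPointer, PAYLOAD_OFFSET);
    as_swc1(getOddPair(f), StackPointer, TAG_OFFSET);
} else {
    as_swc1(f, StackPointer, 0);
}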
bool
@ -1110,19 +1135,6 @@ MacroAssemblerMIPSCompat::loadPrivate(const Address& address, Register dest)
ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
}
void
MacroAssemblerMIPSCompat::loadDouble(const Address& address, FloatRegister dest)
{
ma_ld(dest, address);
}
void
MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ld(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSCompat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
@ -1155,33 +1167,6 @@ MacroAssemblerMIPSCompat::loadUnalignedDouble(const wasm::MemoryAccessDesc& acce
}
}
void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
loadFloat32(src, dest);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSCompat::loadFloat32(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
}
void
MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ls(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSCompat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
@ -2190,7 +2175,7 @@ MacroAssembler::PushRegsInMask(LiveRegisterSet set)
diffF -= sizeof(double);
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
as_sd(*iter, SecondScratchReg, -diffF);
as_sdc1(*iter, SecondScratchReg, -diffF);
diffF -= sizeof(double);
}
@ -2216,7 +2201,7 @@ MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
if (!ignore.has(*iter))
as_ld(*iter, SecondScratchReg, -diffF);
as_ldc1(*iter, SecondScratchReg, -diffF);
diffF -= sizeof(double);
}
freeStack(reservedF);
@ -2256,7 +2241,7 @@ MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register scra
diffF -= sizeof(double);
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
as_sd(*iter, scratch, -diffF);
as_sdc1(*iter, scratch, -diffF);
diffF -= sizeof(double);
}
MOZ_ASSERT(diffF == 0);


@ -53,6 +53,8 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
using MacroAssemblerMIPSShared::ma_li;
using MacroAssemblerMIPSShared::ma_ss;
using MacroAssemblerMIPSShared::ma_sd;
using MacroAssemblerMIPSShared::ma_ls;
using MacroAssemblerMIPSShared::ma_ld;
using MacroAssemblerMIPSShared::ma_load;
using MacroAssemblerMIPSShared::ma_store;
using MacroAssemblerMIPSShared::ma_cmp_set;
@ -122,13 +124,16 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
void ma_mv(FloatRegister src, ValueOperand dest);
void ma_mv(ValueOperand src, FloatRegister dest);
void ma_ls(FloatRegister fd, Address address);
void ma_ld(FloatRegister fd, Address address);
void ma_sd(FloatRegister fd, Address address);
void ma_ss(FloatRegister fd, Address address);
void ma_ls(FloatRegister ft, Address address);
void ma_ld(FloatRegister ft, Address address);
void ma_sd(FloatRegister ft, Address address);
void ma_ss(FloatRegister ft, Address address);
void ma_pop(FloatRegister fs);
void ma_push(FloatRegister fs);
void ma_ldc1WordAligned(FloatRegister ft, Register base, int32_t off);
void ma_sdc1WordAligned(FloatRegister ft, Register base, int32_t off);
void ma_pop(FloatRegister f);
void ma_push(FloatRegister f);
void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
@ -636,17 +641,9 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);


@ -70,24 +70,24 @@ GenerateReturn(MacroAssembler& masm, int returnCode)
MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
// Restore non-volatile registers
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s0)), s0);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s1)), s1);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s2)), s2);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s3)), s3);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s4)), s4);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s5)), s5);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s6)), s6);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s7)), s7);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, fp)), fp);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, ra)), ra);
masm.as_lw(s0, StackPointer, offsetof(EnterJITRegs, s0));
masm.as_lw(s1, StackPointer, offsetof(EnterJITRegs, s1));
masm.as_lw(s2, StackPointer, offsetof(EnterJITRegs, s2));
masm.as_lw(s3, StackPointer, offsetof(EnterJITRegs, s3));
masm.as_lw(s4, StackPointer, offsetof(EnterJITRegs, s4));
masm.as_lw(s5, StackPointer, offsetof(EnterJITRegs, s5));
masm.as_lw(s6, StackPointer, offsetof(EnterJITRegs, s6));
masm.as_lw(s7, StackPointer, offsetof(EnterJITRegs, s7));
masm.as_lw(fp, StackPointer, offsetof(EnterJITRegs, fp));
masm.as_lw(ra, StackPointer, offsetof(EnterJITRegs, ra));
// Restore non-volatile floating point registers
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f20)), f20);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f22)), f22);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f24)), f24);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f26)), f26);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f28)), f28);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f30)), f30);
masm.as_ldc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
masm.as_ldc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.freeStack(sizeof(EnterJITRegs));
@ -101,23 +101,23 @@ GeneratePrologue(MacroAssembler& masm)
// rather than the JIT'd code, because they are scanned by the conservative
// scanner.
masm.reserveStack(sizeof(EnterJITRegs));
masm.storePtr(s0, Address(StackPointer, offsetof(EnterJITRegs, s0)));
masm.storePtr(s1, Address(StackPointer, offsetof(EnterJITRegs, s1)));
masm.storePtr(s2, Address(StackPointer, offsetof(EnterJITRegs, s2)));
masm.storePtr(s3, Address(StackPointer, offsetof(EnterJITRegs, s3)));
masm.storePtr(s4, Address(StackPointer, offsetof(EnterJITRegs, s4)));
masm.storePtr(s5, Address(StackPointer, offsetof(EnterJITRegs, s5)));
masm.storePtr(s6, Address(StackPointer, offsetof(EnterJITRegs, s6)));
masm.storePtr(s7, Address(StackPointer, offsetof(EnterJITRegs, s7)));
masm.storePtr(fp, Address(StackPointer, offsetof(EnterJITRegs, fp)));
masm.storePtr(ra, Address(StackPointer, offsetof(EnterJITRegs, ra)));
masm.as_sw(s0, StackPointer, offsetof(EnterJITRegs, s0));
masm.as_sw(s1, StackPointer, offsetof(EnterJITRegs, s1));
masm.as_sw(s2, StackPointer, offsetof(EnterJITRegs, s2));
masm.as_sw(s3, StackPointer, offsetof(EnterJITRegs, s3));
masm.as_sw(s4, StackPointer, offsetof(EnterJITRegs, s4));
masm.as_sw(s5, StackPointer, offsetof(EnterJITRegs, s5));
masm.as_sw(s6, StackPointer, offsetof(EnterJITRegs, s6));
masm.as_sw(s7, StackPointer, offsetof(EnterJITRegs, s7));
masm.as_sw(fp, StackPointer, offsetof(EnterJITRegs, fp));
masm.as_sw(ra, StackPointer, offsetof(EnterJITRegs, ra));
masm.as_sd(f20, StackPointer, offsetof(EnterJITRegs, f20));
masm.as_sd(f22, StackPointer, offsetof(EnterJITRegs, f22));
masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_sdc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
masm.as_sdc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
}
@ -364,9 +364,10 @@ JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail)
// Save floating point registers
// We can use as_sd because stack is alligned.
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i ++)
masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i ++) {
masm.as_sdc1(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
}
// Pass pointer to InvalidationBailoutStack structure.
masm.movePtr(StackPointer, a0);
@ -576,10 +577,11 @@ PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
}
// Save floating point registers
// We can use as_sd because stack is alligned.
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++)
masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
BailoutStack::offsetOfFpRegs() + i * sizeof(double));
// We can use as_sdc1 because stack is alligned.
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++) {
masm.as_sdc1(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
BailoutStack::offsetOfFpRegs() + i * sizeof(double));
}
// Store the frameSize_ or tableOffset_ stored in ra
// See: JitRuntime::generateBailoutTable()
@ -775,8 +777,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
break;
case VMFunction::DoubleByRef:
// Copy double sized argument to aligned place.
masm.ma_ld(ScratchDoubleReg, Address(argsBase, argDisp));
masm.as_sd(ScratchDoubleReg, doubleArgs, doubleArgDisp);
masm.ma_ldc1WordAligned(ScratchDoubleReg, argsBase, argDisp);
masm.as_sdc1(ScratchDoubleReg, doubleArgs, doubleArgDisp);
masm.passABIArg(MoveOperand(doubleArgs, doubleArgDisp, MoveOperand::EFFECTIVE_ADDRESS),
MoveOp::GENERAL);
doubleArgDisp += sizeof(double);
@ -841,7 +843,7 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
case Type_Double:
if (cx->runtime()->jitSupportsFloatingPoint) {
masm.as_ld(ReturnDoubleReg, StackPointer, 0);
masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
} else {
masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
}


@ -122,6 +122,8 @@ class FloatRegister : public FloatRegisterMIPSShared
}
bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
size_t size() const { return (kind_ == Codes::Double) ? sizeof(double) : sizeof (float); }
// Always push doubles to maintain 8-byte stack alignment.
size_t pushSize() const { return sizeof(double); }
bool isInvalid() const {
return reg_ == FloatRegisters::invalid_freg;
}


@ -768,32 +768,6 @@ MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register
as_sll(dest, dest, 0);
}
// ========================================================================
// Memory access primitives.
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_ss(src, addr);
}
// ========================================================================
// wasm support
@ -809,7 +783,7 @@ void
MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, L label)
{
SecondScratchRegisterScope scratch2(*this);
load32(boundsCheckLimit,SecondScratchReg);
load32(boundsCheckLimit, SecondScratchReg);
ma_b(index, SecondScratchReg, label, cond);
}


@ -850,7 +850,7 @@ void
MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ls(ft, address.base, address.offset);
as_lwc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -858,7 +858,7 @@ MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address)
as_gslsx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_ls(ft, ScratchRegister, 0);
as_lwc1(ft, ScratchRegister, 0);
}
}
}
@ -867,7 +867,7 @@ void
MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ld(ft, address.base, address.offset);
as_ldc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -875,7 +875,7 @@ MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address)
as_gsldx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_ld(ft, ScratchRegister, 0);
as_ldc1(ft, ScratchRegister, 0);
}
}
}
@ -884,7 +884,7 @@ void
MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_sd(ft, address.base, address.offset);
as_sdc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -892,7 +892,7 @@ MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address)
as_gssdx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_sd(ft, ScratchRegister, 0);
as_sdc1(ft, ScratchRegister, 0);
}
}
}
@ -901,7 +901,7 @@ void
MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ss(ft, address.base, address.offset);
as_swc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -909,23 +909,23 @@ MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address)
as_gsssx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_ss(ft, ScratchRegister, 0);
as_swc1(ft, ScratchRegister, 0);
}
}
}
void
MacroAssemblerMIPS64::ma_pop(FloatRegister fs)
MacroAssemblerMIPS64::ma_pop(FloatRegister f)
{
ma_ld(fs, Address(StackPointer, 0));
as_ldc1(f, StackPointer, 0);
as_daddiu(StackPointer, StackPointer, sizeof(double));
}
void
MacroAssemblerMIPS64::ma_push(FloatRegister fs)
MacroAssemblerMIPS64::ma_push(FloatRegister f)
{
as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(double));
ma_sd(fs, Address(StackPointer, 0));
as_sdc1(f, StackPointer, 0);
}
bool
@ -1088,19 +1088,6 @@ MacroAssemblerMIPS64Compat::loadPrivate(const Address& address, Register dest)
ma_dsll(dest, dest, Imm32(1));
}
void
MacroAssemblerMIPS64Compat::loadDouble(const Address& address, FloatRegister dest)
{
ma_ld(dest, address);
}
void
MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ld(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPS64Compat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
@ -1120,33 +1107,6 @@ MacroAssemblerMIPS64Compat::loadUnalignedDouble(const wasm::MemoryAccessDesc& ac
moveToDouble(temp, dest);
}
void
MacroAssemblerMIPS64Compat::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPS64Compat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
loadFloat32(src, dest);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPS64Compat::loadFloat32(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
}
void
MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ls(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)


@ -51,6 +51,8 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
using MacroAssemblerMIPSShared::ma_li;
using MacroAssemblerMIPSShared::ma_ss;
using MacroAssemblerMIPSShared::ma_sd;
using MacroAssemblerMIPSShared::ma_ls;
using MacroAssemblerMIPSShared::ma_ld;
using MacroAssemblerMIPSShared::ma_load;
using MacroAssemblerMIPSShared::ma_store;
using MacroAssemblerMIPSShared::ma_cmp_set;
@ -133,13 +135,13 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
void ma_mv(FloatRegister src, ValueOperand dest);
void ma_mv(ValueOperand src, FloatRegister dest);
void ma_ls(FloatRegister fd, Address address);
void ma_ld(FloatRegister fd, Address address);
void ma_sd(FloatRegister fd, Address address);
void ma_ss(FloatRegister fd, Address address);
void ma_ls(FloatRegister ft, Address address);
void ma_ld(FloatRegister ft, Address address);
void ma_sd(FloatRegister ft, Address address);
void ma_ss(FloatRegister ft, Address address);
void ma_pop(FloatRegister fs);
void ma_push(FloatRegister fs);
void ma_pop(FloatRegister f);
void ma_push(FloatRegister f);
void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
@ -660,17 +662,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);


@ -64,6 +64,8 @@ struct EnterJITRegs
static void
GenerateReturn(MacroAssembler& masm, int returnCode)
{
MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
if (isLoongson()) {
// Restore non-volatile registers
masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
@ -92,14 +94,14 @@ GenerateReturn(MacroAssembler& masm, int returnCode)
masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
// Restore non-volatile floating point registers
masm.as_ld(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_ld(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_ld(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_ld(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_ld(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_ld(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_ld(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_ld(f31, StackPointer, offsetof(EnterJITRegs, f31));
masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_ldc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_ldc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_ldc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_ldc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
}
masm.freeStack(sizeof(EnterJITRegs));
@ -139,14 +141,14 @@ GeneratePrologue(MacroAssembler& masm)
masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
masm.as_sd(a7, StackPointer, offsetof(EnterJITRegs, a7));
masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sd(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sd(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sd(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_sd(f31, StackPointer, offsetof(EnterJITRegs, f31));
masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sdc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sdc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sdc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_sdc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
}
@ -796,7 +798,7 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
case Type_Double:
if (cx->runtime()->jitSupportsFloatingPoint) {
masm.as_ld(ReturnDoubleReg, StackPointer, 0);
masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
} else {
masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
}


@ -1191,7 +1191,7 @@ class BaseStackFrame
static const size_t StackSizeOfPtr = sizeof(intptr_t);
static const size_t StackSizeOfInt64 = sizeof(int64_t);
#ifdef JS_CODEGEN_ARM
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
static const size_t StackSizeOfFloat = sizeof(float);
#else
static const size_t StackSizeOfFloat = sizeof(double);
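
One last connection, spelled out as my own inference rather than anything stated in the patch: with MIPS32's FloatRegister::pushSize() now equal to size(), a pushed single occupies 4 bytes instead of 8, which lines up with this final hunk making the wasm baseline compiler's StackSizeOfFloat equal sizeof(float) on MIPS32, as it already was on ARM. A tiny illustrative sketch; slotSizeForFloat is a hypothetical name:

#include <cstddef>
// After this patch a single pushed on MIPS32 takes 4 bytes
// (pushSize() == size()), so a 4-byte baseline stack slot suffices.
size_t slotSizeForFloat() {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
    return sizeof(float);    // 4-byte slot for singles
#else
    return sizeof(double);   // e.g. MIPS64 always pushes an 8-byte slot
#endif
}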