From e9dbacb941ef9ae80370c0f465f9bea26f7d405e Mon Sep 17 00:00:00 2001 From: "Nicolas B. Pierron" Date: Fri, 14 Feb 2014 12:24:19 -0800 Subject: [PATCH] Bug 969375 - MIPS port: Added Assembler-mips files. r=froydnj,nbp --- js/src/jit/mips/Assembler-mips.cpp | 1684 ++++++++++++++++++++++++++++ js/src/jit/mips/Assembler-mips.h | 1251 +++++++++++++++++++++ 2 files changed, 2935 insertions(+) create mode 100644 js/src/jit/mips/Assembler-mips.cpp create mode 100644 js/src/jit/mips/Assembler-mips.h diff --git a/js/src/jit/mips/Assembler-mips.cpp b/js/src/jit/mips/Assembler-mips.cpp new file mode 100644 index 000000000000..b2c7123c1f94 --- /dev/null +++ b/js/src/jit/mips/Assembler-mips.cpp @@ -0,0 +1,1684 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/mips/Assembler-mips.h" + +#include "mozilla/DebugOnly.h" +#include "mozilla/MathAlgorithms.h" + +#include "jscompartment.h" +#include "jsutil.h" + +#include "assembler/jit/ExecutableAllocator.h" +#include "gc/Marking.h" +#include "jit/JitCompartment.h" + +using mozilla::DebugOnly; + +using namespace js; +using namespace js::jit; + +ABIArgGenerator::ABIArgGenerator() + : usedArgSlots_(0), + firstArgFloat(false), + current_() +{} + +ABIArg +ABIArgGenerator::next(MIRType type) +{ + MOZ_ASSUME_UNREACHABLE("NYI"); + return ABIArg(); +} +const Register ABIArgGenerator::NonArgReturnVolatileReg0 = t0; +const Register ABIArgGenerator::NonArgReturnVolatileReg1 = t1; + +// Encode a standard register when it is being used as rd, the rs, and +// an extra register(rt). These should never be called with an InvalidReg. 
+uint32_t +js::jit::RS(Register r) +{ + JS_ASSERT((r.code() & ~RegMask) == 0); + return r.code() << RSShift; +} + +uint32_t +js::jit::RT(Register r) +{ + JS_ASSERT((r.code() & ~RegMask) == 0); + return r.code() << RTShift; +} + +uint32_t +js::jit::RT(FloatRegister r) +{ + JS_ASSERT(r.code() < FloatRegisters::Total); + return (2 * r.code()) << RTShift; +} + +// Use to code odd float registers. +// :TODO: Bug 972836, It will be removed once we can use odd regs. +uint32_t +js::jit::RT(uint32_t regCode) +{ + JS_ASSERT((regCode & ~RegMask) == 0); + return regCode << RTShift; +} + +uint32_t +js::jit::RD(Register r) +{ + JS_ASSERT((r.code() & ~RegMask) == 0); + return r.code() << RDShift; +} + +uint32_t +js::jit::RD(FloatRegister r) +{ + JS_ASSERT(r.code() < FloatRegisters::Total); + return (2 * r.code()) << RDShift; +} + +// Use to code odd float registers. +// :TODO: Bug 972836, It will be removed once we can use odd regs. +uint32_t +js::jit::RD(uint32_t regCode) +{ + JS_ASSERT((regCode & ~RegMask) == 0); + return regCode << RDShift; +} + +uint32_t +js::jit::SA(uint32_t value) +{ + JS_ASSERT(value < 32); + return value << SAShift; +} + +uint32_t +js::jit::SA(FloatRegister r) +{ + JS_ASSERT(r.code() < FloatRegisters::Total); + return (2 * r.code()) << SAShift; +} + +Register +js::jit::toRS(Instruction &i) +{ + return Register::FromCode((i.encode() & RSMask ) >> RSShift); +} + +Register +js::jit::toRT(Instruction &i) +{ + return Register::FromCode((i.encode() & RTMask ) >> RTShift); +} + +Register +js::jit::toRD(Instruction &i) +{ + return Register::FromCode((i.encode() & RDMask ) >> RDShift); +} + +Register +js::jit::toR(Instruction &i) +{ + return Register::FromCode(i.encode() & RegMask); +} + +void +InstImm::extractImm16(BOffImm16 *dest) +{ + *dest = BOffImm16(*this); +} + +// Used to patch jumps created by MacroAssemblerMIPSCompat::jumpWithPatch. 
+void +jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label) +{ + Instruction *inst1 = (Instruction *)jump_.raw(); + Instruction *inst2 = inst1->next(); + + Assembler::updateLuiOriValue(inst1, inst2, (uint32_t)label.raw()); + + AutoFlushCache::updateTop(uintptr_t(inst1), 8); +} + +void +Assembler::finish() +{ + JS_ASSERT(!isFinished); + isFinished = true; +} + +void +Assembler::executableCopy(uint8_t *buffer) +{ + JS_ASSERT(isFinished); + m_buffer.executableCopy(buffer); + + // Patch all long jumps during code copy. + for (size_t i = 0; i < longJumps_.length(); i++) { + Instruction *inst1 = (Instruction *) ((uint32_t)buffer + longJumps_[i]); + + uint32_t value = extractLuiOriValue(inst1, inst1->next()); + updateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value); + } + + AutoFlushCache::updateTop((uintptr_t)buffer, m_buffer.size()); +} + +uint32_t +Assembler::actualOffset(uint32_t off_) const +{ + return off_; +} + +uint32_t +Assembler::actualIndex(uint32_t idx_) const +{ + return idx_; +} + +uint8_t * +Assembler::PatchableJumpAddress(JitCode *code, uint32_t pe_) +{ + return code->raw() + pe_; +} + +class RelocationIterator +{ + CompactBufferReader reader_; + // offset in bytes + uint32_t offset_; + + public: + RelocationIterator(CompactBufferReader &reader) + : reader_(reader) + { } + + bool read() { + if (!reader_.more()) + return false; + offset_ = reader_.readUnsigned(); + return true; + } + + uint32_t offset() const { + return offset_; + } +}; + +uintptr_t +Assembler::getPointer(uint8_t *instPtr) +{ + Instruction *inst = (Instruction*)instPtr; + return Assembler::extractLuiOriValue(inst, inst->next()); +} + +static JitCode * +CodeFromJump(Instruction *jump) +{ + uint8_t *target = (uint8_t *)Assembler::extractLuiOriValue(jump, jump->next()); + return JitCode::FromExecutable(target); +} + +void +Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader) +{ + RelocationIterator iter(reader); + while (iter.read()) 
{ + JitCode *child = CodeFromJump((Instruction *)(code->raw() + iter.offset())); + MarkJitCodeUnbarriered(trc, &child, "rel32"); + } +} + +static void +TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader) +{ + while (reader.more()) { + size_t offset = reader.readUnsigned(); + Instruction *inst = (Instruction*)(buffer + offset); + void *ptr = (void *)Assembler::extractLuiOriValue(inst, inst->next()); + + // No barrier needed since these are constants. + gc::MarkGCThingUnbarriered(trc, reinterpret_cast(&ptr), "ion-masm-ptr"); + } +} + +static void +TraceDataRelocations(JSTracer *trc, MIPSBuffer *buffer, CompactBufferReader &reader) +{ + while (reader.more()) { + BufferOffset bo (reader.readUnsigned()); + MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer); + + void *ptr = (void *)Assembler::extractLuiOriValue(iter.cur(), iter.next()); + + // No barrier needed since these are constants. + gc::MarkGCThingUnbarriered(trc, reinterpret_cast(&ptr), "ion-masm-ptr"); + } +} + +void +Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader) +{ + ::TraceDataRelocations(trc, code->raw(), reader); +} + +void +Assembler::copyJumpRelocationTable(uint8_t *dest) +{ + if (jumpRelocations_.length()) + memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length()); +} + +void +Assembler::copyDataRelocationTable(uint8_t *dest) +{ + if (dataRelocations_.length()) + memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length()); +} + +void +Assembler::copyPreBarrierTable(uint8_t *dest) +{ + if (preBarriers_.length()) + memcpy(dest, preBarriers_.buffer(), preBarriers_.length()); +} + +void +Assembler::trace(JSTracer *trc) +{ + for (size_t i = 0; i < jumps_.length(); i++) { + RelativePatch &rp = jumps_[i]; + if (rp.kind == Relocation::JITCODE) { + JitCode *code = JitCode::FromExecutable((uint8_t *)rp.target); + MarkJitCodeUnbarriered(trc, &code, "masmrel32"); + JS_ASSERT(code == JitCode::FromExecutable((uint8_t 
*)rp.target)); + } + } + if (dataRelocations_.length()) { + CompactBufferReader reader(dataRelocations_); + ::TraceDataRelocations(trc, &m_buffer, reader); + } +} + +void +Assembler::processCodeLabels(uint8_t *rawCode) +{ + for (size_t i = 0; i < codeLabels_.length(); i++) { + CodeLabel label = codeLabels_[i]; + Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset())); + } +} + +void +Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address) +{ + if (label->used()) { + int32_t src = label->offset(); + do { + Instruction *inst = (Instruction *) (rawCode + src); + uint32_t next = Assembler::extractLuiOriValue(inst, inst->next()); + Assembler::updateLuiOriValue(inst, inst->next(), (uint32_t)address); + src = next; + } while (src != AbsoluteLabel::INVALID_OFFSET); + } + label->bind(); +} + +Assembler::Condition +Assembler::InvertCondition(Condition cond) +{ + switch (cond) { + case Equal: + return NotEqual; + case NotEqual: + return Equal; + case Zero: + return NonZero; + case NonZero: + return Zero; + case LessThan: + return GreaterThanOrEqual; + case LessThanOrEqual: + return GreaterThan; + case GreaterThan: + return LessThanOrEqual; + case GreaterThanOrEqual: + return LessThan; + case Above: + return BelowOrEqual; + case AboveOrEqual: + return Below; + case Below: + return AboveOrEqual; + case BelowOrEqual: + return Above; + case Signed: + return NotSigned; + case NotSigned: + return Signed; + default: + MOZ_ASSUME_UNREACHABLE("unexpected condition"); + return Equal; + } +} + +Assembler::DoubleCondition +Assembler::InvertCondition(DoubleCondition cond) +{ + switch (cond) { + case DoubleOrdered: + return DoubleUnordered; + case DoubleEqual: + return DoubleNotEqualOrUnordered; + case DoubleNotEqual: + return DoubleEqualOrUnordered; + case DoubleGreaterThan: + return DoubleLessThanOrEqualOrUnordered; + case DoubleGreaterThanOrEqual: + return DoubleLessThanOrUnordered; + case DoubleLessThan: + return 
DoubleGreaterThanOrEqualOrUnordered; + case DoubleLessThanOrEqual: + return DoubleGreaterThanOrUnordered; + case DoubleUnordered: + return DoubleOrdered; + case DoubleEqualOrUnordered: + return DoubleNotEqual; + case DoubleNotEqualOrUnordered: + return DoubleEqual; + case DoubleGreaterThanOrUnordered: + return DoubleLessThanOrEqual; + case DoubleGreaterThanOrEqualOrUnordered: + return DoubleLessThan; + case DoubleLessThanOrUnordered: + return DoubleGreaterThanOrEqual; + case DoubleLessThanOrEqualOrUnordered: + return DoubleGreaterThan; + default: + MOZ_ASSUME_UNREACHABLE("unexpected condition"); + return DoubleEqual; + } +} + +BOffImm16::BOffImm16(InstImm inst) + : data(inst.encode() & Imm16Mask) +{ +} + +bool +Assembler::oom() const +{ + return m_buffer.oom() || + !enoughMemory_ || + jumpRelocations_.oom() || + dataRelocations_.oom() || + preBarriers_.oom(); +} + +bool +Assembler::addCodeLabel(CodeLabel label) +{ + return codeLabels_.append(label); +} + +// Size of the instruction stream, in bytes. +size_t +Assembler::size() const +{ + return m_buffer.size(); +} + +// Size of the relocation table, in bytes. +size_t +Assembler::jumpRelocationTableBytes() const +{ + return jumpRelocations_.length(); +} + +size_t +Assembler::dataRelocationTableBytes() const +{ + return dataRelocations_.length(); +} + +size_t +Assembler::preBarrierTableBytes() const +{ + return preBarriers_.length(); +} + +// Size of the data table, in bytes. 
+size_t +Assembler::bytesNeeded() const +{ + return size() + + jumpRelocationTableBytes() + + dataRelocationTableBytes() + + preBarrierTableBytes(); +} + +// write a blob of binary into the instruction stream +BufferOffset +Assembler::writeInst(uint32_t x, uint32_t *dest) +{ + if (dest == nullptr) + return m_buffer.putInt(x); + + writeInstStatic(x, dest); + return BufferOffset(); +} + +void +Assembler::writeInstStatic(uint32_t x, uint32_t *dest) +{ + JS_ASSERT(dest != nullptr); + *dest = x; +} + +BufferOffset +Assembler::align(int alignment) +{ + BufferOffset ret; + JS_ASSERT(m_buffer.isAligned(4)); + if (alignment == 8) { + if (!m_buffer.isAligned(alignment)) { + BufferOffset tmp = as_nop(); + if (!ret.assigned()) + ret = tmp; + } + } else { + JS_ASSERT((alignment & (alignment - 1)) == 0); + while (size() & (alignment - 1)) { + BufferOffset tmp = as_nop(); + if (!ret.assigned()) + ret = tmp; + } + } + return ret; +} + +BufferOffset +Assembler::as_nop() +{ + return writeInst(op_special | ff_sll); +} + +// Logical operations. 
+BufferOffset +Assembler::as_and(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_and).encode()); +} + +BufferOffset +Assembler::as_or(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_or).encode()); +} + +BufferOffset +Assembler::as_xor(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_xor).encode()); +} + +BufferOffset +Assembler::as_nor(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_nor).encode()); +} + +BufferOffset +Assembler::as_andi(Register rd, Register rs, int32_t j) +{ + JS_ASSERT(Imm16::isInUnsignedRange(j)); + return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode()); +} + +BufferOffset +Assembler::as_ori(Register rd, Register rs, int32_t j) +{ + JS_ASSERT(Imm16::isInUnsignedRange(j)); + return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode()); +} + +BufferOffset +Assembler::as_xori(Register rd, Register rs, int32_t j) +{ + JS_ASSERT(Imm16::isInUnsignedRange(j)); + return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode()); +} + +// Branch and jump instructions +BufferOffset +Assembler::as_bal(BOffImm16 off) +{ + BufferOffset bo = writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode()); + return bo; +} + +InstImm +Assembler::getBranchCode(bool isCall) +{ + if (isCall) + return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)); + + return InstImm(op_beq, zero, zero, BOffImm16(0)); +} + +InstImm +Assembler::getBranchCode(Register s, Register t, Condition c) +{ + JS_ASSERT(c == Assembler::Equal || c == Assembler::NotEqual); + return InstImm(c == Assembler::Equal ? 
op_beq : op_bne, s, t, BOffImm16(0)); +} + +InstImm +Assembler::getBranchCode(Register s, Condition c) +{ + switch (c) { + case Assembler::Equal: + case Assembler::Zero: + case Assembler::BelowOrEqual: + return InstImm(op_beq, s, zero, BOffImm16(0)); + case Assembler::NotEqual: + case Assembler::NonZero: + case Assembler::Above: + return InstImm(op_bne, s, zero, BOffImm16(0)); + case Assembler::GreaterThan: + return InstImm(op_bgtz, s, zero, BOffImm16(0)); + case Assembler::GreaterThanOrEqual: + case Assembler::NotSigned: + return InstImm(op_regimm, s, rt_bgez, BOffImm16(0)); + case Assembler::LessThan: + case Assembler::Signed: + return InstImm(op_regimm, s, rt_bltz, BOffImm16(0)); + case Assembler::LessThanOrEqual: + return InstImm(op_blez, s, zero, BOffImm16(0)); + default: + MOZ_ASSUME_UNREACHABLE("Condition not supported."); + } +} + +InstImm +Assembler::getBranchCode(bool testTrue, FPConditionBit fcc) +{ + JS_ASSERT(!(fcc && FccMask)); + uint32_t rtField = ((testTrue ? 1 : 0) | (fcc << FccShift)) << RTShift; + + return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0)); +} + +BufferOffset +Assembler::as_j(JOffImm26 off) +{ + BufferOffset bo = writeInst(InstJump(op_j, off).encode()); + return bo; +} +BufferOffset +Assembler::as_jal(JOffImm26 off) +{ + BufferOffset bo = writeInst(InstJump(op_jal, off).encode()); + return bo; +} + +BufferOffset +Assembler::as_jr(Register rs) +{ + BufferOffset bo = writeInst(InstReg(op_special, rs, zero, zero, ff_jr).encode()); + return bo; +} +BufferOffset +Assembler::as_jalr(Register rs) +{ + BufferOffset bo = writeInst(InstReg(op_special, rs, zero, ra, ff_jalr).encode()); + return bo; +} + + +// Arithmetic instructions +BufferOffset +Assembler::as_addu(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode()); +} + +BufferOffset +Assembler::as_addiu(Register rd, Register rs, int32_t j) +{ + JS_ASSERT(Imm16::isInSignedRange(j)); + return writeInst(InstImm(op_addiu, rs, rd, 
Imm16(j)).encode()); +} + +BufferOffset +Assembler::as_subu(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_subu).encode()); +} + +BufferOffset +Assembler::as_mult(Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, ff_mult).encode()); +} + +BufferOffset +Assembler::as_multu(Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, ff_multu).encode()); +} + +BufferOffset +Assembler::as_div(Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, ff_div).encode()); +} + +BufferOffset +Assembler::as_divu(Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, ff_divu).encode()); +} + +BufferOffset +Assembler::as_mul(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special2, rs, rt, rd, ff_mul).encode()); +} + +BufferOffset +Assembler::as_lui(Register rd, int32_t j) +{ + JS_ASSERT(Imm16::isInUnsignedRange(j)); + return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode()); +} + +// Shift instructions +BufferOffset +Assembler::as_sll(Register rd, Register rt, uint16_t sa) +{ + JS_ASSERT(sa < 32); + return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sll).encode()); +} + +BufferOffset +Assembler::as_sllv(Register rd, Register rt, Register rs) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_sllv).encode()); +} + +BufferOffset +Assembler::as_srl(Register rd, Register rt, uint16_t sa) +{ + JS_ASSERT(sa < 32); + return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_srl).encode()); +} + +BufferOffset +Assembler::as_srlv(Register rd, Register rt, Register rs) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_srlv).encode()); +} + +BufferOffset +Assembler::as_sra(Register rd, Register rt, uint16_t sa) +{ + JS_ASSERT(sa < 32); + return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sra).encode()); +} + +BufferOffset +Assembler::as_srav(Register rd, Register rt, Register rs) +{ + return 
writeInst(InstReg(op_special, rs, rt, rd, ff_srav).encode()); +} + +BufferOffset +Assembler::as_rotr(Register rd, Register rt, uint16_t sa) +{ + JS_ASSERT(sa < 32); + return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode()); +} + +BufferOffset +Assembler::as_rotrv(Register rd, Register rt, Register rs) +{ + return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode()); +} + +// Load and store instructions +BufferOffset +Assembler::as_lb(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_lb, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_lbu(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_lbu, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_lh(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_lh, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_lhu(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_lhu, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_lw(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_lw, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_lwl(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_lwl, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_lwr(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_lwr, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_sb(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_sb, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_sh(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_sh, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_sw(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_sw, rs, rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_swl(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_swl, rs, 
rd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_swr(Register rd, Register rs, int16_t off) +{ + return writeInst(InstImm(op_swr, rs, rd, Imm16(off)).encode()); +} + +// Move from HI/LO register. +BufferOffset +Assembler::as_mfhi(Register rd) +{ + return writeInst(InstReg(op_special, rd, ff_mfhi).encode()); +} + +BufferOffset +Assembler::as_mflo(Register rd) +{ + return writeInst(InstReg(op_special, rd, ff_mflo).encode()); +} + +// Set on less than. +BufferOffset +Assembler::as_slt(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_slt).encode()); +} + +BufferOffset +Assembler::as_sltu(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_sltu).encode()); +} + +BufferOffset +Assembler::as_slti(Register rd, Register rs, int32_t j) +{ + JS_ASSERT(Imm16::isInSignedRange(j)); + return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode()); +} + +BufferOffset +Assembler::as_sltiu(Register rd, Register rs, uint32_t j) +{ + JS_ASSERT(Imm16::isInUnsignedRange(j)); + return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode()); +} + +// Conditional move. +BufferOffset +Assembler::as_movz(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_movz).encode()); +} + +BufferOffset +Assembler::as_movn(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special, rs, rt, rd, ff_movn).encode()); +} + +BufferOffset +Assembler::as_movt(Register rd, Register rs, uint16_t cc) +{ + Register rt; + rt = Register::FromCode((cc & 0x7) << 2 | 1); + return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode()); +} + +BufferOffset +Assembler::as_movf(Register rd, Register rs, uint16_t cc) +{ + Register rt; + rt = Register::FromCode((cc & 0x7) << 2 | 0); + return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode()); +} + +// Bit twiddling. 
+BufferOffset +Assembler::as_clz(Register rd, Register rs, Register rt) +{ + return writeInst(InstReg(op_special2, rs, rt, rd, ff_clz).encode()); +} + +BufferOffset +Assembler::as_ins(Register rt, Register rs, uint16_t pos, uint16_t size) +{ + JS_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size >= 32); + Register rd; + rd = Register::FromCode(pos + size - 1); + return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode()); +} + +BufferOffset +Assembler::as_ext(Register rt, Register rs, uint16_t pos, uint16_t size) +{ + JS_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size >= 32); + Register rd; + rd = Register::FromCode(size - 1); + return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode()); +} + +// FP instructions +BufferOffset +Assembler::as_ld(FloatRegister fd, Register base, int32_t off) +{ + JS_ASSERT(Imm16::isInSignedRange(off)); + return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_sd(FloatRegister fd, Register base, int32_t off) +{ + JS_ASSERT(Imm16::isInSignedRange(off)); + return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_ls(FloatRegister fd, Register base, int32_t off) +{ + JS_ASSERT(Imm16::isInSignedRange(off)); + return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_ss(FloatRegister fd, Register base, int32_t off) +{ + JS_ASSERT(Imm16::isInSignedRange(off)); + return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode()); +} + +BufferOffset +Assembler::as_movs(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_mov_fmt).encode()); +} + +BufferOffset +Assembler::as_movd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_mov_fmt).encode()); +} + +BufferOffset +Assembler::as_mtc1(Register rt, FloatRegister fs) +{ + return 
writeInst(InstReg(op_cop1, rs_mtc1, rt, fs).encode()); +} + +BufferOffset +Assembler::as_mfc1(Register rt, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_mfc1, rt, fs).encode()); +} + + +// :TODO: Bug 972836, Remove _Odd functions once we can use odd regs. +BufferOffset +Assembler::as_ls_Odd(FloatRegister fd, Register base, int32_t off) +{ + JS_ASSERT(Imm16::isInSignedRange(off)); + // Hardcoded because it will be removed once we can use odd regs. + return writeInst(op_lwc1 | RS(base) | RT(fd.code() * 2 + 1) | Imm16(off).encode()); +} + +BufferOffset +Assembler::as_ss_Odd(FloatRegister fd, Register base, int32_t off) +{ + JS_ASSERT(Imm16::isInSignedRange(off)); + // Hardcoded because it will be removed once we can use odd regs. + return writeInst(op_swc1 | RS(base) | RT(fd.code() * 2 + 1) | Imm16(off).encode()); +} + +BufferOffset +Assembler::as_mtc1_Odd(Register rt, FloatRegister fs) +{ + // Hardcoded because it will be removed once we can use odd regs. + return writeInst(op_cop1 | rs_mtc1 | RT(rt) | RD(fs.code() * 2 + 1)); +} + +BufferOffset +Assembler::as_mfc1_Odd(Register rt, FloatRegister fs) +{ + // Hardcoded because it will be removed once we can use odd regs. 
+ return writeInst(op_cop1 | rs_mfc1 | RT(rt) | RD(fs.code() * 2 + 1)); +} + + +// FP convert instructions +BufferOffset +Assembler::as_ceilws(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_ceil_w_fmt).encode()); +} + +BufferOffset +Assembler::as_floorws(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_floor_w_fmt).encode()); +} + +BufferOffset +Assembler::as_roundws(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_round_w_fmt).encode()); +} + +BufferOffset +Assembler::as_truncws(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_w_fmt).encode()); +} + +BufferOffset +Assembler::as_ceilwd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_ceil_w_fmt).encode()); +} + +BufferOffset +Assembler::as_floorwd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_floor_w_fmt).encode()); +} + +BufferOffset +Assembler::as_roundwd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_round_w_fmt).encode()); +} + +BufferOffset +Assembler::as_truncwd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_w_fmt).encode()); +} + +BufferOffset +Assembler::as_cvtds(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_d_fmt).encode()); +} + +BufferOffset +Assembler::as_cvtdw(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_d_fmt).encode()); +} + +BufferOffset +Assembler::as_cvtsd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_s_fmt).encode()); +} + +BufferOffset +Assembler::as_cvtsw(FloatRegister fd, FloatRegister fs) +{ + return 
writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_s_fmt).encode()); +} + +BufferOffset +Assembler::as_cvtwd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_w_fmt).encode()); +} + +BufferOffset +Assembler::as_cvtws(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_w_fmt).encode()); +} + +// FP arithmetic instructions +BufferOffset +Assembler::as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_add_fmt).encode()); +} + +BufferOffset +Assembler::as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_add_fmt).encode()); +} + +BufferOffset +Assembler::as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_sub_fmt).encode()); +} + +BufferOffset +Assembler::as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_sub_fmt).encode()); +} + +BufferOffset +Assembler::as_abss(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_abs_fmt).encode()); +} + +BufferOffset +Assembler::as_absd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode()); +} + +BufferOffset +Assembler::as_negd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode()); +} + +BufferOffset +Assembler::as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_mul_fmt).encode()); +} + +BufferOffset +Assembler::as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_mul_fmt).encode()); +} + +BufferOffset +Assembler::as_divs(FloatRegister fd, 
FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_div_fmt).encode()); +} + +BufferOffset +Assembler::as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_div_fmt).encode()); +} + +BufferOffset +Assembler::as_sqrts(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_sqrt_fmt).encode()); +} + +BufferOffset +Assembler::as_sqrtd(FloatRegister fd, FloatRegister fs) +{ + return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode()); +} + +// FP compare instructions +BufferOffset +Assembler::as_cfs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_f_fmt).encode()); +} + +BufferOffset +Assembler::as_cuns(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_un_fmt).encode()); +} + +BufferOffset +Assembler::as_ceqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode()); +} + +BufferOffset +Assembler::as_cueqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode()); +} + +BufferOffset +Assembler::as_colts(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode()); +} + +BufferOffset +Assembler::as_cults(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode()); +} + +BufferOffset +Assembler::as_coles(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode()); +} + +BufferOffset 
+Assembler::as_cules(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode()); +} + +BufferOffset +Assembler::as_cfd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_f_fmt).encode()); +} + +BufferOffset +Assembler::as_cund(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_un_fmt).encode()); +} + +BufferOffset +Assembler::as_ceqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode()); +} + +BufferOffset +Assembler::as_cueqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode()); +} + +BufferOffset +Assembler::as_coltd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode()); +} + +BufferOffset +Assembler::as_cultd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode()); +} + +BufferOffset +Assembler::as_coled(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode()); +} + +BufferOffset +Assembler::as_culed(FloatRegister fs, FloatRegister ft, FPConditionBit fcc) +{ + return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode()); +} + +void +Assembler::bind(Label *label, BufferOffset boff) +{ + // If our caller didn't give us an explicit target to bind to + // then we want to bind to the location of the next instruction + BufferOffset dest = boff.assigned() ? 
boff : nextOffset(); + if (label->used()) { + int32_t next; + + // A used label holds a link to branch that uses it. + BufferOffset b(label); + do { + Instruction *inst = editSrc(b); + + // Second word holds a pointer to the next branch in label's chain. + next = inst[1].encode(); + bind(reinterpret_cast(inst), b.getOffset(), dest.getOffset()); + + b = BufferOffset(next); + } while (next != LabelBase::INVALID_OFFSET); + } + label->bind(dest.getOffset()); +} + +void +Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target) +{ + int32_t offset = target - branch; + InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)); + InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0)); + + // If encoded offset is 4, then the jump must be short + if (BOffImm16(inst[0]).decode() == 4) { + JS_ASSERT(BOffImm16::isInRange(offset)); + inst[0].setBOffImm16(BOffImm16(offset)); + inst[1].makeNop(); + return; + } + if (BOffImm16::isInRange(offset)) { + bool conditional = (inst[0].encode() != inst_bgezal.encode() && + inst[0].encode() != inst_beq.encode()); + + inst[0].setBOffImm16(BOffImm16(offset)); + inst[1].makeNop(); + + // Skip the trailing nops in conditional branches. + if (conditional) { + inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void *))).encode(); + // There are 2 nops after this + } + return; + } + + if (inst[0].encode() == inst_bgezal.encode()) { + // Handle long call. + addLongJump(BufferOffset(branch)); + writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target); + inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode(); + // There is 1 nop after this. + } else if (inst[0].encode() == inst_beq.encode()) { + // Handle long unconditional jump. + addLongJump(BufferOffset(branch)); + writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target); + inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); + // There is 1 nop after this. 
+ } else { + // Handle long conditional jump. + inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void *))); + // No need for a "nop" here because we can clobber scratch. + addLongJump(BufferOffset(branch + sizeof(void *))); + writeLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target); + inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); + // There is 1 nop after this. + } +} + +void +Assembler::bind(RepatchLabel *label) +{ + BufferOffset dest = nextOffset(); + if (label->used()) { + // If the label has a use, then change this use to refer to + // the bound label; + BufferOffset b(label->offset()); + Instruction *inst1 = editSrc(b); + Instruction *inst2 = inst1->next(); + + updateLuiOriValue(inst1, inst2, dest.getOffset()); + } + label->bind(dest.getOffset()); +} + +void +Assembler::retarget(Label *label, Label *target) +{ + if (label->used()) { + if (target->bound()) { + bind(label, BufferOffset(target)); + } else if (target->used()) { + // The target is not bound but used. Prepend label's branch list + // onto target's. + int32_t next; + BufferOffset labelBranchOffset(label); + + // Find the head of the use chain for label. + do { + Instruction *inst = editSrc(labelBranchOffset); + + // Second word holds a pointer to the next branch in chain. + next = inst[1].encode(); + labelBranchOffset = BufferOffset(next); + } while (next != LabelBase::INVALID_OFFSET); + + // Then patch the head of label's use chain to the tail of + // target's use chain, prepending the entire use chain of target. + Instruction *inst = editSrc(labelBranchOffset); + int32_t prev = target->use(label->offset()); + inst[1].setData(prev); + } else { + // The target is unbound and unused. We can just take the head of + // the list hanging off of label, and dump that into target. 
+ DebugOnly prev = target->use(label->offset()); + JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET); + } + } + label->reset(); +} + +void dbg_break() {} +static int stopBKPT = -1; +void +Assembler::as_break(uint32_t code) +{ + JS_ASSERT(code <= MAX_BREAK_CODE); + writeInst(op_special | code << RTShift | ff_break); +} + +uint32_t +Assembler::patchWrite_NearCallSize() +{ + return 4 * sizeof(uint32_t); +} + +void +Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) +{ + Instruction *inst = (Instruction *) start.raw(); + uint8_t *dest = toCall.raw(); + + // Overwrite whatever instruction used to be here with a call. + // Always use long jump for two reasons: + // - Jump has to be the same size because of patchWrite_NearCallSize. + // - Return address has to be at the end of replaced block. + // Short jump wouldn't be more efficient. + writeLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest); + inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr); + inst[3] = InstNOP(); + + // Ensure everyone sees the code that was just written into memory. 
+ AutoFlushCache::updateTop(uintptr_t(inst), patchWrite_NearCallSize()); +} + +uint32_t +Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1) +{ + InstImm *i0 = (InstImm *) inst0; + InstImm *i1 = (InstImm *) inst1; + JS_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift)); + JS_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + + uint32_t value = i0->extractImm16Value() << 16; + value = value | i1->extractImm16Value(); + return value; +} + +void +Assembler::updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value) +{ + JS_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift)); + JS_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + + ((InstImm *) inst0)->setImm16(Imm16::upper(Imm32(value))); + ((InstImm *) inst1)->setImm16(Imm16::lower(Imm32(value))); +} + +void +Assembler::writeLuiOriInstructions(Instruction *inst0, Instruction *inst1, + Register reg, uint32_t value) +{ + *inst0 = InstImm(op_lui, zero, reg, Imm16::upper(Imm32(value))); + *inst1 = InstImm(op_ori, reg, reg, Imm16::lower(Imm32(value))); +} + +void +Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, + PatchedImmPtr expectedValue) +{ + Instruction *inst = (Instruction *) label.raw(); + + // Extract old Value + DebugOnly value = Assembler::extractLuiOriValue(&inst[0], &inst[1]); + JS_ASSERT(value == uint32_t(expectedValue.value)); + + // Replace with new value + Assembler::updateLuiOriValue(inst, inst->next(), uint32_t(newValue.value)); + + AutoFlushCache::updateTop(uintptr_t(inst), 8); +} + +void +Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue) +{ + patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), + PatchedImmPtr(expectedValue.value)); +} + +// This just stomps over memory with 32 bits of raw data. Its purpose is to +// overwrite the call of JITed code with 32 bits worth of an offset. 
This +// is only meant to function on code that has been invalidated, so it should +// be totally safe. Since that instruction will never be executed again, an +// ICache flush should not be necessary +void +Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) +{ + // Raw is going to be the return address. + uint32_t *raw = (uint32_t*)label.raw(); + // Overwrite the 4 bytes before the return address, which will + // end up being the call instruction. + *(raw - 1) = imm.value; +} + +uint8_t * +Assembler::nextInstruction(uint8_t *inst_, uint32_t *count) +{ + Instruction *inst = reinterpret_cast(inst_); + if (count != nullptr) + *count += sizeof(Instruction); + return reinterpret_cast(inst->next()); +} + +// Since there are no pools in MIPS implementation, this should be simple. +Instruction * +Instruction::next() +{ + return this + 1; +} + +InstImm Assembler::invertBranch(InstImm branch, BOffImm16 skipOffset) +{ + uint32_t rt = 0; + Opcode op = (Opcode) (branch.extractOpcode() << OpcodeShift); + switch(op) { + case op_beq: + branch.setBOffImm16(skipOffset); + branch.setOpcode(op_bne); + return branch; + case op_bne: + branch.setBOffImm16(skipOffset); + branch.setOpcode(op_beq); + return branch; + case op_bgtz: + branch.setBOffImm16(skipOffset); + branch.setOpcode(op_blez); + return branch; + case op_blez: + branch.setBOffImm16(skipOffset); + branch.setOpcode(op_bgtz); + return branch; + case op_regimm: + branch.setBOffImm16(skipOffset); + rt = branch.extractRT(); + if (rt == (rt_bltz >> RTShift)) { + branch.setRT(rt_bgez); + return branch; + } + if (rt == (rt_bgez >> RTShift)) { + branch.setRT(rt_bltz); + return branch; + } + + MOZ_ASSUME_UNREACHABLE("Error creating long branch."); + return branch; + + case op_cop1: + JS_ASSERT(branch.extractRS() == rs_bc1 >> RSShift); + + branch.setBOffImm16(skipOffset); + rt = branch.extractRT(); + if (rt & 0x1) + branch.setRT((RTField) ((rt & ~0x1) << RTShift)); + else + branch.setRT((RTField) ((rt | 0x1) << 
RTShift)); + return branch; + } + + MOZ_ASSUME_UNREACHABLE("Error creating long branch."); + return branch; +} + +void +Assembler::ToggleToJmp(CodeLocationLabel inst_) +{ + InstImm * inst = (InstImm *)inst_.raw(); + + JS_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift)); + // We converted beq to andi, so now we restore it. + inst->setOpcode(op_beq); + + AutoFlushCache::updateTop((uintptr_t)inst, 4); +} + +void +Assembler::ToggleToCmp(CodeLocationLabel inst_) +{ + InstImm * inst = (InstImm *)inst_.raw(); + + // toggledJump is always used for short jumps. + JS_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift)); + // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset" + inst->setOpcode(op_andi); + + AutoFlushCache::updateTop((uintptr_t)inst, 4); +} + +void +Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) +{ + Instruction *inst = (Instruction *)inst_.raw(); + InstImm *i0 = (InstImm *) inst; + InstImm *i1 = (InstImm *) i0->next(); + Instruction *i2 = (Instruction *) i1->next(); + + JS_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift)); + JS_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + + if (enabled) { + InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr); + *i2 = jalr; + } else { + InstNOP nop; + *i2 = nop; + } + + AutoFlushCache::updateTop((uintptr_t)i2, 4); +} + +void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst) +{ + MOZ_ASSUME_UNREACHABLE("NYI"); +} + +void +AutoFlushCache::update(uintptr_t newStart, size_t len) +{ + uintptr_t newStop = newStart + len; + if (this == nullptr) { + // just flush right here and now. + JSC::ExecutableAllocator::cacheFlush((void*)newStart, len); + return; + } + used_ = true; + if (!start_) { + IonSpewCont(IonSpew_CacheFlush, "."); + start_ = newStart; + stop_ = newStop; + return; + } + + if (newStop < start_ - 4096 || newStart > stop_ + 4096) { + // If this would add too many pages to the range. 
Flush recorded range + // and make a new range. + IonSpewCont(IonSpew_CacheFlush, "*"); + JSC::ExecutableAllocator::cacheFlush((void*)start_, stop_); + start_ = newStart; + stop_ = newStop; + return; + } + start_ = Min(start_, newStart); + stop_ = Max(stop_, newStop); + IonSpewCont(IonSpew_CacheFlush, "."); +} + +AutoFlushCache::~AutoFlushCache() +{ + if (!runtime_) + return; + + flushAnyway(); + IonSpewCont(IonSpew_CacheFlush, ">", name_); + if (runtime_->flusher() == this) { + IonSpewFin(IonSpew_CacheFlush); + runtime_->setFlusher(nullptr); + } +} + +void +AutoFlushCache::flushAnyway() +{ + if (!runtime_) + return; + + IonSpewCont(IonSpew_CacheFlush, "|", name_); + + if (!used_) + return; + + if (start_) { + JSC::ExecutableAllocator::cacheFlush((void *)start_, + size_t(stop_ - start_ + sizeof(Instruction))); + } else { + JSC::ExecutableAllocator::cacheFlush(nullptr, 0xff000000); + } + used_ = false; +} + diff --git a/js/src/jit/mips/Assembler-mips.h b/js/src/jit/mips/Assembler-mips.h new file mode 100644 index 000000000000..b9c5a20c04f9 --- /dev/null +++ b/js/src/jit/mips/Assembler-mips.h @@ -0,0 +1,1251 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_mips_Assembler_mips_h +#define jit_mips_Assembler_mips_h + +#include "mozilla/ArrayUtils.h" +#include "mozilla/Attributes.h" +#include "mozilla/MathAlgorithms.h" + +#include "jit/CompactBuffer.h" +#include "jit/IonCode.h" +#include "jit/IonSpewer.h" +#include "jit/mips/Architecture-mips.h" +#include "jit/shared/Assembler-shared.h" +#include "jit/shared/IonAssemblerBuffer.h" + +namespace js { +namespace jit { + +static MOZ_CONSTEXPR_VAR Register zero = { Registers::zero }; +static MOZ_CONSTEXPR_VAR Register at = { Registers::at }; +static MOZ_CONSTEXPR_VAR Register v0 = { Registers::v0 }; +static MOZ_CONSTEXPR_VAR Register v1 = { Registers::v1 }; +static MOZ_CONSTEXPR_VAR Register a0 = { Registers::a0 }; +static MOZ_CONSTEXPR_VAR Register a1 = { Registers::a1 }; +static MOZ_CONSTEXPR_VAR Register a2 = { Registers::a2 }; +static MOZ_CONSTEXPR_VAR Register a3 = { Registers::a3 }; +static MOZ_CONSTEXPR_VAR Register t0 = { Registers::t0 }; +static MOZ_CONSTEXPR_VAR Register t1 = { Registers::t1 }; +static MOZ_CONSTEXPR_VAR Register t2 = { Registers::t2 }; +static MOZ_CONSTEXPR_VAR Register t3 = { Registers::t3 }; +static MOZ_CONSTEXPR_VAR Register t4 = { Registers::t4 }; +static MOZ_CONSTEXPR_VAR Register t5 = { Registers::t5 }; +static MOZ_CONSTEXPR_VAR Register t6 = { Registers::t6 }; +static MOZ_CONSTEXPR_VAR Register t7 = { Registers::t7 }; +static MOZ_CONSTEXPR_VAR Register s0 = { Registers::s0 }; +static MOZ_CONSTEXPR_VAR Register s1 = { Registers::s1 }; +static MOZ_CONSTEXPR_VAR Register s2 = { Registers::s2 }; +static MOZ_CONSTEXPR_VAR Register s3 = { Registers::s3 }; +static MOZ_CONSTEXPR_VAR Register s4 = { Registers::s4 }; +static MOZ_CONSTEXPR_VAR Register s5 = { Registers::s5 }; +static MOZ_CONSTEXPR_VAR Register s6 = { Registers::s6 }; +static MOZ_CONSTEXPR_VAR Register s7 = { Registers::s7 }; +static MOZ_CONSTEXPR_VAR Register t8 = { Registers::t8 }; +static MOZ_CONSTEXPR_VAR Register t9 = { Registers::t9 }; +static MOZ_CONSTEXPR_VAR 
Register k0 = { Registers::k0 }; +static MOZ_CONSTEXPR_VAR Register k1 = { Registers::k1 }; +static MOZ_CONSTEXPR_VAR Register gp = { Registers::gp }; +static MOZ_CONSTEXPR_VAR Register sp = { Registers::sp }; +static MOZ_CONSTEXPR_VAR Register fp = { Registers::fp }; +static MOZ_CONSTEXPR_VAR Register ra = { Registers::ra }; + +static MOZ_CONSTEXPR_VAR Register ScratchRegister = at; + +// Use arg reg from EnterJIT function as OsrFrameReg. +static MOZ_CONSTEXPR_VAR Register OsrFrameReg = a3; +static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = s3; +static MOZ_CONSTEXPR_VAR Register CallTempReg0 = t0; +static MOZ_CONSTEXPR_VAR Register CallTempReg1 = t1; +static MOZ_CONSTEXPR_VAR Register CallTempReg2 = t2; +static MOZ_CONSTEXPR_VAR Register CallTempReg3 = t3; +static MOZ_CONSTEXPR_VAR Register CallTempReg4 = t4; +static MOZ_CONSTEXPR_VAR Register CallTempReg5 = t5; + +static MOZ_CONSTEXPR_VAR Register IntArgReg0 = a0; +static MOZ_CONSTEXPR_VAR Register IntArgReg1 = a1; +static MOZ_CONSTEXPR_VAR Register IntArgReg2 = a2; +static MOZ_CONSTEXPR_VAR Register IntArgReg3 = a3; +static MOZ_CONSTEXPR_VAR Register GlobalReg = s6; // used by Odin +static MOZ_CONSTEXPR_VAR Register HeapReg = s7; // used by Odin +static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { t0, t1, t2, t3, t4 }; +static const uint32_t NumCallTempNonArgRegs = mozilla::ArrayLength(CallTempNonArgRegs); + +class ABIArgGenerator +{ + unsigned usedArgSlots_; + bool firstArgFloat; + ABIArg current_; + + public: + ABIArgGenerator(); + ABIArg next(MIRType argType); + ABIArg ¤t() { return current_; } + + uint32_t stackBytesConsumedSoFar() const { + if (usedArgSlots_ <= 4) + return 4 * sizeof(intptr_t); + + return usedArgSlots_ * sizeof(intptr_t); + } + + static const Register NonArgReturnVolatileReg0; + static const Register NonArgReturnVolatileReg1; +}; + +static MOZ_CONSTEXPR_VAR Register PreBarrierReg = a1; + +static MOZ_CONSTEXPR_VAR Register InvalidReg = { Registers::invalid_reg }; +static 
MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { FloatRegisters::invalid_freg }; + +static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = v1; +static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = v0; +static MOZ_CONSTEXPR_VAR Register StackPointer = sp; +static MOZ_CONSTEXPR_VAR Register FramePointer = fp; +static MOZ_CONSTEXPR_VAR Register ReturnReg = v0; +static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = { FloatRegisters::f0 }; +static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = { FloatRegisters::f18 }; +static MOZ_CONSTEXPR_VAR FloatRegister SecondScratchFloatReg = { FloatRegisters::f16 }; + +static MOZ_CONSTEXPR_VAR FloatRegister NANReg = { FloatRegisters::f30 }; + +static MOZ_CONSTEXPR_VAR FloatRegister f0 = {FloatRegisters::f0}; +static MOZ_CONSTEXPR_VAR FloatRegister f2 = {FloatRegisters::f2}; +static MOZ_CONSTEXPR_VAR FloatRegister f4 = {FloatRegisters::f4}; +static MOZ_CONSTEXPR_VAR FloatRegister f6 = {FloatRegisters::f6}; +static MOZ_CONSTEXPR_VAR FloatRegister f8 = {FloatRegisters::f8}; +static MOZ_CONSTEXPR_VAR FloatRegister f10 = {FloatRegisters::f10}; +static MOZ_CONSTEXPR_VAR FloatRegister f12 = {FloatRegisters::f12}; +static MOZ_CONSTEXPR_VAR FloatRegister f14 = {FloatRegisters::f14}; +static MOZ_CONSTEXPR_VAR FloatRegister f16 = {FloatRegisters::f16}; +static MOZ_CONSTEXPR_VAR FloatRegister f18 = {FloatRegisters::f18}; +static MOZ_CONSTEXPR_VAR FloatRegister f20 = {FloatRegisters::f20}; +static MOZ_CONSTEXPR_VAR FloatRegister f22 = {FloatRegisters::f22}; +static MOZ_CONSTEXPR_VAR FloatRegister f24 = {FloatRegisters::f24}; +static MOZ_CONSTEXPR_VAR FloatRegister f26 = {FloatRegisters::f26}; +static MOZ_CONSTEXPR_VAR FloatRegister f28 = {FloatRegisters::f28}; +static MOZ_CONSTEXPR_VAR FloatRegister f30 = {FloatRegisters::f30}; + +// MIPS CPUs can only load multibyte data that is "naturally" +// four-byte-aligned, sp register should be eight-byte-aligned. 
+static const uint32_t StackAlignment = 8; +static const uint32_t CodeAlignment = 4; +static const bool StackKeptAligned = true; +// NativeFrameSize is the size of return address on stack in AsmJS functions. +static const uint32_t NativeFrameSize = sizeof(void*); +static const uint32_t AlignmentAtPrologue = 0; +static const uint32_t AlignmentMidPrologue = NativeFrameSize; + +static const Scale ScalePointer = TimesFour; + +// MIPS instruction types +// +---------------------------------------------------------------+ +// | 6 | 5 | 5 | 5 | 5 | 6 | +// +---------------------------------------------------------------+ +// Register type | Opcode | Rs | Rt | Rd | Sa | Function | +// +---------------------------------------------------------------+ +// | 6 | 5 | 5 | 16 | +// +---------------------------------------------------------------+ +// Immediate type | Opcode | Rs | Rt | 2's complement constant | +// +---------------------------------------------------------------+ +// | 6 | 26 | +// +---------------------------------------------------------------+ +// Jump type | Opcode | jump_target | +// +---------------------------------------------------------------+ +// 31 bit bit 0 + +// MIPS instruction encoding constants. 
+static const uint32_t OpcodeShift = 26; +static const uint32_t OpcodeBits = 6; +static const uint32_t RSShift = 21; +static const uint32_t RSBits = 5; +static const uint32_t RTShift = 16; +static const uint32_t RTBits = 5; +static const uint32_t RDShift = 11; +static const uint32_t RDBits = 5; +static const uint32_t SAShift = 6; +static const uint32_t SABits = 5; +static const uint32_t FunctionShift = 0; +static const uint32_t FunctionBits = 5; +static const uint32_t Imm16Shift = 0; +static const uint32_t Imm16Bits = 16; +static const uint32_t Imm26Shift = 0; +static const uint32_t Imm26Bits = 26; +static const uint32_t Imm28Shift = 0; +static const uint32_t Imm28Bits = 28; +static const uint32_t ImmFieldShift = 2; +static const uint32_t FccMask = 0x7; +static const uint32_t FccShift = 2; + + +// MIPS instruction field bit masks. +static const uint32_t OpcodeMask = ((1 << OpcodeBits) - 1) << OpcodeShift; +static const uint32_t Imm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift; +static const uint32_t Imm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift; +static const uint32_t Imm28Mask = ((1 << Imm28Bits) - 1) << Imm28Shift; +static const uint32_t RSMask = ((1 << RSBits) - 1) << RSShift; +static const uint32_t RTMask = ((1 << RTBits) - 1) << RTShift; +static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift; +static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift; +static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift; +static const uint32_t RegMask = Registers::Total - 1; +static const uint32_t StackAlignmentMask = StackAlignment - 1; + +static const int32_t MAX_BREAK_CODE = 1024 - 1; + +class Instruction; +class InstReg; +class InstImm; +class InstJump; +class BranchInstBlock; + +uint32_t RS(Register r); +uint32_t RT(Register r); +uint32_t RT(uint32_t regCode); +uint32_t RT(FloatRegister r); +uint32_t RD(Register r); +uint32_t RD(FloatRegister r); +uint32_t RD(uint32_t regCode); +uint32_t SA(uint32_t value); +uint32_t 
SA(FloatRegister r); + +Register toRS (Instruction &i); +Register toRT (Instruction &i); +Register toRD (Instruction &i); +Register toR (Instruction &i); + +// MIPS enums for instruction fields +enum Opcode { + op_special = 0 << OpcodeShift, + op_regimm = 1 << OpcodeShift, + + op_j = 2 << OpcodeShift, + op_jal = 3 << OpcodeShift, + op_beq = 4 << OpcodeShift, + op_bne = 5 << OpcodeShift, + op_blez = 6 << OpcodeShift, + op_bgtz = 7 << OpcodeShift, + + op_addi = 8 << OpcodeShift, + op_addiu = 9 << OpcodeShift, + op_slti = 10 << OpcodeShift, + op_sltiu = 11 << OpcodeShift, + op_andi = 12 << OpcodeShift, + op_ori = 13 << OpcodeShift, + op_xori = 14 << OpcodeShift, + op_lui = 15 << OpcodeShift, + + op_cop1 = 17 << OpcodeShift, + op_cop1x = 19 << OpcodeShift, + + op_beql = 20 << OpcodeShift, + op_bnel = 21 << OpcodeShift, + op_blezl = 22 << OpcodeShift, + op_bgtzl = 23 << OpcodeShift, + + op_special2 = 28 << OpcodeShift, + op_special3 = 31 << OpcodeShift, + + op_lb = 32 << OpcodeShift, + op_lh = 33 << OpcodeShift, + op_lwl = 34 << OpcodeShift, + op_lw = 35 << OpcodeShift, + op_lbu = 36 << OpcodeShift, + op_lhu = 37 << OpcodeShift, + op_lwr = 38 << OpcodeShift, + op_sb = 40 << OpcodeShift, + op_sh = 41 << OpcodeShift, + op_swl = 42 << OpcodeShift, + op_sw = 43 << OpcodeShift, + op_swr = 46 << OpcodeShift, + + op_lwc1 = 49 << OpcodeShift, + op_ldc1 = 53 << OpcodeShift, + + op_swc1 = 57 << OpcodeShift, + op_sdc1 = 61 << OpcodeShift +}; + +enum RSField { + rs_zero = 0 << RSShift, + // cop1 encoding of RS field. + rs_mfc1 = 0 << RSShift, + rs_one = 1 << RSShift, + rs_cfc1 = 2 << RSShift, + rs_mfhc1 = 3 << RSShift, + rs_mtc1 = 4 << RSShift, + rs_ctc1 = 6 << RSShift, + rs_mthc1 = 7 << RSShift, + rs_bc1 = 8 << RSShift, + rs_s = 16 << RSShift, + rs_d = 17 << RSShift, + rs_w = 20 << RSShift, + rs_ps = 22 << RSShift +}; + +enum RTField { + rt_zero = 0 << RTShift, + // regimm encoding of RT field. 
+ rt_bltz = 0 << RTShift, + rt_bgez = 1 << RTShift, + rt_bltzal = 16 << RTShift, + rt_bgezal = 17 << RTShift +}; + +enum FunctionField { + // special encoding of function field. + ff_sll = 0, + ff_movci = 1, + ff_srl = 2, + ff_sra = 3, + ff_sllv = 4, + ff_srlv = 6, + ff_srav = 7, + + ff_jr = 8, + ff_jalr = 9, + ff_movz = 10, + ff_movn = 11, + ff_break = 13, + + ff_mfhi = 16, + ff_mflo = 18, + + ff_mult = 24, + ff_multu = 25, + ff_div = 26, + ff_divu = 27, + + ff_add = 32, + ff_addu = 33, + ff_sub = 34, + ff_subu = 35, + ff_and = 36, + ff_or = 37, + ff_xor = 38, + ff_nor = 39, + + ff_slt = 42, + ff_sltu = 43, + + // special2 encoding of function field. + ff_mul = 2, + ff_clz = 32, + ff_clo = 33, + + // special3 encoding of function field. + ff_ext = 0, + ff_ins = 4, + + // cop1 encoding of function field. + ff_add_fmt = 0, + ff_sub_fmt = 1, + ff_mul_fmt = 2, + ff_div_fmt = 3, + ff_sqrt_fmt = 4, + ff_abs_fmt = 5, + ff_mov_fmt = 6, + ff_neg_fmt = 7, + + ff_round_w_fmt = 12, + ff_trunc_w_fmt = 13, + ff_ceil_w_fmt = 14, + ff_floor_w_fmt = 15, + + ff_cvt_s_fmt = 32, + ff_cvt_d_fmt = 33, + ff_cvt_w_fmt = 36, + + ff_c_f_fmt = 48, + ff_c_un_fmt = 49, + ff_c_eq_fmt = 50, + ff_c_ueq_fmt = 51, + ff_c_olt_fmt = 52, + ff_c_ult_fmt = 53, + ff_c_ole_fmt = 54, + ff_c_ule_fmt = 55, +}; + +class MacroAssemblerMIPS; +class Operand; + +// A BOffImm16 is a 16 bit immediate that is used for branches. 
+class BOffImm16 +{ + uint32_t data; + + public: + uint32_t encode() { + JS_ASSERT(!isInvalid()); + return data; + } + int32_t decode() { + JS_ASSERT(!isInvalid()); + return (int32_t(data << 18) >> 16) + 4; + } + + explicit BOffImm16(int offset) + : data ((offset - 4) >> 2 & Imm16Mask) + { + JS_ASSERT((offset & 0x3) == 0); + JS_ASSERT(isInRange(offset)); + } + static bool isInRange(int offset) { + if ((offset - 4) < (INT16_MIN << 2)) + return false; + if ((offset - 4) > (INT16_MAX << 2)) + return false; + return true; + } + static const uint32_t INVALID = 0x00020000; + BOffImm16() + : data(INVALID) + { } + + bool isInvalid() { + return data == INVALID; + } + Instruction *getDest(Instruction *src); + + BOffImm16(InstImm inst); +}; + +// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps. +class JOffImm26 +{ + uint32_t data; + + public: + uint32_t encode() { + JS_ASSERT(!isInvalid()); + return data; + } + int32_t decode() { + JS_ASSERT(!isInvalid()); + return (int32_t(data << 8) >> 6) + 4; + } + + explicit JOffImm26(int offset) + : data ((offset - 4) >> 2 & Imm26Mask) + { + JS_ASSERT((offset & 0x3) == 0); + JS_ASSERT(isInRange(offset)); + } + static bool isInRange(int offset) { + if ((offset - 4) < -536870912) + return false; + if ((offset - 4) > 536870908) + return false; + return true; + } + static const uint32_t INVALID = 0x20000000; + JOffImm26() + : data(INVALID) + { } + + bool isInvalid() { + return data == INVALID; + } + Instruction *getDest(Instruction *src); + +}; + +class Imm16 +{ + uint16_t value; + + public: + Imm16(); + Imm16(uint32_t imm) + : value(imm) + { } + uint32_t encode() { + return value; + } + int32_t decodeSigned() { + return value; + } + uint32_t decodeUnsigned() { + return value; + } + static bool isInSignedRange(int32_t imm) { + return imm >= INT16_MIN && imm <= INT16_MAX; + } + static bool isInUnsignedRange(uint32_t imm) { + return imm <= UINT16_MAX ; + } + static Imm16 lower (Imm32 imm) { + return Imm16(imm.value & 
0xffff); + } + static Imm16 upper (Imm32 imm) { + return Imm16((imm.value >> 16) & 0xffff); + } +}; + +class Operand +{ + public: + enum Tag { + REG, + FREG, + MEM + }; + + private: + Tag tag : 3; + uint32_t reg : 5; + int32_t offset; + + public: + Operand (Register reg_) + : tag(REG), reg(reg_.code()) + { } + + Operand (FloatRegister freg) + : tag(FREG), reg(freg.code()) + { } + + Operand (Register base, Imm32 off) + : tag(MEM), reg(base.code()), offset(off.value) + { } + + Operand (Register base, int32_t off) + : tag(MEM), reg(base.code()), offset(off) + { } + + Operand (const Address &addr) + : tag(MEM), reg(addr.base.code()), offset(addr.offset) + { } + + Tag getTag() const { + return tag; + } + + Register toReg() const { + JS_ASSERT(tag == REG); + return Register::FromCode(reg); + } + + FloatRegister toFReg() const { + JS_ASSERT(tag == FREG); + return FloatRegister::FromCode(reg); + } + + void toAddr(Register *r, Imm32 *dest) const { + JS_ASSERT(tag == MEM); + *r = Register::FromCode(reg); + *dest = Imm32(offset); + } + Address toAddress() const { + JS_ASSERT(tag == MEM); + return Address(Register::FromCode(reg), offset); + } + int32_t disp() const { + JS_ASSERT(tag == MEM); + return offset; + } + + int32_t base() const { + JS_ASSERT(tag == MEM); + return reg; + } + Register baseReg() const { + JS_ASSERT(tag == MEM); + return Register::FromCode(reg); + } +}; + +void +PatchJump(CodeLocationJump &jump_, CodeLocationLabel label); +class Assembler; +typedef js::jit::AssemblerBuffer<1024, Instruction> MIPSBuffer; + +class Assembler +{ + public: + + enum Condition { + Equal, + NotEqual, + Above, + AboveOrEqual, + Below, + BelowOrEqual, + GreaterThan, + GreaterThanOrEqual, + LessThan, + LessThanOrEqual, + Overflow, + Signed, + NotSigned, + Zero, + NonZero, + Always, + }; + + enum DoubleCondition { + // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. 
+ DoubleOrdered, + DoubleEqual, + DoubleNotEqual, + DoubleGreaterThan, + DoubleGreaterThanOrEqual, + DoubleLessThan, + DoubleLessThanOrEqual, + // If either operand is NaN, these conditions always evaluate to true. + DoubleUnordered, + DoubleEqualOrUnordered, + DoubleNotEqualOrUnordered, + DoubleGreaterThanOrUnordered, + DoubleGreaterThanOrEqualOrUnordered, + DoubleLessThanOrUnordered, + DoubleLessThanOrEqualOrUnordered + }; + + enum FPConditionBit { + FCC0 = 0, + FCC1, + FCC2, + FCC3, + FCC4, + FCC5, + FCC6, + FCC7 + }; + + // :( this should be protected, but since CodeGenerator + // wants to use it, it needs to go out here :( + + BufferOffset nextOffset() { + return m_buffer.nextOffset(); + } + + protected: + Instruction * editSrc (BufferOffset bo) { + return m_buffer.getInst(bo); + } + public: + uint32_t actualOffset(uint32_t) const; + uint32_t actualIndex(uint32_t) const; + static uint8_t *PatchableJumpAddress(JitCode *code, uint32_t index); + protected: + + // structure for fixing up pc-relative loads/jumps when the machine code + // gets moved (executable copy, gc, etc.) 
+ struct RelativePatch + { + // the offset within the code buffer where the value is loaded that + // we want to fix-up + BufferOffset offset; + void *target; + Relocation::Kind kind; + + RelativePatch(BufferOffset offset, void *target, Relocation::Kind kind) + : offset(offset), + target(target), + kind(kind) + { } + }; + + js::Vector codeLabels_; + js::Vector jumps_; + js::Vector longJumps_; + AsmJSAbsoluteLinkVector asmJSAbsoluteLinks_; + + CompactBufferWriter jumpRelocations_; + CompactBufferWriter dataRelocations_; + CompactBufferWriter relocations_; + CompactBufferWriter preBarriers_; + + bool enoughMemory_; + + MIPSBuffer m_buffer; + + public: + Assembler() + : enoughMemory_(true), + m_buffer(), + isFinished(false) + { } + + static Condition InvertCondition(Condition cond); + static DoubleCondition InvertCondition(DoubleCondition cond); + + // MacroAssemblers hold onto gcthings, so they are traced by the GC. + void trace(JSTracer *trc); + void writeRelocation(BufferOffset src) { + jumpRelocations_.writeUnsigned(src.getOffset()); + } + + // As opposed to x86/x64 version, the data relocation has to be executed + // before to recover the pointer, and not after. 
+ void writeDataRelocation(const ImmGCPtr &ptr) { + if (ptr.value) + dataRelocations_.writeUnsigned(nextOffset().getOffset()); + } + void writePrebarrierOffset(CodeOffsetLabel label) { + preBarriers_.writeUnsigned(label.offset()); + } + + public: + static uintptr_t getPointer(uint8_t *); + + bool oom() const; + + void setPrinter(Sprinter *sp) { + } + + private: + bool isFinished; + public: + void finish(); + void executableCopy(void *buffer); + void copyJumpRelocationTable(uint8_t *dest); + void copyDataRelocationTable(uint8_t *dest); + void copyPreBarrierTable(uint8_t *dest); + + bool addCodeLabel(CodeLabel label); + size_t numCodeLabels() const { + return codeLabels_.length(); + } + CodeLabel codeLabel(size_t i) { + return codeLabels_[i]; + } + + size_t numAsmJSAbsoluteLinks() const { + return asmJSAbsoluteLinks_.length(); + } + AsmJSAbsoluteLink asmJSAbsoluteLink(size_t i) const { + return asmJSAbsoluteLinks_[i]; + } + + // Size of the instruction stream, in bytes. + size_t size() const; + // Size of the jump relocation table, in bytes. + size_t jumpRelocationTableBytes() const; + size_t dataRelocationTableBytes() const; + size_t preBarrierTableBytes() const; + + // Size of the data table, in bytes. + size_t bytesNeeded() const; + + // Write a blob of binary into the instruction stream *OR* + // into a destination address. If dest is nullptr (the default), then the + // instruction gets written into the instruction stream. If dest is not null + // it is interpreted as a pointer to the location that we want the + // instruction to be written. + BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr); + // A static variant for the cases where we don't want to have an assembler + // object at all. Normally, you would use the dummy (nullptr) object. 
    static void writeInstStatic(uint32_t x, uint32_t *dest);

  public:
    // Pad the buffer until the write position reaches `alignment`.
    BufferOffset align(int alignment);
    BufferOffset as_nop();

    // Branch and jump instructions
    BufferOffset as_bal(BOffImm16 off);

    // Build the branch instruction template for the given operands/condition;
    // the immediate offset is filled in later by the binding machinery.
    InstImm getBranchCode(bool isCall);
    InstImm getBranchCode(Register s, Register t, Condition c);
    InstImm getBranchCode(Register s, Condition c);
    InstImm getBranchCode(bool testTrue, FPConditionBit fcc);

    BufferOffset as_j(JOffImm26 off);
    BufferOffset as_jal(JOffImm26 off);

    BufferOffset as_jr(Register rs);
    BufferOffset as_jalr(Register rs);

    // Arithmetic instructions
    BufferOffset as_addu(Register rd, Register rs, Register rt);
    BufferOffset as_addiu(Register rd, Register rs, int32_t j);
    BufferOffset as_subu(Register rd, Register rs, Register rt);
    BufferOffset as_mult(Register rs, Register rt);
    BufferOffset as_multu(Register rs, Register rt);
    BufferOffset as_div(Register rs, Register rt);
    BufferOffset as_divu(Register rs, Register rt);
    BufferOffset as_mul(Register rd, Register rs, Register rt);

    // Logical instructions
    BufferOffset as_and(Register rd, Register rs, Register rt);
    BufferOffset as_or(Register rd, Register rs, Register rt);
    BufferOffset as_xor(Register rd, Register rs, Register rt);
    BufferOffset as_nor(Register rd, Register rs, Register rt);

    BufferOffset as_andi(Register rd, Register rs, int32_t j);
    BufferOffset as_ori(Register rd, Register rs, int32_t j);
    BufferOffset as_xori(Register rd, Register rs, int32_t j);
    BufferOffset as_lui(Register rd, int32_t j);

    // Shift instructions
    // as_sll(zero, zero, x) instructions are reserved as nop
    BufferOffset as_sll(Register rd, Register rt, uint16_t sa);
    BufferOffset as_sllv(Register rd, Register rt, Register rs);
    BufferOffset as_srl(Register rd, Register rt, uint16_t sa);
    BufferOffset as_srlv(Register rd, Register rt, Register rs);
    BufferOffset as_sra(Register rd, Register rt, uint16_t sa);
    BufferOffset as_srav(Register rd, Register rt, Register rs);
    BufferOffset as_rotr(Register rd, Register rt, uint16_t sa);
    BufferOffset as_rotrv(Register rd, Register rt, Register rs);

    // Load and store instructions
    BufferOffset as_lb(Register rd, Register rs, int16_t off);
    BufferOffset as_lbu(Register rd, Register rs, int16_t off);
    BufferOffset as_lh(Register rd, Register rs, int16_t off);
    BufferOffset as_lhu(Register rd, Register rs, int16_t off);
    BufferOffset as_lw(Register rd, Register rs, int16_t off);
    BufferOffset as_lwl(Register rd, Register rs, int16_t off);
    BufferOffset as_lwr(Register rd, Register rs, int16_t off);
    BufferOffset as_sb(Register rd, Register rs, int16_t off);
    BufferOffset as_sh(Register rd, Register rs, int16_t off);
    BufferOffset as_sw(Register rd, Register rs, int16_t off);
    BufferOffset as_swl(Register rd, Register rs, int16_t off);
    BufferOffset as_swr(Register rd, Register rs, int16_t off);

    // Move from HI/LO register.
    BufferOffset as_mfhi(Register rd);
    BufferOffset as_mflo(Register rd);

    // Set on less than.
    BufferOffset as_slt(Register rd, Register rs, Register rt);
    BufferOffset as_sltu(Register rd, Register rs, Register rt);
    BufferOffset as_slti(Register rd, Register rs, int32_t j);
    BufferOffset as_sltiu(Register rd, Register rs, uint32_t j);

    // Conditional move.
    BufferOffset as_movz(Register rd, Register rs, Register rt);
    BufferOffset as_movn(Register rd, Register rs, Register rt);
    BufferOffset as_movt(Register rd, Register rs, uint16_t cc = 0);
    BufferOffset as_movf(Register rd, Register rs, uint16_t cc = 0);

    // Bit twiddling.
    BufferOffset as_clz(Register rd, Register rs, Register rt = Register::FromCode(0));
    BufferOffset as_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
    BufferOffset as_ext(Register rt, Register rs, uint16_t pos, uint16_t size);

    // FP instructions

    // Use these two functions only when you are sure address is aligned.
    // Otherwise, use ma_ld and ma_sd.
    BufferOffset as_ld(FloatRegister fd, Register base, int32_t off);
    BufferOffset as_sd(FloatRegister fd, Register base, int32_t off);

    // Single-precision load/store.
    BufferOffset as_ls(FloatRegister fd, Register base, int32_t off);
    BufferOffset as_ss(FloatRegister fd, Register base, int32_t off);

    BufferOffset as_movs(FloatRegister fd, FloatRegister fs);
    BufferOffset as_movd(FloatRegister fd, FloatRegister fs);

    // Moves between integer and FP registers.
    BufferOffset as_mtc1(Register rt, FloatRegister fs);
    BufferOffset as_mfc1(Register rt, FloatRegister fs);

  protected:
    // These instructions should only be used to access the odd part of
    // 64-bit register pair. Do not use odd registers as 32-bit registers.
    // :TODO: Bug 972836, Remove _Odd functions once we can use odd regs.
    BufferOffset as_ls_Odd(FloatRegister fd, Register base, int32_t off);
    BufferOffset as_ss_Odd(FloatRegister fd, Register base, int32_t off);
    BufferOffset as_mtc1_Odd(Register rt, FloatRegister fs);
  public:
    // Made public because CodeGenerator uses it to check for -0
    BufferOffset as_mfc1_Odd(Register rt, FloatRegister fs);

    // FP convert instructions
    BufferOffset as_ceilws(FloatRegister fd, FloatRegister fs);
    BufferOffset as_floorws(FloatRegister fd, FloatRegister fs);
    BufferOffset as_roundws(FloatRegister fd, FloatRegister fs);
    BufferOffset as_truncws(FloatRegister fd, FloatRegister fs);

    BufferOffset as_ceilwd(FloatRegister fd, FloatRegister fs);
    BufferOffset as_floorwd(FloatRegister fd, FloatRegister fs);
    BufferOffset as_roundwd(FloatRegister fd, FloatRegister fs);
    BufferOffset as_truncwd(FloatRegister fd, FloatRegister fs);

    BufferOffset as_cvtdl(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtds(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtdw(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtld(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtls(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtsd(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtsl(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtsw(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtwd(FloatRegister fd, FloatRegister fs);
    BufferOffset as_cvtws(FloatRegister fd, FloatRegister fs);

    // FP arithmetic instructions
    BufferOffset as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft);
    BufferOffset as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
    BufferOffset as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
    BufferOffset as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft);

    BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
    BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
    BufferOffset as_negd(FloatRegister fd, FloatRegister fs);

    BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
    BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
    BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
    BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
    BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
    BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);

    // FP compare instructions
    // Each comparison sets the FP condition bit `fcc` (default FCC0);
    // single-precision variants.
    BufferOffset as_cfs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_cuns(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_ceqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_cueqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_colts(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_cults(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_coles(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_cules(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);

    // Double-precision variants.
    BufferOffset as_cfd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_cund(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_ceqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_cueqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_coltd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_cultd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_coled(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
    BufferOffset as_culed(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);


    // label operations
    void bind(Label *label, BufferOffset boff = BufferOffset());
    void bind(RepatchLabel *label);
    uint32_t currentOffset() {
        return nextOffset().getOffset();
    }
    void retarget(Label *label, Label *target);
    void Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address);

    // See Bind
    size_t labelOffsetToPatchOffset(size_t offset) {
        return actualOffset(offset);
    }

    void call(Label *label);
    void call(void *target);

    void as_break(uint32_t code);

  public:
    static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);
    static void TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);

  protected:
    InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
    void bind(InstImm *inst, uint32_t branch, uint32_t target);

    // Queue a jump to an absolute target; JITCODE jumps additionally get a
    // jump-relocation entry so the GC can trace them.
    void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
        enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
        if (kind == Relocation::JITCODE)
            writeRelocation(src);
    }

    void addLongJump(BufferOffset src) {
        enoughMemory_ &= longJumps_.append(src.getOffset());
    }

  public:
    size_t numLongJumps() const {
        return longJumps_.length();
    }
    uint32_t longJump(size_t i) {
        return longJumps_[i];
    }

    // Copy the assembly code to the given buffer, and perform any
pending + // relocations relying on the target address. + void executableCopy(uint8_t *buffer); + + void flushBuffer() { + } + + static uint32_t patchWrite_NearCallSize(); + static uint32_t nopSize() { return 4; } + + static uint32_t extractLuiOriValue(Instruction *inst0, Instruction *inst1); + static void updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value); + static void writeLuiOriInstructions(Instruction *inst, Instruction *inst1, + Register reg, uint32_t value); + + static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall); + static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, + PatchedImmPtr expectedValue); + static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, + ImmPtr expectedValue); + static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm); + static uint32_t alignDoubleArg(uint32_t offset) { + return (offset + 1U) &~ 1U; + } + + static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr); + + static void ToggleToJmp(CodeLocationLabel inst_); + static void ToggleToCmp(CodeLocationLabel inst_); + + static void ToggleCall(CodeLocationLabel inst_, bool enabled); + + static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst); + void processCodeLabels(uint8_t *rawCode); + + bool bailed() { + return m_buffer.bail(); + } +}; // Assembler + +// An Instruction is a structure for both encoding and decoding any and all +// MIPS instructions. +class Instruction +{ + protected: + // sll zero, zero, 0 + static const uint32_t NopInst = 0x00000000; + + uint32_t data; + + // Standard constructor + Instruction (uint32_t data_) : data(data_) { } + + // You should never create an instruction directly. You should create a + // more specific instruction which will eventually call one of these + // constructors for you. 
+ public: + uint32_t encode() const { + return data; + } + + void makeNop() { + data = NopInst; + } + + void setData(uint32_t data) { + this->data = data; + } + + const Instruction & operator=(const Instruction &src) { + data = src.data; + return *this; + } + + // Extract the one particular bit. + uint32_t extractBit(uint32_t bit) { + return (encode() >> bit) & 1; + } + // Extract a bit field out of the instruction + uint32_t extractBitField(uint32_t hi, uint32_t lo) { + return (encode() >> lo) & ((2 << (hi - lo)) - 1); + } + // Since all MIPS instructions have opcode, the opcode + // extractor resides in the base class. + uint32_t extractOpcode() { + return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift); + } + // Return the fields at their original place in the instruction encoding. + Opcode OpcodeFieldRaw() const { + return static_cast(encode() & OpcodeMask); + } + + // Get the next instruction in the instruction stream. + // This does neat things like ignoreconstant pools and their guards. + Instruction *next(); + + // Sometimes, an api wants a uint32_t (or a pointer to it) rather than + // an instruction. raw() just coerces this into a pointer to a uint32_t + const uint32_t *raw() const { return &data; } + uint32_t size() const { return 4; } +}; // Instruction + +// make sure that it is the right size +static_assert(sizeof(Instruction) == 4, "Size of Instruction class has to be 4 bytes."); + +class InstNOP : public Instruction +{ + public: + InstNOP() + : Instruction(NopInst) + { } + +}; + +// Class for register type instructions. 
// Class for register (R-type) instructions. Each constructor ORs together
// the opcode, register/condition fields, shift amount and function field to
// form the final 32-bit encoding.
class InstReg : public Instruction
{
  public:
    InstReg(Opcode op, Register rd, FunctionField ff)
      : Instruction(op | RD(rd) | ff)
    { }
    InstReg(Opcode op, Register rs, Register rt, FunctionField ff)
      : Instruction(op | RS(rs) | RT(rt) | ff)
    { }
    InstReg(Opcode op, Register rs, Register rt, Register rd, FunctionField ff)
      : Instruction(op | RS(rs) | RT(rt) | RD(rd) | ff)
    { }
    InstReg(Opcode op, Register rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
      : Instruction(op | RS(rs) | RT(rt) | RD(rd) | SA(sa) | ff)
    { }
    InstReg(Opcode op, RSField rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
      : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
    { }
    InstReg(Opcode op, Register rs, RTField rt, Register rd, uint32_t sa, FunctionField ff)
      : Instruction(op | RS(rs) | rt | RD(rd) | SA(sa) | ff)
    { }
    InstReg(Opcode op, Register rs, uint32_t cc, Register rd, uint32_t sa, FunctionField ff)
      : Instruction(op | RS(rs) | cc | RD(rd) | SA(sa) | ff)
    { }
    InstReg(Opcode op, uint32_t code, FunctionField ff)
      : Instruction(op | code | ff)
    { }
    // for float point
    InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd)
      : Instruction(op | rs | RT(rt) | RD(rd))
    { }
    InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd, uint32_t sa, FunctionField ff)
      : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
    { }
    InstReg(Opcode op, RSField rs, Register rt, FloatRegister fs, FloatRegister fd, FunctionField ff)
      : Instruction(op | rs | RT(rt) | RD(fs) | SA(fd) | ff)
    { }
    InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fs, FloatRegister fd, FunctionField ff)
      : Instruction(op | rs | RT(ft) | RD(fs) | SA(fd) | ff)
    { }
    InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fd, uint32_t sa, FunctionField ff)
      : Instruction(op | rs | RT(ft) | RD(fd) | SA(sa) | ff)
    { }

    // Field extractors; each returns the raw field value from the encoding.
    uint32_t extractRS () {
        return extractBitField(RSShift + RSBits - 1, RSShift);
    }
    uint32_t extractRT () {
        return extractBitField(RTShift + RTBits - 1, RTShift);
    }
    uint32_t extractRD () {
        return extractBitField(RDShift + RDBits - 1, RDShift);
    }
    uint32_t extractSA () {
        return extractBitField(SAShift + SABits - 1, SAShift);
    }
    uint32_t extractFunctionField () {
        return extractBitField(FunctionShift + FunctionBits - 1, FunctionShift);
    }
};

// Class for branch, load and store instructions with immediate offset.
class InstImm : public Instruction
{
  public:
    void extractImm16(BOffImm16 *dest);

    InstImm(Opcode op, Register rs, Register rt, BOffImm16 off)
      : Instruction(op | RS(rs) | RT(rt) | off.encode())
    { }
    InstImm(Opcode op, Register rs, RTField rt, BOffImm16 off)
      : Instruction(op | RS(rs) | rt | off.encode())
    { }
    InstImm(Opcode op, RSField rs, uint32_t cc, BOffImm16 off)
      : Instruction(op | rs | cc | off.encode())
    { }
    InstImm(Opcode op, Register rs, Register rt, Imm16 off)
      : Instruction(op | RS(rs) | RT(rt) | off.encode())
    { }
    InstImm(uint32_t raw)
      : Instruction(raw)
    { }
    // For floating-point loads and stores.
    InstImm(Opcode op, Register rs, FloatRegister rt, Imm16 off)
      : Instruction(op | RS(rs) | RT(rt) | off.encode())
    { }

    uint32_t extractOpcode() {
        return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
    }
    void setOpcode(Opcode op) {
        data = (data & ~OpcodeMask) | op;
    }
    uint32_t extractRS() {
        return extractBitField(RSShift + RSBits - 1, RSShift);
    }
    uint32_t extractRT() {
        return extractBitField(RTShift + RTBits - 1, RTShift);
    }
    void setRT(RTField rt) {
        data = (data & ~RTMask) | rt;
    }
    uint32_t extractImm16Value() {
        return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
    }
    void setBOffImm16(BOffImm16 off) {
        // Reset immediate field and replace it
        data = (data & ~Imm16Mask) | off.encode();
    }
    void setImm16(Imm16 off) {
        // Reset immediate field and replace it
        data = (data & ~Imm16Mask) | off.encode();
    }
};

// Class for Jump type instructions.
class InstJump : public Instruction
{
  public:
    InstJump(Opcode op, JOffImm26 off)
      : Instruction(op | off.encode())
    { }

    uint32_t extractImm26Value() {
        return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
    }
};

// Number of integer argument registers (a0-a3) in the MIPS O32 ABI.
static const uint32_t NumIntArgRegs = 4;

// Return (in *out) the integer argument register for the given argument
// slot, or false if the slot is passed on the stack.
// NOTE(review): relies on a0..a3 having consecutive register codes.
static inline bool
GetIntArgReg(uint32_t usedArgSlots, Register *out)
{
    if (usedArgSlots < NumIntArgRegs) {
        *out = Register::FromCode(a0.code() + usedArgSlots);
        return true;
    }
    return false;
}

// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
{
    // NOTE: We can't properly determine which regs are used if there are
    // float arguments. If this is needed, we will have to guess.
    JS_ASSERT(usedFloatArgs == 0);

    if (GetIntArgReg(usedIntArgs, out))
        return true;
    // Unfortunately, we have to assume things about the point at which
    // GetIntArgReg returns false, because we need to know how many registers it
    // can allocate.
    usedIntArgs -= NumIntArgRegs;
    if (usedIntArgs >= NumCallTempNonArgRegs)
        return false;
    *out = CallTempNonArgRegs[usedIntArgs];
    return true;
}

// Stack displacement (in bytes) of the argument in the given slot.
static inline uint32_t
GetArgStackDisp(uint32_t usedArgSlots)
{
    JS_ASSERT(usedArgSlots >= NumIntArgRegs);
    // Even register arguments have place reserved on stack.
    return usedArgSlots * sizeof(intptr_t);
}

} // namespace jit
} // namespace js

#endif /* jit_mips_Assembler_mips_h */