Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1160672 - Part 2/2 - Add Mozilla VIXL modifications. r=dougc

Parent: 96782481e8
Commit: 3906b011ff

@@ -0,0 +1,613 @@
// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "jsutil.h"
|
||||
|
||||
#include "jit/arm64/vixl/Assembler-vixl.h"
|
||||
#include "jit/Label.h"
|
||||
|
||||
namespace vixl {
|
||||
|
||||
|
||||
// Assembler
|
||||
void Assembler::Reset() {
|
||||
#ifdef DEBUG
|
||||
finalized_ = false;
|
||||
#endif
|
||||
pc_ = nullptr;
|
||||
}
|
||||
|
||||
void Assembler::FinalizeCode() {
|
||||
#ifdef DEBUG
|
||||
finalized_ = true;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
// A common implementation for the LinkAndGet<Type>OffsetTo helpers.
|
||||
//
|
||||
// If the label is bound, returns the offset as a multiple of element_size.
|
||||
// Otherwise, links the instruction to the label and returns the offset to encode
|
||||
// as a multiple of kInstructionSize.
|
||||
//
|
||||
// The offset is calculated by aligning the PC and label addresses down to a
|
||||
// multiple of element_size, then calculating the (scaled) offset between them.
|
||||
// This matches the semantics of adrp, for example.
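//
// As a worked example (hypothetical offsets): with element_size = kPageSize =
// 4096, a branch at byte offset 0x1100 and a label bound at byte offset 0x3080
// yield (0x3080 / 4096) - (0x1100 / 4096) = 3 - 1 = 2 pages, even though the
// raw byte distance between them is not itself a multiple of the page size.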
template <int element_size>
ptrdiff_t Assembler::LinkAndGetOffsetTo(BufferOffset branch, Label* label) {
  if (armbuffer_.oom())
    return js::jit::LabelBase::INVALID_OFFSET;

  // The label is bound: all uses are already linked.
  if (label->bound()) {
    ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() / element_size);
    ptrdiff_t label_offset = ptrdiff_t(label->offset() / element_size);
    return label_offset - branch_offset;
  }

  // The label is unbound and unused: store the offset in the label itself
  // for patching by bind().
  if (!label->used()) {
    label->use(branch.getOffset());
    return js::jit::LabelBase::INVALID_OFFSET;
  }

  // The label is unbound but used. Create an implicit linked list between
  // the branches, and update the linked-list head in the label struct.
  ptrdiff_t prevHeadOffset = static_cast<ptrdiff_t>(label->offset());
  label->use(branch.getOffset());
  VIXL_ASSERT(prevHeadOffset - branch.getOffset() != js::jit::LabelBase::INVALID_OFFSET);
  return prevHeadOffset - branch.getOffset();
}
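
// Each unresolved use thus stores the distance back to the previous use of
// the same label, forming an implicit singly linked list threaded through the
// code buffer; bind() later walks this chain and patches each link with the
// real target.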

ptrdiff_t Assembler::LinkAndGetByteOffsetTo(BufferOffset branch, Label* label) {
  return LinkAndGetOffsetTo<1>(branch, label);
}

ptrdiff_t Assembler::LinkAndGetInstructionOffsetTo(BufferOffset branch, Label* label) {
  return LinkAndGetOffsetTo<kInstructionSize>(branch, label);
}

ptrdiff_t Assembler::LinkAndGetPageOffsetTo(BufferOffset branch, Label* label) {
  return LinkAndGetOffsetTo<kPageSize>(branch, label);
}

BufferOffset Assembler::b(int imm26) {
  return EmitBranch(B | ImmUncondBranch(imm26));
}

void Assembler::b(Instruction* at, int imm26) {
  EmitBranch(at, B | ImmUncondBranch(imm26));
}

BufferOffset Assembler::b(int imm19, Condition cond) {
  return EmitBranch(B_cond | ImmCondBranch(imm19) | cond);
}

void Assembler::b(Instruction* at, int imm19, Condition cond) {
  EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond);
}

BufferOffset Assembler::b(Label* label) {
  // Flush the instruction buffer before calculating the relative offset.
  BufferOffset branch = b(0);
  Instruction* ins = getInstructionAt(branch);
  VIXL_ASSERT(ins->IsUncondBranchImm());

  // Encode the relative offset.
  b(ins, LinkAndGetInstructionOffsetTo(branch, label));
  return branch;
}

BufferOffset Assembler::b(Label* label, Condition cond) {
  // Flush the instruction buffer before calculating the relative offset.
  BufferOffset branch = b(0, Always);
  Instruction* ins = getInstructionAt(branch);
  VIXL_ASSERT(ins->IsCondBranchImm());

  // Encode the relative offset.
  b(ins, LinkAndGetInstructionOffsetTo(branch, label), cond);
  return branch;
}

void Assembler::bl(int imm26) {
  EmitBranch(BL | ImmUncondBranch(imm26));
}

void Assembler::bl(Instruction* at, int imm26) {
  EmitBranch(at, BL | ImmUncondBranch(imm26));
}

void Assembler::bl(Label* label) {
  // Flush the instruction buffer before calculating the relative offset.
  BufferOffset branch = b(0);
  Instruction* ins = getInstructionAt(branch);

  // Encode the relative offset.
  bl(ins, LinkAndGetInstructionOffsetTo(branch, label));
}

void Assembler::cbz(const Register& rt, int imm19) {
  EmitBranch(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
  EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbz(const Register& rt, Label* label) {
  // Flush the instruction buffer before calculating the relative offset.
  BufferOffset branch = b(0);
  Instruction* ins = getInstructionAt(branch);

  // Encode the relative offset.
  cbz(ins, rt, LinkAndGetInstructionOffsetTo(branch, label));
}

void Assembler::cbnz(const Register& rt, int imm19) {
  EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
  EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbnz(const Register& rt, Label* label) {
  // Flush the instruction buffer before calculating the relative offset.
  BufferOffset branch = b(0);
  Instruction* ins = getInstructionAt(branch);

  // Encode the relative offset.
  cbnz(ins, rt, LinkAndGetInstructionOffsetTo(branch, label));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  EmitBranch(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
  // Flush the instruction buffer before calculating the relative offset.
  BufferOffset branch = b(0);
  Instruction* ins = getInstructionAt(branch);

  // Encode the relative offset.
  tbz(ins, rt, bit_pos, LinkAndGetInstructionOffsetTo(branch, label));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  EmitBranch(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  // Flush the instruction buffer before calculating the relative offset.
  BufferOffset branch = b(0);
  Instruction* ins = getInstructionAt(branch);

  // Encode the relative offset.
  tbnz(ins, rt, bit_pos, LinkAndGetInstructionOffsetTo(branch, label));
}

void Assembler::adr(const Register& rd, int imm21) {
  VIXL_ASSERT(rd.Is64Bits());
  EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}

void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
  VIXL_ASSERT(rd.Is64Bits());
  EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd));
}

void Assembler::adr(const Register& rd, Label* label) {
  // Flush the instruction buffer before calculating the relative offset.
  // ADR is not a branch.
  BufferOffset offset = Emit(0);
  Instruction* ins = getInstructionAt(offset);

  // Encode the relative offset.
  adr(ins, rd, LinkAndGetByteOffsetTo(offset, label));
}

void Assembler::adrp(const Register& rd, int imm21) {
  VIXL_ASSERT(rd.Is64Bits());
  EmitBranch(ADRP | ImmPCRelAddress(imm21) | Rd(rd));
}

void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
  VIXL_ASSERT(rd.Is64Bits());
  EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
}

void Assembler::adrp(const Register& rd, Label* label) {
  VIXL_ASSERT(AllowPageOffsetDependentCode());

  BufferOffset offset = Emit(0);
  Instruction* ins = getInstructionAt(offset);

  // Encode the relative offset.
  adrp(ins, rd, LinkAndGetPageOffsetTo(offset, label));
}

BufferOffset Assembler::ands(const Register& rd, const Register& rn, const Operand& operand) {
  return Logical(rd, rn, operand, ANDS);
}

BufferOffset Assembler::tst(const Register& rn, const Operand& operand) {
  return ands(AppropriateZeroRegFor(rn), rn, operand);
}

void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) {
  LoadLiteralOp op = LoadLiteralOpFor(rt);
  Emit(at, op | ImmLLiteral(imm19) | Rt(rt));
}

BufferOffset Assembler::hint(SystemHint code) {
  return Emit(HINT | ImmHint(code) | Rt(xzr));
}

void Assembler::hint(Instruction* at, SystemHint code) {
  Emit(at, HINT | ImmHint(code) | Rt(xzr));
}

void Assembler::svc(Instruction* at, int code) {
  VIXL_ASSERT(is_uint16(code));
  Emit(at, SVC | ImmException(code));
}

void Assembler::nop(Instruction* at) {
  hint(at, NOP);
}

BufferOffset Assembler::Logical(const Register& rd, const Register& rn,
                                const Operand& operand, LogicalOp op)
{
  VIXL_ASSERT(rd.size() == rn.size());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();

    VIXL_ASSERT(immediate != 0);
    VIXL_ASSERT(immediate != -1);
    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }
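
    // For example (illustrative): bic(x0, x1, #0xff) carries op = BIC =
    // AND | NOT, so it is rewritten here as and(x0, x1, #~0xff); the NOT
    // bit itself never reaches the emitted instruction.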

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      return LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  } else {
    VIXL_ASSERT(operand.IsShiftedRegister());
    VIXL_ASSERT(operand.reg().size() == rd.size());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}

BufferOffset Assembler::LogicalImmediate(const Register& rd, const Register& rn,
                                         unsigned n, unsigned imm_s, unsigned imm_r,
                                         LogicalOp op)
{
  unsigned reg_size = rd.size();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  return Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
              ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) |
              dest_reg | Rn(rn));
}

BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
                                                const Operand& operand, FlagsUpdate S,
                                                Instr op)
{
  VIXL_ASSERT(operand.IsShiftedRegister());
  VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  return Emit(SF(rd) | op | Flags(S) |
              ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
              Rm(operand.reg()) | Rn(rn) | Rd(rd));
}

void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) {
  // Store the js::jit::PoolEntry index into the instruction.
  // finishPool() will walk over all literal load instructions
  // and use PatchConstantPoolLoad() to patch to the final relative offset.
  *((uint32_t*)load) |= Assembler::ImmLLiteral(index);
}

bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
  Instruction* load = reinterpret_cast<Instruction*>(loadAddr);

  // The load currently contains the js::jit::PoolEntry's index,
  // as written by InsertIndexIntoTag().
  uint32_t index = load->ImmLLiteral();

  // Each entry in the literal pool is uint32_t-sized.
  uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
  Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);

  load->SetImmLLiteral(source);
  return false; // Nothing uses the return value.
}

uint32_t MozBaseAssembler::PlaceConstantPoolBarrier(int offset) {
  MOZ_CRASH("PlaceConstantPoolBarrier");
}

struct PoolHeader {
  uint32_t data;

  struct Header {
    // The size should take into account the pool header.
    // The size is in units of Instruction (4 bytes), not bytes.
    union {
      struct {
        uint32_t size : 15;
        bool isNatural : 1;
        uint32_t ONES : 16;
      };
      uint32_t data;
    };

    Header(int size_, bool isNatural_)
      : size(size_),
        isNatural(isNatural_),
        ONES(0xffff)
    { }

    Header(uint32_t data)
      : data(data)
    {
      JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
      VIXL_ASSERT(ONES == 0xffff);
    }

    uint32_t raw() const {
      JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
      return data;
    }
  };

  PoolHeader(int size_, bool isNatural_)
    : data(Header(size_, isNatural_).raw())
  { }

  uint32_t size() const {
    Header tmp(data);
    return tmp.size;
  }

  uint32_t isNatural() const {
    Header tmp(data);
    return tmp.isNatural;
  }
};
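
// For example (hypothetical values): a pool of 7 data words plus the 1-word
// header gives size = 8, so a natural pool header encodes as
// (0xffff << 16) | (1 << 15) | 8 = 0xffff8008, assuming the low-bits-first
// bit-field layout the union above relies on.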

void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural) {
  JS_STATIC_ASSERT(sizeof(PoolHeader) == 4);

  // Get the total size of the pool.
  uint8_t* pool = start + sizeof(PoolHeader) + p->getPoolSize();

  uintptr_t size = pool - start;
  VIXL_ASSERT((size & 3) == 0);
  size = size >> 2;
  VIXL_ASSERT(size < (1 << 15));

  PoolHeader header(size, isNatural);
  *(PoolHeader*)start = header;
}

void MozBaseAssembler::WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural) {
  return;
}

void MozBaseAssembler::WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest) {
  int byteOffset = dest.getOffset() - branch.getOffset();
  VIXL_ASSERT(byteOffset % kInstructionSize == 0);

  int instOffset = byteOffset >> kInstructionSizeLog2;
  Assembler::b(inst, instOffset);
}

ptrdiff_t MozBaseAssembler::GetBranchOffset(const Instruction* ins) {
  // Branch instructions use an instruction offset.
  if (ins->BranchType() != UnknownBranchType)
    return ins->ImmPCRawOffset() * kInstructionSize;

  // ADR and ADRP encode relative offsets and therefore require patching
  // as if they were branches. ADR uses a byte offset.
  if (ins->IsADR())
    return ins->ImmPCRawOffset();

  // ADRP uses a page offset.
  if (ins->IsADRP())
    return ins->ImmPCRawOffset() * kPageSize;

  MOZ_CRASH("Unsupported branch type");
}

void MozBaseAssembler::RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final) {
  if (i->IsCondBranchImm()) {
    VIXL_ASSERT(i->IsCondB());
    Assembler::b(i, offset, cond);
    return;
  }
  MOZ_CRASH("Unsupported branch type");
}

void MozBaseAssembler::RetargetNearBranch(Instruction* i, int byteOffset, bool final) {
  const int instOffset = byteOffset >> kInstructionSizeLog2;

  // The only valid conditional instruction is B.
  if (i->IsCondBranchImm()) {
    VIXL_ASSERT(byteOffset % kInstructionSize == 0);
    VIXL_ASSERT(i->IsCondB());
    Condition cond = static_cast<Condition>(i->ConditionBranch());
    Assembler::b(i, instOffset, cond);
    return;
  }

  // Valid unconditional branches are B and BL.
  if (i->IsUncondBranchImm()) {
    VIXL_ASSERT(byteOffset % kInstructionSize == 0);
    if (i->IsUncondB()) {
      Assembler::b(i, instOffset);
    } else {
      VIXL_ASSERT(i->IsBL());
      Assembler::bl(i, instOffset);
    }

    VIXL_ASSERT(i->ImmUncondBranch() == instOffset);
    return;
  }

  // Valid compare branches are CBZ and CBNZ.
  if (i->IsCompareBranch()) {
    VIXL_ASSERT(byteOffset % kInstructionSize == 0);
    Register rt = i->SixtyFourBits() ? Register::XRegFromCode(i->Rt())
                                     : Register::WRegFromCode(i->Rt());

    if (i->IsCBZ()) {
      Assembler::cbz(i, rt, instOffset);
    } else {
      VIXL_ASSERT(i->IsCBNZ());
      Assembler::cbnz(i, rt, instOffset);
    }

    VIXL_ASSERT(i->ImmCmpBranch() == instOffset);
    return;
  }

  // Valid test branches are TBZ and TBNZ.
  if (i->IsTestBranch()) {
    VIXL_ASSERT(byteOffset % kInstructionSize == 0);
    // Opposite of ImmTestBranchBit(): the MSB of the bit position comes from
    // the b5 field, and the low five bits come from the b40 field.
    unsigned bit_pos = (i->ImmTestBranchBit5() << 5) | (i->ImmTestBranchBit40());
    VIXL_ASSERT(is_uint6(bit_pos));

    // Register size doesn't matter for the encoding.
    Register rt = Register::XRegFromCode(i->Rt());

    if (i->IsTBZ()) {
      Assembler::tbz(i, rt, bit_pos, instOffset);
    } else {
      VIXL_ASSERT(i->IsTBNZ());
      Assembler::tbnz(i, rt, bit_pos, instOffset);
    }

    VIXL_ASSERT(i->ImmTestBranch() == instOffset);
    return;
  }

  if (i->IsADR()) {
    Register rd = Register::XRegFromCode(i->Rd());
    Assembler::adr(i, rd, byteOffset);
    return;
  }

  if (i->IsADRP()) {
    const int pageOffset = byteOffset >> kPageSizeLog2;
    Register rd = Register::XRegFromCode(i->Rd());
    Assembler::adrp(i, rd, pageOffset);
    return;
  }

  MOZ_CRASH("Unsupported branch type");
}

void MozBaseAssembler::RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond) {
  MOZ_CRASH("RetargetFarBranch()");
}

} // namespace vixl

@@ -0,0 +1,176 @@

// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef jit_arm64_vixl_MozBaseAssembler_vixl_h
#define jit_arm64_vixl_MozBaseAssembler_vixl_h

#include "jit/arm64/vixl/Constants-vixl.h"
#include "jit/arm64/vixl/Instructions-vixl.h"

#include "jit/shared/Assembler-shared.h"
#include "jit/shared/IonAssemblerBufferWithConstantPools.h"

namespace vixl {

using js::jit::BufferOffset;

class MozBaseAssembler;
typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction, MozBaseAssembler> ARMBuffer;

// Base class for vixl::Assembler, for isolating Moz-specific changes to VIXL.
class MozBaseAssembler : public js::jit::AssemblerShared {
  // Buffer initialization constants.
  static const unsigned BufferGuardSize = 1;
  static const unsigned BufferHeaderSize = 1;
  static const size_t BufferCodeAlignment = 8;
  static const size_t BufferMaxPoolOffset = 1024;
  static const unsigned BufferPCBias = 0;
  static const uint32_t BufferAlignmentFillInstruction = BRK | (0xdead << ImmException_offset);
  static const uint32_t BufferNopFillInstruction = HINT | (31 << Rt_offset);
  static const unsigned BufferNumDebugNopsToInsert = 0;

 public:
  MozBaseAssembler()
    : armbuffer_(BufferGuardSize,
                 BufferHeaderSize,
                 BufferCodeAlignment,
                 BufferMaxPoolOffset,
                 BufferPCBias,
                 BufferAlignmentFillInstruction,
                 BufferNopFillInstruction,
                 BufferNumDebugNopsToInsert)
  { }

 public:
  // Helper function for use with the ARMBuffer.
  // The MacroAssembler must create an AutoJitContextAlloc before initializing the buffer.
  void initWithAllocator() {
    armbuffer_.initWithAllocator();
  }

  // Return the Instruction at a given byte offset.
  Instruction* getInstructionAt(BufferOffset offset) {
    return armbuffer_.getInst(offset);
  }

  // Return the byte offset of a bound label.
  template <typename T>
  inline T GetLabelByteOffset(const js::jit::Label* label) {
    VIXL_ASSERT(label->bound());
    JS_STATIC_ASSERT(sizeof(T) >= sizeof(uint32_t));
    return reinterpret_cast<T>(label->offset());
  }

 protected:
  // Emit the instruction, returning its offset.
  BufferOffset Emit(Instr instruction, bool isBranch = false) {
    JS_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    return armbuffer_.putInt(*(uint32_t*)(&instruction), isBranch);
  }

  BufferOffset EmitBranch(Instr instruction) {
    return Emit(instruction, true);
  }

 public:
  // Emit the instruction at |at|.
  static void Emit(Instruction* at, Instr instruction) {
    JS_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    memcpy(at, &instruction, sizeof(instruction));
  }

  static void EmitBranch(Instruction* at, Instr instruction) {
    // TODO: Assert that the buffer already has the instruction marked as a branch.
    Emit(at, instruction);
  }

  // Emit data inline in the instruction stream.
  BufferOffset EmitData(void const * data, unsigned size) {
    VIXL_ASSERT(size % 4 == 0);
    return armbuffer_.allocEntry(size / sizeof(uint32_t), 0, (uint8_t*)(data), nullptr);
  }

 public:
  // Size of the code generated in bytes, including pools.
  size_t SizeOfCodeGenerated() const {
    return armbuffer_.size();
  }

  // Move the pool into the instruction stream.
  void flushBuffer() {
    armbuffer_.flushPool();
  }

  // Inhibit pool flushing for the given number of instructions.
  // Generating more than |maxInst| instructions in a no-pool region
  // triggers an assertion within the ARMBuffer.
  // Does not nest.
  void enterNoPool(size_t maxInst) {
    armbuffer_.enterNoPool(maxInst);
  }

  // Marks the end of a no-pool region.
  void leaveNoPool() {
    armbuffer_.leaveNoPool();
  }
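
  // Illustrative usage of the no-pool region API (names as declared above):
  //   enterNoPool(3);  // the next three instructions must stay contiguous
  //   /* ...emit exactly three instructions... */
  //   leaveNoPool();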

 public:
  // Static interface used by IonAssemblerBufferWithConstantPools.
  static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
  static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
  static uint32_t PlaceConstantPoolBarrier(int offset);

  static void WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural);
  static void WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural);
  static void WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest);

  static ptrdiff_t GetBranchOffset(const Instruction* i);
  static void RetargetNearBranch(Instruction* i, int offset, Condition cond, bool final = true);
  static void RetargetNearBranch(Instruction* i, int offset, bool final = true);
  static void RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Condition cond);

 protected:
  // The buffer into which code and relocation info are generated.
  ARMBuffer armbuffer_;

  js::jit::CompactBufferWriter jumpRelocations_;
  js::jit::CompactBufferWriter dataRelocations_;
  js::jit::CompactBufferWriter relocations_;
  js::jit::CompactBufferWriter preBarriers_;

  // Literal pools.
  mozilla::Array<js::jit::Pool, 4> pools_;
};

} // namespace vixl

#endif // jit_arm64_vixl_MozBaseAssembler_vixl_h
@@ -0,0 +1,138 @@

// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "jit/arm64/vixl/Instructions-vixl.h"
|
||||
|
||||
namespace vixl {
|
||||
|
||||
bool Instruction::IsUncondB() const {
|
||||
return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | B);
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsCondB() const {
|
||||
return Mask(ConditionalBranchMask) == (ConditionalBranchFixed | B_cond);
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsBL() const {
|
||||
return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | BL);
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsBR() const {
|
||||
return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BR);
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsBLR() const {
|
||||
return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BLR);
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsTBZ() const {
|
||||
return Mask(TestBranchMask) == TBZ;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsTBNZ() const {
|
||||
return Mask(TestBranchMask) == TBNZ;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsCBZ() const {
|
||||
return Mask(CompareBranchMask) == CBZ_w || Mask(CompareBranchMask) == CBZ_x;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsCBNZ() const {
|
||||
return Mask(CompareBranchMask) == CBNZ_w || Mask(CompareBranchMask) == CBNZ_x;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsLDR() const {
|
||||
return Mask(LoadLiteralMask) == LDR_x_lit;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsADR() const {
|
||||
return Mask(PCRelAddressingMask) == ADR;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsADRP() const {
|
||||
return Mask(PCRelAddressingMask) == ADRP;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsBranchLinkImm() const {
|
||||
return Mask(UnconditionalBranchFMask) == (UnconditionalBranchFixed | BL);
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsTargetReachable(Instruction* target) const {
|
||||
VIXL_ASSERT(((target - this) & 3) == 0);
|
||||
int offset = (target - this) >> kInstructionSizeLog2;
|
||||
switch (BranchType()) {
|
||||
case CondBranchType:
|
||||
return is_int19(offset);
|
||||
case UncondBranchType:
|
||||
return is_int26(offset);
|
||||
case CompareBranchType:
|
||||
return is_int19(offset);
|
||||
case TestBranchType:
|
||||
return is_int14(offset);
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
}
|
||||
}
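
// For instance, a conditional branch encodes a signed 19-bit instruction
// offset, so it reaches about +/-2^18 instructions (+/-1 MiB) from itself,
// while a test branch's 14-bit field only reaches +/-2^13 instructions
// (+/-32 KiB).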

ptrdiff_t Instruction::ImmPCRawOffset() const {
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
  } else if (BranchType() == UnknownBranchType) {
    offset = ImmLLiteral();
  } else {
    offset = ImmBranch();
  }
  return offset;
}
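
// Note that the units of the value returned above depend on the instruction
// class: bytes for ADR, pages for ADRP, words for literal loads, and
// instructions for branches, so callers must scale accordingly (see
// MozBaseAssembler::GetBranchOffset()).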

void Instruction::SetBits32(int msb, int lsb, unsigned value) {
  uint32_t me;
  memcpy(&me, this, sizeof(me));
  uint32_t new_mask = (1 << (msb + 1)) - (1 << lsb);
  uint32_t keep_mask = ~new_mask;
  me = (me & keep_mask) | ((value << lsb) & new_mask);
  memcpy(this, &me, sizeof(me));
}
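
// For example, SetBits32(20, 16, 0x1f) computes new_mask = (1 << 21) - (1 << 16)
// = 0x001f0000 and so rewrites only bits 20:16, leaving the rest of the
// instruction word untouched.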

} // namespace vixl
@@ -0,0 +1,691 @@

// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "mozilla/DebugOnly.h"

#include "jit/arm64/vixl/Debugger-vixl.h"
#include "jit/arm64/vixl/Simulator-vixl.h"
#include "jit/IonTypes.h"
#include "vm/Runtime.h"

namespace vixl {

using mozilla::DebugOnly;
using js::jit::ABIFunctionType;

Simulator::Simulator() {
  decoder_ = js_new<Decoder>();
  if (!decoder_) {
    MOZ_ReportAssertionFailure("[unhandlable oom] Decoder", __FILE__, __LINE__);
    MOZ_CRASH();
  }

  // FIXME: This just leaks the Decoder object for now, which is probably OK.
  // FIXME: We should free it at some point.
  // FIXME: Note that it can't be stored in the SimulatorRuntime due to lifetime conflicts.
  this->init(decoder_, stdout);
}

Simulator::Simulator(Decoder* decoder, FILE* stream) {
  this->init(decoder, stream);
}

void Simulator::ResetState() {
  // Reset the system registers.
  nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
  fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);

  // Reset registers to 0.
  pc_ = nullptr;
  pc_modified_ = false;
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    set_xreg(i, 0xbadbeef);
  }
  // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
  uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1);
  VIXL_ASSERT(IsSignallingNaN(rawbits_to_double(nan_bits & kDRegMask)));
  VIXL_ASSERT(IsSignallingNaN(rawbits_to_float(nan_bits & kSRegMask)));
  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    set_dreg_bits(i, nan_bits);
  }
  // Returning to address 0 exits the Simulator.
  set_lr(kEndOfSimAddress);
  set_resume_pc(nullptr);
}

void Simulator::init(Decoder* decoder, FILE* stream) {
  // Ensure that shift operations act as the simulator expects.
  VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
  VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);

  // Set up the decoder.
  decoder_ = decoder;
  decoder_->AppendVisitor(this);

  stream_ = stream;
  print_disasm_ = new PrintDisassembler(stream_);
  set_coloured_trace(false);
  trace_parameters_ = LOG_NONE;

  ResetState();

  // Allocate and set up the simulator stack.
  stack_ = new byte[stack_size_];
  stack_limit_ = stack_ + stack_protection_size_;
  // Configure the starting stack pointer.
  //  - Find the top of the stack.
  byte* tos = stack_ + stack_size_;
  //  - There's a protection region at both ends of the stack.
  tos -= stack_protection_size_;
  //  - The stack pointer must be 16-byte aligned.
  tos = AlignDown(tos, 16);
  set_sp(tos);

  // Set the sample period to 10, as the VIXL examples and tests are short.
  instrumentation_ = new Instrument("vixl_stats.csv", 10);

  // Print a warning about exclusive-access instructions, but only the first
  // time they are encountered. This warning can be silenced using
  // SilenceExclusiveAccessWarning().
  print_exclusive_access_warning_ = true;

  lock_ = PR_NewLock();
  if (!lock_)
    MOZ_CRASH("Could not allocate simulator lock.");
  lockOwner_ = nullptr;
  redirection_ = nullptr;
}

Simulator* Simulator::Current() {
  return js::TlsPerThreadData.get()->simulator();
}

Simulator* Simulator::Create() {
  Decoder* decoder = js_new<vixl::Decoder>();
  if (!decoder) {
    MOZ_ReportAssertionFailure("[unhandlable oom] Decoder", __FILE__, __LINE__);
    MOZ_CRASH();
  }

  // FIXME: This just leaks the Decoder object for now, which is probably OK.
  // FIXME: We should free it at some point.
  // FIXME: Note that it can't be stored in the SimulatorRuntime due to lifetime conflicts.
  if (getenv("USE_DEBUGGER") != nullptr) {
    Debugger* debugger = js_new<Debugger>(decoder, stdout);
    if (!debugger) {
      MOZ_ReportAssertionFailure("[unhandlable oom] Debugger", __FILE__, __LINE__);
      MOZ_CRASH();
    }
    return debugger;
  }

  Simulator* sim = js_new<Simulator>();
  if (!sim) {
    MOZ_CRASH("NEED SIMULATOR");
    return nullptr;
  }
  sim->init(decoder, stdout);

  return sim;
}

void Simulator::Destroy(Simulator* sim) {
  js_delete(sim);
}

void Simulator::ExecuteInstruction() {
  // The program counter should always be aligned.
  VIXL_ASSERT(IsWordAligned(pc_));
  decoder_->Decode(pc_);
  const Instruction* rpc = resume_pc_;
  increment_pc();

  if (MOZ_UNLIKELY(rpc)) {
    JSRuntime::innermostAsmJSActivation()->setResumePC((void*)pc());
    set_pc(rpc);
    // Just calling set_pc turns the pc_modified_ flag on, which means it doesn't
    // auto-step after executing the next instruction. Force that to off so it
    // will auto-step after executing the first instruction of the handler.
    pc_modified_ = false;
    resume_pc_ = nullptr;
  }
}

uintptr_t Simulator::stackLimit() const {
  return reinterpret_cast<uintptr_t>(stack_limit_);
}

uintptr_t* Simulator::addressOfStackLimit() {
  return (uintptr_t*)&stack_limit_;
}

bool Simulator::overRecursed(uintptr_t newsp) const {
  if (newsp == 0)
    newsp = xreg(31, Reg31IsStackPointer);
  return newsp <= stackLimit();
}

bool Simulator::overRecursedWithExtra(uint32_t extra) const {
  uintptr_t newsp = xreg(31, Reg31IsStackPointer) - extra;
  return newsp <= stackLimit();
}

void Simulator::set_resume_pc(const Instruction* new_resume_pc) {
  resume_pc_ = AddressUntag(new_resume_pc);
}

int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
  va_list parameters;
  va_start(parameters, argument_count);

  // The first eight arguments are passed in registers.
  VIXL_ASSERT(argument_count <= 8);
  // This code should use the type of the called function
  // (with templates, like the callVM machinery), but since the
  // number of called functions is minuscule, their types have been
  // divined from the number of arguments.
  if (argument_count == 8) {
    // EnterJitData::jitcode.
    set_xreg(0, va_arg(parameters, int64_t));
    // EnterJitData::maxArgc.
    set_xreg(1, va_arg(parameters, unsigned));
    // EnterJitData::maxArgv.
    set_xreg(2, va_arg(parameters, int64_t));
    // EnterJitData::osrFrame.
    set_xreg(3, va_arg(parameters, int64_t));
    // EnterJitData::calleeToken.
    set_xreg(4, va_arg(parameters, int64_t));
    // EnterJitData::scopeChain.
    set_xreg(5, va_arg(parameters, int64_t));
    // EnterJitData::osrNumStackValues.
    set_xreg(6, va_arg(parameters, unsigned));
    // Address of EnterJitData::result.
    set_xreg(7, va_arg(parameters, int64_t));
  } else if (argument_count == 2) {
    // EntryArg* args
    set_xreg(0, va_arg(parameters, int64_t));
    // uint8_t* GlobalData
    set_xreg(1, va_arg(parameters, int64_t));
  } else if (argument_count == 1) { // irregexp
    // InputOutputData& data
    set_xreg(0, va_arg(parameters, int64_t));
  } else {
    MOZ_CRASH("Unknown number of arguments");
  }

  va_end(parameters);

  // The call must transition back to native code on exit.
  VIXL_ASSERT(xreg(30) == int64_t(kEndOfSimAddress));

  // Execute the simulation.
  DebugOnly<int64_t> entryStack = xreg(31, Reg31IsStackPointer);
  RunFrom((Instruction*)entry);
  DebugOnly<int64_t> exitStack = xreg(31, Reg31IsStackPointer);
  VIXL_ASSERT(entryStack == exitStack);

  int64_t result = xreg(0);
  if (getenv("USE_DEBUGGER"))
    printf("LEAVE\n");
  return result;
}

// Protects the icache and redirection properties of the simulator.
class AutoLockSimulatorCache
{
  friend class Simulator;

 public:
  explicit AutoLockSimulatorCache(Simulator* sim) : sim_(sim) {
    PR_Lock(sim_->lock_);
    VIXL_ASSERT(!sim_->lockOwner_);
#ifdef DEBUG
    sim_->lockOwner_ = PR_GetCurrentThread();
#endif
  }

  ~AutoLockSimulatorCache() {
#ifdef DEBUG
    VIXL_ASSERT(sim_->lockOwner_ == PR_GetCurrentThread());
    sim_->lockOwner_ = nullptr;
#endif
    PR_Unlock(sim_->lock_);
  }

 private:
  Simulator* const sim_;
};

// When the generated code calls a VM function (masm.callWithABI), we need to
// call that function instead of trying to execute it with the simulator
// (because it's x64 code instead of AArch64 code). We do that by redirecting
// the VM call to an svc (Supervisor Call) instruction that is handled by the
// simulator. We write the original destination of the jump at a known offset
// from the svc instruction so the simulator knows what to call.
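//
// The resulting in-memory layout (fields in declaration order) is:
//
//   [ nativeFunction_ | svcInstruction_ | type_ | next_ ]
//                       ^-- addressOfSvcInstruction()
//
// so FromSvcInstruction() can recover the owning Redirection by subtracting
// offsetof(Redirection, svcInstruction_) from the svc's address.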
class Redirection
{
  friend class Simulator;

  Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim)
    : nativeFunction_(nativeFunction),
      type_(type),
      next_(nullptr)
  {
    next_ = sim->redirection();
    // TODO: Flush ICache?
    sim->setRedirection(this);

    Instruction* instr = (Instruction*)(&svcInstruction_);
    vixl::Assembler::svc(instr, kCallRtRedirected);
  }

 public:
  void* addressOfSvcInstruction() { return &svcInstruction_; }
  void* nativeFunction() const { return nativeFunction_; }
  ABIFunctionType type() const { return type_; }

  static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
    Simulator* sim = Simulator::Current();
    AutoLockSimulatorCache alsr(sim);

    // TODO: Store srt_ in the simulator for this assertion.
    // VIXL_ASSERT_IF(pt->simulator(), pt->simulator()->srt_ == srt);

    Redirection* current = sim->redirection();
    for (; current != nullptr; current = current->next_) {
      if (current->nativeFunction_ == nativeFunction) {
        VIXL_ASSERT(current->type() == type);
        return current;
      }
    }

    Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
    if (!redir) {
      MOZ_ReportAssertionFailure("[unhandlable oom] Simulator redirection", __FILE__, __LINE__);
      MOZ_CRASH();
    }
    new (redir) Redirection(nativeFunction, type, sim);
    return redir;
  }

  static const Redirection* FromSvcInstruction(const Instruction* svcInstruction) {
    const uint8_t* addrOfSvc = reinterpret_cast<const uint8_t*>(svcInstruction);
    const uint8_t* addrOfRedirection = addrOfSvc - offsetof(Redirection, svcInstruction_);
    return reinterpret_cast<const Redirection*>(addrOfRedirection);
  }

 private:
  void* nativeFunction_;
  uint32_t svcInstruction_;
  ABIFunctionType type_;
  Redirection* next_;
};

void Simulator::setRedirection(Redirection* redirection) {
  // VIXL_ASSERT(lockOwner_); TODO
  redirection_ = redirection;
}

Redirection* Simulator::redirection() const {
  return redirection_;
}

void* Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type) {
  Redirection* redirection = Redirection::Get(nativeFunction, type);
  return redirection->addressOfSvcInstruction();
}

void Simulator::VisitException(const Instruction* instr) {
  switch (instr->Mask(ExceptionMask)) {
    case BRK: {
      int lowbit = ImmException_offset;
      int highbit = ImmException_offset + ImmException_width - 1;
      HostBreakpoint(instr->Bits(highbit, lowbit));
      break;
    }
    case HLT:
      switch (instr->ImmException()) {
        case kUnreachableOpcode:
          DoUnreachable(instr);
          return;
        case kTraceOpcode:
          DoTrace(instr);
          return;
        case kLogOpcode:
          DoLog(instr);
          return;
        case kPrintfOpcode:
          DoPrintf(instr);
          return;
        default:
          HostBreakpoint();
          return;
      }
    case SVC:
      // The SVC instruction is hijacked by the JIT as a pseudo-instruction
      // causing the Simulator to execute host-native code for callWithABI.
      switch (instr->ImmException()) {
        case kCallRtRedirected:
          VisitCallRedirection(instr);
          return;
        case kMarkStackPointer:
          spStack_.append(xreg(31, Reg31IsStackPointer));
          return;
        case kCheckStackPointer: {
          int64_t current = xreg(31, Reg31IsStackPointer);
          int64_t expected = spStack_.popCopy();
          VIXL_ASSERT(current == expected);
          return;
        }
        default:
          VIXL_UNIMPLEMENTED();
      }
      break;
    default:
      VIXL_UNIMPLEMENTED();
  }
}

void Simulator::setGPR32Result(int32_t result) {
  set_wreg(0, result);
}

void Simulator::setGPR64Result(int64_t result) {
  set_xreg(0, result);
}

void Simulator::setFP32Result(float result) {
  set_sreg(0, result);
}

void Simulator::setFP64Result(double result) {
  set_dreg(0, result);
}

typedef int64_t (*Prototype_General0)();
typedef int64_t (*Prototype_General1)(int64_t arg0);
typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3);
typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                      int64_t arg4);
typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                      int64_t arg4, int64_t arg5);
typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                      int64_t arg4, int64_t arg5, int64_t arg6);
typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                      int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);

typedef int64_t (*Prototype_Int_Double)(double arg0);
typedef int64_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);

typedef float (*Prototype_Float32_Float32)(float arg0);

typedef double (*Prototype_Double_None)();
typedef double (*Prototype_Double_Double)(double arg0);
typedef double (*Prototype_Double_Int)(int32_t arg0);
typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
                                                            double arg2, double arg3);

// Simulator support for callWithABI().
void
Simulator::VisitCallRedirection(const Instruction* instr)
{
  VIXL_ASSERT(instr->Mask(ExceptionMask) == SVC);
  VIXL_ASSERT(instr->ImmException() == kCallRtRedirected);

  const Redirection* redir = Redirection::FromSvcInstruction(instr);
  uintptr_t nativeFn = reinterpret_cast<uintptr_t>(redir->nativeFunction());

  // The stack must be aligned prior to the call.
  // FIXME: It's actually our job to perform the alignment...
  //VIXL_ASSERT((xreg(31, Reg31IsStackPointer) & (StackAlignment - 1)) == 0);

  // Used to assert that callee-saved registers are preserved.
  DebugOnly<int64_t> x19 = xreg(19);
  DebugOnly<int64_t> x20 = xreg(20);
  DebugOnly<int64_t> x21 = xreg(21);
  DebugOnly<int64_t> x22 = xreg(22);
  DebugOnly<int64_t> x23 = xreg(23);
  DebugOnly<int64_t> x24 = xreg(24);
  DebugOnly<int64_t> x25 = xreg(25);
  DebugOnly<int64_t> x26 = xreg(26);
  DebugOnly<int64_t> x27 = xreg(27);
  DebugOnly<int64_t> x28 = xreg(28);
  DebugOnly<int64_t> x29 = xreg(29);
  DebugOnly<int64_t> savedSP = xreg(31, Reg31IsStackPointer);

  // Remember LR for returning from the "call".
  int64_t savedLR = xreg(30);

  // Allow recursive Simulator calls: returning from the call must stop
  // the simulation and transition back to native Simulator code.
  set_xreg(30, int64_t(kEndOfSimAddress));

  // Store argument register values in local variables for ease of use below.
  int64_t x0 = xreg(0);
  int64_t x1 = xreg(1);
  int64_t x2 = xreg(2);
  int64_t x3 = xreg(3);
  int64_t x4 = xreg(4);
  int64_t x5 = xreg(5);
  int64_t x6 = xreg(6);
  int64_t x7 = xreg(7);
  double d0 = dreg(0);
  double d1 = dreg(1);
  double d2 = dreg(2);
  double d3 = dreg(3);
  float s0 = sreg(0);

  // Dispatch the call and set the return value.
  switch (redir->type()) {
    // Cases with int64_t return type.
    case js::jit::Args_General0: {
      int64_t ret = reinterpret_cast<Prototype_General0>(nativeFn)();
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General1: {
      int64_t ret = reinterpret_cast<Prototype_General1>(nativeFn)(x0);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General2: {
      int64_t ret = reinterpret_cast<Prototype_General2>(nativeFn)(x0, x1);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General3: {
      int64_t ret = reinterpret_cast<Prototype_General3>(nativeFn)(x0, x1, x2);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General4: {
      int64_t ret = reinterpret_cast<Prototype_General4>(nativeFn)(x0, x1, x2, x3);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General5: {
      int64_t ret = reinterpret_cast<Prototype_General5>(nativeFn)(x0, x1, x2, x3, x4);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General6: {
      int64_t ret = reinterpret_cast<Prototype_General6>(nativeFn)(x0, x1, x2, x3, x4, x5);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General7: {
      int64_t ret = reinterpret_cast<Prototype_General7>(nativeFn)(x0, x1, x2, x3, x4, x5, x6);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_General8: {
      int64_t ret = reinterpret_cast<Prototype_General8>(nativeFn)(x0, x1, x2, x3, x4, x5, x6, x7);
      setGPR64Result(ret);
      break;
    }

    // Cases with GPR return type. This can be int32 or int64, but int64 is a safer assumption.
    case js::jit::Args_Int_Double: {
      int64_t ret = reinterpret_cast<Prototype_Int_Double>(nativeFn)(d0);
      setGPR64Result(ret);
      break;
    }
    case js::jit::Args_Int_IntDouble: {
      int64_t ret = reinterpret_cast<Prototype_Int_IntDouble>(nativeFn)(x0, d0);
      setGPR64Result(ret);
      break;
    }

    // Cases with float return type.
    case js::jit::Args_Float32_Float32: {
      float ret = reinterpret_cast<Prototype_Float32_Float32>(nativeFn)(s0);
      setFP32Result(ret);
      break;
    }

    // Cases with double return type.
    case js::jit::Args_Double_None: {
      double ret = reinterpret_cast<Prototype_Double_None>(nativeFn)();
      setFP64Result(ret);
      break;
    }
    case js::jit::Args_Double_Double: {
      double ret = reinterpret_cast<Prototype_Double_Double>(nativeFn)(d0);
      setFP64Result(ret);
      break;
    }
    case js::jit::Args_Double_Int: {
      double ret = reinterpret_cast<Prototype_Double_Int>(nativeFn)(x0);
      setFP64Result(ret);
      break;
    }
    case js::jit::Args_Double_DoubleInt: {
      double ret = reinterpret_cast<Prototype_Double_DoubleInt>(nativeFn)(d0, x0);
      setFP64Result(ret);
      break;
    }
    case js::jit::Args_Double_DoubleDouble: {
      double ret = reinterpret_cast<Prototype_Double_DoubleDouble>(nativeFn)(d0, d1);
      setFP64Result(ret);
      break;
    }
    case js::jit::Args_Double_DoubleDoubleDouble: {
      double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(nativeFn)(d0, d1, d2);
      setFP64Result(ret);
      break;
    }
    case js::jit::Args_Double_DoubleDoubleDoubleDouble: {
      double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(nativeFn)(d0, d1, d2, d3);
      setFP64Result(ret);
      break;
    }

    case js::jit::Args_Double_IntDouble: {
      double ret = reinterpret_cast<Prototype_Double_IntDouble>(nativeFn)(x0, d0);
      setFP64Result(ret);
      break;
    }

    default:
      MOZ_CRASH("Unknown function type.");
  }

  // TODO: Nuke the volatile registers.

  // Assert that callee-saved registers are unchanged.
  VIXL_ASSERT(xreg(19) == x19);
  VIXL_ASSERT(xreg(20) == x20);
  VIXL_ASSERT(xreg(21) == x21);
  VIXL_ASSERT(xreg(22) == x22);
  VIXL_ASSERT(xreg(23) == x23);
  VIXL_ASSERT(xreg(24) == x24);
  VIXL_ASSERT(xreg(25) == x25);
  VIXL_ASSERT(xreg(26) == x26);
  VIXL_ASSERT(xreg(27) == x27);
  VIXL_ASSERT(xreg(28) == x28);
  VIXL_ASSERT(xreg(29) == x29);

  // Assert that the stack is unchanged.
  VIXL_ASSERT(savedSP == xreg(31, Reg31IsStackPointer));

  // Simulate a return.
  set_lr(savedLR);
  set_pc((Instruction*)savedLR);
  if (getenv("USE_DEBUGGER"))
    printf("SVCRET\n");
}

} // namespace vixl

vixl::Simulator* js::PerThreadData::simulator() const {
  return runtime_->simulator();
}

vixl::Simulator* JSRuntime::simulator() const {
  return simulator_;
}

uintptr_t* JSRuntime::addressOfSimulatorStackLimit() {
  return simulator_->addressOfStackLimit();
}