Bug 800617 - Fix some ARM-specific uber-nits. r=mjrosenb

Sean Stangl 2012-10-31 15:41:20 -07:00
Parent 7330a180a0
Commit 5c455346de
9 changed files: 412 additions and 429 deletions

View file

@ -71,12 +71,9 @@ class Registers
} RegisterID;
typedef RegisterID Code;
static const char *GetName(Code code) {
static const char *Names[] = { "r0", "r1", "r2", "r3",
"r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11",
"r12", "sp", "r14", "pc"};
static const char *Names[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "sp", "r14", "pc"};
return Names[code];
}
@ -114,8 +111,7 @@ class Registers
static const uint32 SingleByteRegs =
VolatileMask | NonVolatileMask;
// we should also account for any scratch registers that we care about.
// possibly the stack as well.
static const uint32 NonAllocatableMask =
(1 << Registers::sp) |
(1 << Registers::r12) | // r12 = ip = scratch
@ -182,10 +178,8 @@ class FloatRegisters
typedef FPRegisterID Code;
static const char *GetName(Code code) {
static const char *Names[] = { "d0", "d1", "d2", "d3",
"d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11",
"d12", "d13", "d14", "d15"};
static const char *Names[] = { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"};
return Names[code];
}
@ -201,9 +195,8 @@ class FloatRegisters
static const uint32 WrapperMask = VolatileMask;
static const uint32 NonAllocatableMask =
// the scratch float register for ARM.
(1 << d0) | (1 << invalid_freg);
// d0 is the ARM scratch float register.
static const uint32 NonAllocatableMask = (1 << d0) | (1 << invalid_freg);
// Registers that can be allocated without being saved, generally.
static const uint32 TempMask = VolatileMask & ~NonAllocatableMask;
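As a tiny illustration of the mask algebra used above (the values here are made up; only the operations mirror the real definitions):

#include <cstdint>
#include <cassert>

int main() {
    // One bit per register; set difference is "& ~", exactly as in TempMask.
    const uint32_t d0 = 1u << 0;                // the scratch float register
    const uint32_t volatileMask = 0x0000ffffu;  // made-up value for the demo
    const uint32_t nonAllocatable = d0;         // plus invalid_freg in the real mask
    const uint32_t tempMask = volatileMask & ~nonAllocatable;
    assert((tempMask & d0) == 0);               // the scratch reg is never handed out
    return 0;
}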
@ -217,6 +210,5 @@ bool has16DP();
} // namespace ion
} // namespace js
// we don't want the macro assembler's goods to start leaking out.
#endif // jsion_architecture_arm_h__

View file

@ -12,6 +12,7 @@
#include "assembler/jit/ExecutableAllocator.h"
#include "jscompartment.h"
#include "ion/IonCompartment.h"
using namespace js;
using namespace js::ion;
@ -61,7 +62,6 @@ js::ion::maybeRT(Register r)
uint32
js::ion::maybeRN(Register r)
{
if (r == InvalidReg)
return 0;
@ -107,6 +107,7 @@ js::ion::VD(VFPRegister vr)
{
if (vr.isMissing())
return 0;
// bits 15,14,13,12, 22
VFPRegister::VFPRegIndexSplit s = vr.encode();
return s.bit << 22 | s.block << 12;
@ -116,6 +117,7 @@ js::ion::VN(VFPRegister vr)
{
if (vr.isMissing())
return 0;
// bits 19,18,17,16, 7
VFPRegister::VFPRegIndexSplit s = vr.encode();
return s.bit << 7 | s.block << 16;
@ -125,6 +127,7 @@ js::ion::VM(VFPRegister vr)
{
if (vr.isMissing())
return 0;
// bits 5, 3,2,1,0
VFPRegister::VFPRegIndexSplit s = vr.encode();
return s.bit << 5 | s.block;
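The three encoders above scatter the same bit/block split into different instruction fields; a standalone sketch of where each piece lands (hypothetical helpers, not the SpiderMonkey types):

#include <cstdint>
#include <cstdio>

// A 5-bit VFP register index splits into a 4-bit block and a top bit,
// mirroring VFPRegIndexSplit; each operand position places them differently.
static uint32_t vd(uint32_t r) { return ((r >> 4) & 1) << 22 | (r & 0xf) << 12; } // bits 22, 15:12
static uint32_t vn(uint32_t r) { return ((r >> 4) & 1) << 7  | (r & 0xf) << 16; } // bits 7, 19:16
static uint32_t vm(uint32_t r) { return ((r >> 4) & 1) << 5  | (r & 0xf); }       // bits 5, 3:0

int main() {
    std::printf("VD(d17)=%#x VN(d17)=%#x VM(d17)=%#x\n",
                (unsigned)vd(17), (unsigned)vn(17), (unsigned)vm(17));
    return 0;
}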
@ -179,8 +182,7 @@ InstLDR::asTHIS(Instruction &i)
bool
InstBranchReg::isTHIS(const Instruction &i)
{
return InstBXReg::isTHIS(i) ||
InstBLXReg::isTHIS(i);
return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i);
}
InstBranchReg *
@ -204,8 +206,7 @@ InstBranchReg::checkDest(Register dest)
bool
InstBranchImm::isTHIS(const Instruction &i)
{
return InstBImm::isTHIS(i) ||
InstBLImm::isTHIS(i);
return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i);
}
InstBranchImm *
@ -280,8 +281,7 @@ InstBLImm::asTHIS(Instruction &i)
bool
InstMovWT::isTHIS(Instruction &i)
{
return InstMovW::isTHIS(i) ||
InstMovT::isTHIS(i);
return InstMovW::isTHIS(i) || InstMovT::isTHIS(i);
}
InstMovWT *
InstMovWT::asTHIS(Instruction &i)
@ -425,15 +425,14 @@ ion::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
Instruction *jump = (Instruction*)jump_.raw();
Assembler::Condition c;
jump->extractCond(&c);
JS_ASSERT(jump->is<InstBranchImm>() ||
jump->is<InstLDR>());
JS_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());
int jumpOffset = label.raw() - jump_.raw();
if (BOffImm::isInRange(jumpOffset)) {
// This instruction started off as a branch, and will remain one
Assembler::retargetNearBranch(jump, jumpOffset, c);
} else {
// This instruction started off as a branch, but now needs to be demoted to an
// ldr
// This instruction started off as a branch, but now needs to be demoted to an ldr.
uint8 **slot = reinterpret_cast<uint8**>(jump_.jumpTableEntry());
Assembler::retargetFarBranch(jump, slot, label.raw(), c);
}
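The near/far split above hinges on the reach of an ARM B instruction; a minimal sketch of the range test (constants come from the architecture, helper names are invented):

#include <cassert>

// A B/BL instruction carries a signed 24-bit word offset, so its reach is
// +/-32MB from PC+8. Anything further goes through a jump-table slot that
// an ldr can reach, which is what retargetFarBranch arranges above.
static bool fitsInBranch(long byteOffset) {
    long adjusted = byteOffset - 8;            // offsets are relative to PC+8
    return adjusted >= -(1L << 25) && adjusted <= (1L << 25) - 4 &&
           (adjusted & 3) == 0;                // and must be word-aligned
}

int main() {
    assert(fitsInBranch(1024));                // stays a branch
    assert(!fitsInBranch(64L * 1024 * 1024));  // demoted to an ldr
    return 0;
}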
@ -445,9 +444,9 @@ Assembler::finish()
flush();
JS_ASSERT(!isFinished);
isFinished = true;
for (size_t i = 0; i < jumps_.length(); i++) {
for (size_t i = 0; i < jumps_.length(); i++)
jumps_[i].fixOffset(m_buffer);
}
for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++) {
int offset = tmpDataRelocations_[i].getOffset();
@ -532,16 +531,18 @@ Assembler::getCF32Target(Iter *iter)
Instruction *inst2 = iter->next();
Instruction *inst3 = iter->next();
Instruction *inst4 = iter->next();
if (inst1->is<InstBranchImm>()) {
// see if we have a simple case, b #offset
BOffImm imm;
InstBranchImm *jumpB = inst1->as<InstBranchImm>();
jumpB->extractImm(&imm);
return imm.getDest(inst1)->raw();
} else if (inst1->is<InstMovW>() &&
inst2->is<InstMovT>() &&
(inst3->is<InstBranchReg>() ||
inst4->is<InstBranchReg>())) {
}
if (inst1->is<InstMovW>() && inst2->is<InstMovT>() &&
(inst3->is<InstBranchReg>() || inst4->is<InstBranchReg>()))
{
// see if we have the complex case,
// movw r_temp, #imm1
// movt r_temp, #imm2
@ -556,26 +557,31 @@ Assembler::getCF32Target(Iter *iter)
Imm16 targ_top;
Register temp;
// Extract both the temp register and the bottom immediate.
InstMovW *bottom = inst1->as<InstMovW>();
InstMovT *top = inst2->as<InstMovT>();
InstBranchReg * realBranch =
inst3->is<InstBranchReg>() ?
inst3->as<InstBranchReg>() :
inst4->as<InstBranchReg>();
// extract both the temp register and the bottom immediate
bottom->extractImm(&targ_bot);
bottom->extractDest(&temp);
// extract the top part of the immediate
// Extract the top part of the immediate.
InstMovT *top = inst2->as<InstMovT>();
top->extractImm(&targ_top);
// make sure they are being loaded intothe same register
// Make sure they are being loaded into the same register.
JS_ASSERT(top->checkDest(temp));
// make sure we're branching to the same register.
// Make sure we're branching to the same register.
InstBranchReg *realBranch = inst3->is<InstBranchReg>() ? inst3->as<InstBranchReg>()
: inst4->as<InstBranchReg>();
JS_ASSERT(realBranch->checkDest(temp));
uint32 *dest = (uint32*) (targ_bot.decode() | (targ_top.decode() << 16));
return dest;
} else if (inst1->is<InstLDR>()) {
}
if (inst1->is<InstLDR>()) {
JS_NOT_REACHED("ldr-based relocs NYI");
}
JS_NOT_REACHED("unsupported branch relocation");
return NULL;
}
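The movw/movt decoding above reassembles a 32-bit target from two 16-bit halves; a tiny standalone round trip of the same arithmetic (variable names are invented):

#include <cstdint>
#include <cassert>

int main() {
    uint32_t target = 0x004cafe0;              // some code address
    uint16_t movwImm = target & 0xffff;        // low half, loaded by movw
    uint16_t movtImm = target >> 16;           // high half, loaded by movt
    // Same reassembly as targ_bot.decode() | (targ_top.decode() << 16) above.
    uint32_t rebuilt = movwImm | (uint32_t(movtImm) << 16);
    assert(rebuilt == target);
    return 0;
}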
@ -594,8 +600,8 @@ Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style)
{
Instruction *load1 = start->cur();
Instruction *load2 = start->next();
if (load1->is<InstMovW>() &&
load2->is<InstMovT>()) {
if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
// see if we have the complex case,
// movw r_temp, #imm1
// movt r_temp, #imm2
@ -604,22 +610,27 @@ Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style)
Imm16 targ_top;
Register temp;
// Extract both the temp register and the bottom immediate.
InstMovW *bottom = load1->as<InstMovW>();
InstMovT *top = load2->as<InstMovT>();
// extract both the temp register and the bottom immediate
bottom->extractImm(&targ_bot);
bottom->extractDest(&temp);
// extract the top part of the immediate
// Extract the top part of the immediate.
InstMovT *top = load2->as<InstMovT>();
top->extractImm(&targ_top);
// make sure they are being loaded intothe same register
// Make sure they are being loaded into the same register.
JS_ASSERT(top->checkDest(temp));
uint32 *value = (uint32*) (targ_bot.decode() | (targ_top.decode() << 16));
if (dest)
*dest = temp;
if (style)
*style = L_MOVWT;
uint32 *value = (uint32*) (targ_bot.decode() | (targ_top.decode() << 16));
return value;
}
JS_NOT_REACHED("unsupported relocation");
return NULL;
}
@ -639,7 +650,7 @@ Assembler::TraceJumpRelocations(JSTracer *trc, IonCode *code, CompactBufferReade
InstructionIterator institer((Instruction *) (code->raw() + iter.offset()));
IonCode *child = CodeFromJump(&institer);
MarkIonCodeUnbarriered(trc, &child, "rel32");
};
}
}
static void
@ -655,7 +666,8 @@ TraceDataRelocations(JSTracer *trc, uint8 *buffer, CompactBufferReader &reader)
}
static void
TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer, js::Vector<BufferOffset, 0, SystemAllocPolicy> *locs)
TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer,
js::Vector<BufferOffset, 0, SystemAllocPolicy> *locs)
{
for (unsigned int idx = 0; idx < locs->length(); idx++) {
BufferOffset bo = (*locs)[idx];
@ -698,9 +710,9 @@ Assembler::trace(JSTracer *trc)
JS_ASSERT(code == IonCode::FromExecutable((uint8*)rp.target));
}
}
if (tmpDataRelocations_.length()) {
if (tmpDataRelocations_.length())
::TraceDataRelocations(trc, &m_buffer, &tmpDataRelocations_);
}
}
void
@ -755,16 +767,18 @@ Imm8::encodeTwoImms(uint32 imm)
// also remember, values are rotated by multiples of two, and left,
// mid or right can have length zero
uint32 imm1, imm2;
int left = (js_bitscan_clz32(imm)) & 30;
int left = (js_bitscan_clz32(imm)) & 0x1E;
uint32 no_n1 = imm & ~(0xff << (24 - left));
// not technically needed: this case only happens if we can encode
// as a single imm8m. There is a perfectly reasonable encoding in this
// case, but we shouldn't encourage people to do things like this.
if (no_n1 == 0)
return TwoImm8mData();
int mid = ((js_bitscan_clz32(no_n1)) & 30);
uint32 no_n2 =
no_n1 & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
int mid = ((js_bitscan_clz32(no_n1)) & 0x1E);
uint32 no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));
if (no_n2 == 0) {
// we hit the easy case, no wraparound.
// note: a single constant *may* look like this.
@ -786,49 +800,50 @@ Imm8::encodeTwoImms(uint32 imm)
JS_ASSERT((imm2shift & 0x1) == 0);
return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
datastore::Imm8mData(imm2, imm2shift >> 1));
} else {
// either it wraps, or it does not fit.
// if we initially chopped off more than 8 bits, then it won't fit.
if (left >= 8)
return TwoImm8mData();
int right = 32 - (js_bitscan_clz32(no_n2) & 30);
// all remaining set bits *must* fit into the lower 8 bits
// the right == 8 case should be handled by the previous case.
if (right > 8) {
return TwoImm8mData();
}
// make sure the initial bits that we removed for no_n1
// fit into the 8-(32-right) leftmost bits
if (((imm & (0xff << (24 - left))) << (8-right)) != 0) {
// BUT we may have removed more bits than we needed to for no_n1
// 0x04104001 e.g. we can encode 0x104 with a single op, then
// 0x04000001 with a second, but we try to encode 0x0410000
// and find that we need a second op for 0x4000, and 0x1 cannot
// be included in the encoding of 0x04100000
no_n1 = imm & ~((0xff >> (8-right)) | (0xff << (24 + right)));
mid = (js_bitscan_clz32(no_n1)) & 30;
no_n2 =
no_n1 & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
if (no_n2 != 0) {
return TwoImm8mData();
}
}
// now assemble all of this information into two coherent constants
// it is a rotate right from the lower 8 bits.
int imm1shift = 8 - right;
imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
JS_ASSERT ((imm1shift&~0x1e) == 0);
// left + 8 + mid is the position of the leftmost bit of n_2.
// we needed to rotate 0x000000ab right by 8 in order to get
// 0xab000000, then shift again by the leftmost bit in order to
// get the constant that we care about.
int imm2shift = mid + 8;
imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
JS_ASSERT((imm1shift & 0x1) == 0);
JS_ASSERT((imm2shift & 0x1) == 0);
return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
datastore::Imm8mData(imm2, imm2shift >> 1));
}
// either it wraps, or it does not fit.
// if we initially chopped off more than 8 bits, then it won't fit.
if (left >= 8)
return TwoImm8mData();
int right = 32 - (js_bitscan_clz32(no_n2) & 30);
// all remaining set bits *must* fit into the lower 8 bits
// the right == 8 case should be handled by the previous case.
if (right > 8)
return TwoImm8mData();
// make sure the initial bits that we removed for no_n1
// fit into the 8-(32-right) leftmost bits
if (((imm & (0xff << (24 - left))) << (8-right)) != 0) {
// BUT we may have removed more bits than we needed to for no_n1
// 0x04104001 e.g. we can encode 0x104 with a single op, then
// 0x04000001 with a second, but we try to encode 0x0410000
// and find that we need a second op for 0x4000, and 0x1 cannot
// be included in the encoding of 0x04100000
no_n1 = imm & ~((0xff >> (8-right)) | (0xff << (24 + right)));
mid = (js_bitscan_clz32(no_n1)) & 30;
no_n2 =
no_n1 & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
if (no_n2 != 0)
return TwoImm8mData();
}
// now assemble all of this information into two coherent constants
// it is a rotate right from the lower 8 bits.
int imm1shift = 8 - right;
imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
JS_ASSERT ((imm1shift&~0x1e) == 0);
// left + 8 + mid is the position of the leftmost bit of n_2.
// we needed to rotate 0x000000ab right by 8 in order to get
// 0xab000000, then shift again by the leftmost bit in order to
// get the constant that we care about.
int imm2shift = mid + 8;
imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
JS_ASSERT((imm1shift & 0x1) == 0);
JS_ASSERT((imm2shift & 0x1) == 0);
return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
datastore::Imm8mData(imm2, imm2shift >> 1));
}
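For context, the "imm8m" form this routine decomposes constants into is an 8-bit value rotated right by an even amount; a standalone checker (illustrative, not the SpiderMonkey predicate):

#include <cstdint>
#include <cstdio>

static bool isImm8m(uint32_t v) {
    // Try every even rotation; rotating v left by rot undoes an encoding
    // of "imm8 rotated right by rot".
    for (unsigned rot = 0; rot < 32; rot += 2) {
        uint32_t undone = (v << rot) | (v >> ((32 - rot) & 31));
        if (undone <= 0xff)
            return true;
    }
    return false;
}

int main() {
    std::printf("%d\n", isImm8m(0xff000000)); // 1: 0xff ror 8
    std::printf("%d\n", isImm8m(0x000001fe)); // 0: would need an odd rotation
    std::printf("%d\n", isImm8m(0x04104001)); // 0: the two-piece example above
    return 0;
}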
ALUOp
@ -868,9 +883,8 @@ ion::ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest)
return op_bic;
// orr has orn on thumb2 only.
default:
break;
return op_invalid;
}
return op_invalid;
}
bool
@ -891,9 +905,8 @@ ion::can_dbl(ALUOp op)
case op_orr:
return true;
default:
break;
return false;
}
return false;
}
bool
@ -920,9 +933,8 @@ ion::condsAreSafe(ALUOp op) {
case op_eor:
return true;
default:
break;
return false;
}
return false;
}
ALUOp
@ -1009,11 +1021,14 @@ js::ion::VFPImm::VFPImm(uint32 top)
{
data = -1;
datastore::Imm8VFPImmData tmp;
if (DoubleEncoder::lookup(top, &tmp)) {
if (DoubleEncoder::lookup(top, &tmp))
data = tmp.encode();
}
}
BOffImm::BOffImm(Instruction &inst) : data(inst.encode() & 0x00ffffff) {}
BOffImm::BOffImm(Instruction &inst)
: data(inst.encode() & 0x00ffffff)
{
}
Instruction *
BOffImm::getDest(Instruction *src)
@ -1031,11 +1046,9 @@ VFPRegister
VFPRegister::doubleOverlay()
{
JS_ASSERT(!_isInvalid);
if (kind != Double) {
if (kind != Double)
return VFPRegister(_code >> 1, Double);
} else {
return *this;
}
return *this;
}
VFPRegister
VFPRegister::singleOverlay()
@ -1045,9 +1058,9 @@ VFPRegister::singleOverlay()
// There are no corresponding float registers for d16-d31
ASSERT(_code < 16);
return VFPRegister(_code << 1, Single);
} else {
return VFPRegister(_code, Single);
}
return VFPRegister(_code, Single);
}
VFPRegister
@ -1058,9 +1071,9 @@ VFPRegister::sintOverlay()
// There are no corresponding float registers for d16-d31
ASSERT(_code < 16);
return VFPRegister(_code << 1, Int);
} else {
return VFPRegister(_code, Int);
}
return VFPRegister(_code, Int);
}
VFPRegister
VFPRegister::uintOverlay()
@ -1070,9 +1083,9 @@ VFPRegister::uintOverlay()
// There are no corresponding float registers for d16-d31
ASSERT(_code < 16);
return VFPRegister(_code << 1, UInt);
} else {
return VFPRegister(_code, UInt);
}
return VFPRegister(_code, UInt);
}
bool
@ -1092,9 +1105,7 @@ VFPRegister::isMissing()
bool
Assembler::oom() const
{
return m_buffer.oom() ||
!enoughMemory_ ||
jumpRelocations_.oom();
return m_buffer.oom() || !enoughMemory_ || jumpRelocations_.oom();
}
bool
@ -1151,12 +1162,11 @@ Assembler::bytesNeeded() const
BufferOffset
Assembler::writeInst(uint32 x, uint32 *dest)
{
if (dest == NULL) {
if (dest == NULL)
return m_buffer.putInt(x);
} else {
writeInstStatic(x, dest);
return BufferOffset();
}
writeInstStatic(x, dest);
return BufferOffset();
}
void
Assembler::writeInstStatic(uint32 x, uint32 *dest)
@ -1191,101 +1201,88 @@ Assembler::as_alu(Register dest, Register src1, Operand2 op2,
((src1 == InvalidReg) ? 0 : RN(src1)));
}
BufferOffset
Assembler::as_mov(Register dest,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, InvalidReg, op2, op_mov, sc, c);
}
BufferOffset
Assembler::as_mvn(Register dest, Operand2 op2,
SetCond_ sc, Condition c)
Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, InvalidReg, op2, op_mvn, sc, c);
}
// logical operations
// Logical operations.
BufferOffset
Assembler::as_and(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_and, sc, c);
}
BufferOffset
Assembler::as_bic(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_bic, sc, c);
}
BufferOffset
Assembler::as_eor(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_eor, sc, c);
}
BufferOffset
Assembler::as_orr(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_orr, sc, c);
}
// mathematical operations
// Mathematical operations.
BufferOffset
Assembler::as_adc(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_adc, sc, c);
}
BufferOffset
Assembler::as_add(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_add, sc, c);
}
BufferOffset
Assembler::as_sbc(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_sbc, sc, c);
}
BufferOffset
Assembler::as_sub(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_sub, sc, c);
}
BufferOffset
Assembler::as_rsb(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_rsb, sc, c);
}
BufferOffset
Assembler::as_rsc(Register dest, Register src1,
Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
return as_alu(dest, src1, op2, op_rsc, sc, c);
}
// test operations
// Test operations.
BufferOffset
Assembler::as_cmn(Register src1, Operand2 op2,
Condition c)
Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c);
}
BufferOffset
Assembler::as_cmp(Register src1, Operand2 op2,
Condition c)
Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c);
}
BufferOffset
Assembler::as_teq(Register src1, Operand2 op2,
Condition c)
Assembler::as_teq(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, op_teq, SetCond, c);
}
BufferOffset
Assembler::as_tst(Register src1, Operand2 op2,
Condition c)
Assembler::as_tst(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, op_tst, SetCond, c);
}
@ -1310,20 +1307,19 @@ const int mull_tag = 0x90;
BufferOffset
Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn,
MULOp op, SetCond_ sc, Condition c)
MULOp op, SetCond_ sc, Condition c)
{
return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag);
}
BufferOffset
Assembler::as_mul(Register dest, Register src1, Register src2,
SetCond_ sc, Condition c)
Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c)
{
return as_genmul(dest, InvalidReg, src1, src2, opm_mul, sc, c);
}
BufferOffset
Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
SetCond_ sc, Condition c)
SetCond_ sc, Condition c)
{
return as_genmul(dest, acc, src1, src2, opm_mla, sc, c);
}
@ -1340,28 +1336,28 @@ Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Con
BufferOffset
Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SetCond_ sc, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, opm_umull, sc, c);
}
BufferOffset
Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SetCond_ sc, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, opm_umlal, sc, c);
}
BufferOffset
Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SetCond_ sc, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, opm_smull, sc, c);
}
BufferOffset
Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SetCond_ sc, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, opm_smlal, sc, c);
}
@ -1389,12 +1385,14 @@ class PoolHintData {
poolBranch = 2,
poolVDTR = 3
};
private:
uint32 index : 17;
uint32 cond : 4;
LoadType loadType : 2;
uint32 destReg : 5;
uint32 ONES : 4;
public:
void init(uint32 index_, Assembler::Condition cond_, LoadType lt, const Register &destReg_) {
index = index_;
@ -1456,6 +1454,7 @@ union PoolHintPun {
PoolHintData phd;
uint32 raw;
};
// Handles all of the other integral data transferring functions:
// ldrsb, ldrsh, ldrd, etc.
// size is given in bits.
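PoolHintPun above reads the bitfield back as one raw word; a self-contained sketch of the same 17+4+2+5+4 layout round-tripped through a uint32 (field values are arbitrary, and bitfield packing is compiler-dependent, so this only mirrors the common layout):

#include <cstdint>
#include <cstring>
#include <cassert>

struct Hint {
    uint32_t index    : 17;
    uint32_t cond     : 4;
    uint32_t loadType : 2;
    uint32_t destReg  : 5;
    uint32_t ONES     : 4;  // all-ones marker, as checked by isValidPoolHint
};

int main() {
    static_assert(sizeof(Hint) == sizeof(uint32_t), "must pack into one word");
    Hint h = { 1234, 0xe, 1, 5, 0xf };
    uint32_t raw;
    std::memcpy(&raw, &h, sizeof raw);   // the union pun, spelled with memcpy
    Hint back;
    std::memcpy(&back, &raw, sizeof back);
    assert(back.index == 1234 && back.ONES == 0xf);
    return 0;
}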
@ -1483,11 +1482,7 @@ Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
}
break;
case 64:
if (ls == IsStore) {
extra_bits2 = 0x3;
} else {
extra_bits2 = 0x2;
}
extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
extra_bits1 = 0;
break;
default:
@ -1582,7 +1577,8 @@ Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
break;
case PoolHintData::poolVDTR:
if ((offset + (8 * data.getIndex()) - 8) < -1023 ||
(offset + (8 * data.getIndex()) - 8) > 1023) {
(offset + (8 * data.getIndex()) - 8) > 1023)
{
return false;
}
dummy->as_vdtr(IsLoad, data.getVFPReg(),
@ -1646,23 +1642,23 @@ Assembler::as_b(Label *l, Condition c, bool isPatchable)
BufferOffset ret = as_nop();
as_b(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
return ret;
} else {
int32 old;
BufferOffset ret;
if (l->used()) {
old = l->offset();
// This will currently throw an assertion if we couldn't actually
// encode the offset of the branch.
ret = as_b(BOffImm(old), c, isPatchable);
} else {
old = LabelBase::INVALID_OFFSET;
BOffImm inv;
ret = as_b(inv, c, isPatchable);
}
int32 check = l->use(ret.getOffset());
JS_ASSERT(check == old);
return ret;
}
int32 old;
BufferOffset ret;
if (l->used()) {
old = l->offset();
// This will currently throw an assertion if we couldn't actually
// encode the offset of the branch.
ret = as_b(BOffImm(old), c, isPatchable);
} else {
old = LabelBase::INVALID_OFFSET;
BOffImm inv;
ret = as_b(inv, c, isPatchable);
}
int32 check = l->use(ret.getOffset());
JS_ASSERT(check == old);
return ret;
}
BufferOffset
Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
@ -1697,24 +1693,24 @@ Assembler::as_bl(Label *l, Condition c)
BufferOffset ret = as_nop();
as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
return ret;
} else {
int32 old;
BufferOffset ret;
// See if the list was empty :(
if (l->used()) {
// This will currently throw an assertion if we couldn't actually
// encode the offset of the branch.
old = l->offset();
ret = as_bl(BOffImm(old), c);
} else {
old = LabelBase::INVALID_OFFSET;
BOffImm inv;
ret = as_bl(inv, c);
}
int32 check = l->use(ret.getOffset());
JS_ASSERT(check == old);
return ret;
}
int32 old;
BufferOffset ret;
// See if the list was empty :(
if (l->used()) {
// This will currently throw an assertion if we couldn't actually
// encode the offset of the branch.
old = l->offset();
ret = as_bl(BOffImm(old), c);
} else {
old = LabelBase::INVALID_OFFSET;
BOffImm inv;
ret = as_bl(inv, c);
}
int32 check = l->use(ret.getOffset());
JS_ASSERT(check == old);
return ret;
}
BufferOffset
Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
@ -1830,7 +1826,7 @@ Assembler::as_vcmpz(VFPRegister vd, Condition c)
return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, opv_cmpz, c);
}
// specifically, a move between two same sized-registers
// Specifically, a move between two same-sized registers.
BufferOffset
Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
{
@ -1860,9 +1856,8 @@ Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c
JS_ASSERT(idx == 0 || idx == 1);
// If we are transferring a single half of the double
// then it must be moving a VFP reg to a core reg.
if (vt2 == InvalidReg) {
if (vt2 == InvalidReg)
JS_ASSERT(f2c == FloatToCore);
}
idx = idx << 21;
} else {
JS_ASSERT(idx == 0);
@ -1904,39 +1899,29 @@ Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
vfp_size sz = isDouble;
if (vd.isFloat() && vm.isFloat()) {
// Doing a float -> float conversion
if (vm.isSingle()) {
if (vm.isSingle())
sz = isSingle;
}
return writeVFPInst(sz, c | 0x02B700C0 |
VM(vm) | VD(vd));
} else {
// At least one of the registers should be a float.
vcvt_destFloatness destFloat;
vcvt_Signedness opSign;
vcvt_toZero doToZero = toFPSCR;
JS_ASSERT(vd.isFloat() || vm.isFloat());
if (vd.isSingle() || vm.isSingle()) {
sz = isSingle;
}
if (vd.isFloat()) {
destFloat = toFloat;
if (vm.isSInt()) {
opSign = fromSigned;
} else {
opSign = fromUnsigned;
}
} else {
destFloat = toInteger;
if (vd.isSInt()) {
opSign = toSigned;
} else {
opSign = toUnsigned;
}
doToZero = useFPSCR ? toFPSCR : toZero;
}
return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
}
// At least one of the registers should be a float.
vcvt_destFloatness destFloat;
vcvt_Signedness opSign;
vcvt_toZero doToZero = toFPSCR;
JS_ASSERT(vd.isFloat() || vm.isFloat());
if (vd.isSingle() || vm.isSingle()) {
sz = isSingle;
}
if (vd.isFloat()) {
destFloat = toFloat;
opSign = (vm.isSInt()) ? fromSigned : fromUnsigned;
} else {
destFloat = toInteger;
opSign = (vd.isSInt()) ? toSigned : toUnsigned;
doToZero = useFPSCR ? toFPSCR : toZero;
}
return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
}
BufferOffset
@ -1949,7 +1934,8 @@ Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32 fixedPoint, bool t
imm5 = (sx ? 32 : 16) - imm5;
JS_ASSERT(imm5 >= 0);
imm5 = imm5 >> 1 | (imm5 & 1) << 6;
return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 | (!isSigned) << 16 | imm5 | c);
return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
(!isSigned) << 16 | imm5 | c);
}
// xfer between VFP and memory
@ -1985,10 +1971,10 @@ Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
{
vfp_size sz = vd.isDouble() ? isDouble : isSingle;
if (!vd.isDouble()) {
// totally do not know how to handle this right now
// Don't know how to handle this right now.
if (!vd.isDouble())
JS_NOT_REACHED("non-double immediate");
}
return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
}
@ -2003,6 +1989,7 @@ Assembler::nextLink(BufferOffset b, BufferOffset *next)
{
Instruction branch = *editSrc(b);
JS_ASSERT(branch.is<InstBranchImm>());
BOffImm destOff;
branch.as<InstBranchImm>()->extractImm(&destOff);
if (destOff.isInvalid())
@ -2056,11 +2043,10 @@ Assembler::bind(RepatchLabel *label)
PoolHintPun p;
p.raw = branch->encode();
Condition cond;
if (p.phd.isValidPoolHint()) {
if (p.phd.isValidPoolHint())
cond = p.phd.getCond();
} else {
else
branch->extractCond(&cond);
}
as_b(dest.diffB<BOffImm>(branchOff), cond, branchOff);
}
label->bind(dest.getOffset());
@ -2141,22 +2127,24 @@ Assembler::as_jumpPool(uint32 numCases)
{
if (numCases == 0)
return BufferOffset();
BufferOffset ret = writeInst(-1);
for (uint32 i = 1; i < numCases; i++)
writeInst(-1);
return ret;
}
ptrdiff_t
Assembler::getBranchOffset(const Instruction *i_)
{
if(i_->is<InstBranchImm>()) {
InstBranchImm *i = i_->as<InstBranchImm>();
BOffImm dest;
i->extractImm(&dest);
return dest.decode();
}
return 0;
if (!i_->is<InstBranchImm>())
return 0;
InstBranchImm *i = i_->as<InstBranchImm>();
BOffImm dest;
i->extractImm(&dest);
return dest.decode();
}
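getBranchOffset above ultimately decodes the signed 24-bit word offset a branch carries; a standalone sketch of that sign extension (assumes two's complement and arithmetic right shift, which ARM targets provide; the PC+8 bias is applied separately when computing the destination):

#include <cstdint>
#include <cassert>

static int32_t branchByteOffset(uint32_t inst) {
    // The low 24 bits hold a signed word offset; shift up and back down
    // to sign-extend, then scale words to bytes.
    int32_t imm24 = int32_t((inst & 0x00ffffff) << 8) >> 8;
    return imm24 * 4;
}

int main() {
    assert(branchByteOffset(0x00000001) == 4);   // one instruction forward
    assert(branchByteOffset(0x00ffffff) == -4);  // one instruction back
    return 0;
}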
void
Assembler::retargetNearBranch(Instruction *i, int offset, bool final)
@ -2172,12 +2160,10 @@ Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool f
// Retargeting calls is totally unsupported!
JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>());
new (i) InstBImm(BOffImm(offset), cond);
// Flush the cache, since an instruction was overwritten
if (final) {
if (final)
AutoFlushCache::updateTop(uintptr_t(i), 4);
}
}
void
@ -2193,18 +2179,26 @@ Assembler::retargetFarBranch(Instruction *i, uint8 **slot, uint8 *dest, Conditio
}
struct PoolHeader : Instruction {
struct Header {
struct Header
{
// size should take into account the pool header.
// size is in units of Instruction (4 bytes), not bytes
uint32 size:15;
bool isNatural:1;
uint32 ONES:16;
Header(int size_, bool isNatural_) : size(size_), isNatural(isNatural_), ONES(0xffff) {}
uint32 size : 15;
bool isNatural : 1;
uint32 ONES : 16;
Header(int size_, bool isNatural_)
: size(size_),
isNatural(isNatural_),
ONES(0xffff)
{ }
Header(const Instruction *i) {
JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32));
memcpy(this, i, sizeof(Header));
JS_ASSERT(ONES == 0xffff);
}
uint32 raw() const {
JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32));
uint32 dest;
@ -2212,7 +2206,11 @@ struct PoolHeader : Instruction {
return dest;
}
};
PoolHeader(int size_, bool isNatural_) : Instruction (Header(size_, isNatural_).raw(), true) {}
PoolHeader(int size_, bool isNatural_)
: Instruction(Header(size_, isNatural_).raw(), true)
{ }
uint32 size() const {
Header tmp(this);
return tmp.size;
@ -2256,6 +2254,7 @@ Assembler::writePoolFooter(uint8 *start, Pool *p, bool isNatural)
{
return;
}
// The size of an arbitrary 32-bit call in the instruction stream.
// On ARM this sequence is |pc = ldr pc - 4; imm32| given that we
// never reach the imm32.
@ -2392,13 +2391,12 @@ Instruction::next()
const PoolHeader *ph;
// If this is a guard, and the next instruction is a header, always work around the pool
// If it isn't a guard, then start looking ahead.
if (instIsGuard(this, &ph)) {
if (instIsGuard(this, &ph))
return ret + ph->size();
} else if (instIsArtificialGuard(ret, &ph)) {
if (instIsArtificialGuard(ret, &ph))
return ret + 1 + ph->size();
} else if (instIsBNop(ret)) {
if (instIsBNop(ret))
return ret + 1;
}
return ret;
}
@ -2473,9 +2471,10 @@ AutoFlushCache::~AutoFlushCache()
IonSpewFin(IonSpew_CacheFlush);
myCompartment_->setFlusher(NULL);
}
if (!used_) {
if (!used_)
return;
}
if (start_ != NULL) {
JSC::ExecutableAllocator::cacheFlush((void*)start_, (size_t)(stop_ - start_ + sizeof(Instruction)));
} else {

View file

@ -354,11 +354,10 @@ CodeGeneratorARM::visitAddI(LAddI *ins)
const LAllocation *rhs = ins->getOperand(1);
const LDefinition *dest = ins->getDef(0);
if (rhs->isConstant()) {
if (rhs->isConstant())
masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
} else {
else
masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
}
if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
return false;
@ -372,11 +371,11 @@ CodeGeneratorARM::visitSubI(LSubI *ins)
const LAllocation *lhs = ins->getOperand(0);
const LAllocation *rhs = ins->getOperand(1);
const LDefinition *dest = ins->getDef(0);
if (rhs->isConstant()) {
if (rhs->isConstant())
masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
} else {
else
masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
}
if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
return false;
@ -400,7 +399,7 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
masm.ma_cmp(ToRegister(lhs), Imm32(0));
if (!bailoutIf(bailoutCond, ins->snapshot()))
return false;
return false;
}
// TODO: move these to ma_mul.
switch (constant) {
@ -461,13 +460,12 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
}
if (!handled) {
if (mul->canOverflow()) {
if (mul->canOverflow())
c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
} else {
else
masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
}
}
}
}
}
// Bailout on overflow
if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
@ -615,9 +613,8 @@ CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
masm.ma_and(Imm32((1<<ins->shift())-1), out);
masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
if (!bailoutIf(Assembler::Zero, ins->snapshot())) {
if (!bailoutIf(Assembler::Zero, ins->snapshot()))
return false;
}
masm.bind(&fin);
return true;
}
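The rsb/and/rsb sequence above computes a signed modulus by a power of two; a plain-C++ rendering of the same trick (hypothetical helper, mirroring JS semantics where the result takes the dividend's sign):

#include <cstdint>
#include <cassert>

static int32_t modPow2(int32_t x, int shift) {
    uint32_t mask = (1u << shift) - 1;
    if (x < 0) {
        uint32_t nx = 0u - uint32_t(x);   // negate without signed overflow
        return -int32_t(nx & mask);       // negate, mask, negate back
    }
    return int32_t(uint32_t(x) & mask);
}

int main() {
    assert(modPow2(13, 3) == 5);          // 13 % 8
    assert(modPow2(-13, 3) == -5);        // sign follows the dividend, not +3
    assert(modPow2(-16, 3) == 0);         // the bailout case above: JS -0
    return 0;
}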
@ -629,9 +626,8 @@ CodeGeneratorARM::visitModMaskI(LModMaskI *ins)
Register dest = ToRegister(ins->getDef(0));
Register tmp = ToRegister(ins->getTemp(0));
masm.ma_mod_mask(src, dest, tmp, ins->shift());
if (!bailoutIf(Assembler::Zero, ins->snapshot())) {
if (!bailoutIf(Assembler::Zero, ins->snapshot()))
return false;
}
return true;
}
bool
@ -657,25 +653,22 @@ CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
// all of these bitops should be either imm32's, or integer registers.
switch (ins->bitop()) {
case JSOP_BITOR:
if (rhs->isConstant()) {
if (rhs->isConstant())
masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
} else {
else
masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
}
break;
case JSOP_BITXOR:
if (rhs->isConstant()) {
if (rhs->isConstant())
masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
} else {
else
masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
}
break;
case JSOP_BITAND:
if (rhs->isConstant()) {
if (rhs->isConstant())
masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
} else {
else
masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
}
break;
default:
JS_NOT_REACHED("unexpected binary opcode");
@ -834,9 +827,8 @@ CodeGeneratorARM::visitMoveGroup(LMoveGroup *group)
? MoveResolver::Move::DOUBLE
: MoveResolver::Move::GENERAL;
if (!resolver.addMove(toMoveOperand(from), toMoveOperand(to), kind)) {
if (!resolver.addMove(toMoveOperand(from), toMoveOperand(to), kind))
return false;
}
}
if (!resolver.resolve())
@ -893,7 +885,6 @@ CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, const Register &ind
if (!masm.addDeferredData(d, 0))
return false;
return true;
}
@ -906,17 +897,17 @@ CodeGeneratorARM::visitMathD(LMathD *math)
switch (math->jsop()) {
case JSOP_ADD:
masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
case JSOP_SUB:
masm.ma_vsub(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
masm.ma_vsub(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
case JSOP_MUL:
masm.ma_vmul(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
masm.ma_vmul(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
case JSOP_DIV:
masm.ma_vdiv(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
masm.ma_vdiv(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
break;
default:
JS_NOT_REACHED("unexpected opcode");
return false;
@ -1212,6 +1203,7 @@ CodeGeneratorARM::visitCompareB(LCompareB *lir)
emitSet(JSOpToCondition(mir->jsop()), output);
masm.jump(&done);
}
masm.bind(&notBoolean);
{
masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
@ -1349,13 +1341,15 @@ CodeGeneratorARM::visitLoadElementT(LLoadElementT *load)
Register base = ToRegister(load->elements());
if (load->mir()->type() == MIRType_Double) {
if (load->index()->isConstant()) {
masm.loadInt32OrDouble(Address(base,ToInt32(load->index()) * sizeof(Value)), ToFloatRegister(load->output()));
Address source(base, ToInt32(load->index()) * sizeof(Value));
masm.loadInt32OrDouble(source, ToFloatRegister(load->output()));
} else {
masm.loadInt32OrDouble(base, ToRegister(load->index()), ToFloatRegister(load->output()));
}
} else {
if (load->index()->isConstant()) {
masm.load32(Address(base, ToInt32(load->index()) * sizeof(Value)), ToRegister(load->output()));
Address source(base, ToInt32(load->index()) * sizeof(Value));
masm.load32(source, ToRegister(load->output()));
} else {
masm.ma_ldr(DTRAddr(base, DtrRegImmShift(ToRegister(load->index()), LSL, 3)),
ToRegister(load->output()));

View file

@ -168,8 +168,8 @@ class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorARM>
}
};
} // ion
} // js
} // namespace ion
} // namespace js
#endif // jsion_codegen_arm_h__

View file

@ -18,7 +18,7 @@ isValueDTRDCandidate(ValueOperand &val)
// b) Aligned to a multiple of two.
if ((val.typeReg().code() != (val.payloadReg().code() + 1)))
return false;
else if ((val.payloadReg().code() & 1) != 0)
if ((val.payloadReg().code() & 1) != 0)
return false;
return true;
}
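The two checks above encode the ldrd/strd pairing rule; standalone, with register codes as plain ints (purely illustrative):

#include <cassert>

// ARM's doubleword transfers want an even-numbered register and its
// successor, so a payload/type pair qualifies only when the payload
// register is even and the type register is exactly payload + 1.
static bool isDtrdPair(int payloadCode, int typeCode) {
    return typeCode == payloadCode + 1 && (payloadCode & 1) == 0;
}

int main() {
    assert(isDtrdPair(0, 1));    // r0/r1 works
    assert(!isDtrdPair(1, 2));   // r1/r2: payload not even
    assert(!isDtrdPair(2, 4));   // r2/r4: not consecutive
    return 0;
}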
@ -69,14 +69,12 @@ bool
MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
SetCond_ sc, Condition c)
{
if ((sc == SetCond && ! condsAreSafe(op)) || !can_dbl(op)) {
if ((sc == SetCond && ! condsAreSafe(op)) || !can_dbl(op))
return false;
}
ALUOp interop = getDestVariant(op);
Imm8::TwoImm8mData both = Imm8::encodeTwoImms(imm.value);
if (both.fst.invalid) {
if (both.fst.invalid)
return false;
}
// for the most part, there is no good reason to set the condition
// codes for the first instruction.
// we can do better things if the second instruction doesn't
@ -86,7 +84,6 @@ MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
// unfortunately, it is horribly brittle.
as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c);
as_alu(dest, ScratchRegister, both.snd, op, sc, c);
// we succeeded!
return true;
}
@ -98,9 +95,8 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
{
// As it turns out, if you ask for a compare-like instruction
// you *probably* want it to set condition codes.
if (dest == InvalidReg) {
if (dest == InvalidReg)
JS_ASSERT(sc == SetCond);
}
// The operator gives us the ability to determine how
// this can be used.
@ -143,6 +139,7 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
as_movw(dest, (uint16)imm.value, c);
return;
}
// If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits
// then do it.
if (op == op_mvn && (((~imm.value) & ~ 0xffff) == 0)) {
@ -150,6 +147,7 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
as_movw(dest, (uint16)~imm.value, c);
return;
}
// TODO: constant dedup may enable us to add dest, r0, 23 *if*
// we are attempting to load a constant that looks similar to one
// that already exists
@ -180,6 +178,7 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
// Both operations should take 1 cycle, whereas add dest, tmp ror 4
// takes two cycles to execute.
}
// Either a) this isn't ARMv7 b) this isn't a move
// start by attempting to generate a two instruction form.
// Some things cannot be made into two-inst forms correctly.
@ -191,10 +190,12 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
// one instruction variant.
if (alu_dbl(src1, imm, dest, op, sc, c))
return;
// And try with its negative.
if (negOp != op_invalid &&
alu_dbl(src1, negImm, dest, negOp, sc, c))
return;
// Well, damn. We can use two 16 bit mov's, then do the op
// or we can do a single load from a pool then op.
if (hasMOVWT()) {
@ -207,7 +208,6 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
JS_NOT_REACHED("non-ARMv7 loading of immediates NYI.");
}
as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
// done!
}
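The comments above describe a fall-back chain for immediate operands; a schematic of the decision order (isImm8m as sketched earlier; everything here is illustrative, not the real ma_alu):

#include <cstdint>
#include <cstdio>

enum Strategy { OneInst, OneInstNegated, TwoInst, MovWTOrPool };

static bool isImm8m(uint32_t v) {
    for (unsigned rot = 0; rot < 32; rot += 2)
        if (((v << rot) | (v >> ((32 - rot) & 31))) <= 0xff)
            return true;
    return false;
}

static Strategy pick(uint32_t imm, bool opHasNegatedForm) {
    if (isImm8m(imm))
        return OneInst;                   // single rotated-imm8 operand
    if (opHasNegatedForm && isImm8m(0u - imm))
        return OneInstNegated;            // e.g. add becomes sub, as in ALUNeg
    // A real implementation consults encodeTwoImms here for the TwoInst case.
    return MovWTOrPool;                   // movw/movt on ARMv7, else a pool load
}

int main() {
    std::printf("%d\n", pick(0xff000000u, true)); // 0: one instruction
    std::printf("%d\n", pick(0xfffffffeu, true)); // 1: -2 negates to #2
    std::printf("%d\n", pick(0x12345678u, true)); // 3: needs movw/movt or a pool
    return 0;
}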
void
@ -336,43 +336,38 @@ MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
// Move not (dest <- ~src)
void
MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_alu(InvalidReg, imm, dest, op_mvn, sc, c);
}
void
MacroAssemblerARM::ma_mvn(Register src1, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
{
as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c);
}
// Negate (dest <- -src), src is a register, rather than a general op2.
void
MacroAssemblerARM::ma_neg(Register src1, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_neg(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
{
as_rsb(dest, src1, Imm8(0), sc, c);
}
// and
// And.
void
MacroAssemblerARM::ma_and(Register src, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_and(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_and(dest, src, dest);
}
void
MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
SetCond_ sc, Assembler::Condition c)
SetCond_ sc, Assembler::Condition c)
{
as_and(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_and(Imm32 imm, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_alu(dest, imm, dest, op_and, sc, c);
}
@ -384,18 +379,16 @@ MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
}
// bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
void
MacroAssemblerARM::ma_bic(Imm32 imm, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_alu(dest, imm, dest, op_bic, sc, c);
}
// exclusive or
// Exclusive or.
void
MacroAssemblerARM::ma_eor(Register src, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_eor(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_eor(dest, src, dest, sc, c);
}
@ -406,8 +399,7 @@ MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
as_eor(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_eor(Imm32 imm, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_alu(dest, imm, dest, op_eor, sc, c);
}
@ -418,10 +410,9 @@ MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
ma_alu(src1, imm, dest, op_eor, sc, c);
}
// or
// Or.
void
MacroAssemblerARM::ma_orr(Register src, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_orr(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_orr(dest, src, dest, sc, c);
}
@ -432,20 +423,19 @@ MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
as_orr(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_orr(Imm32 imm, Register dest,
SetCond_ sc, Assembler::Condition c)
MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
ma_alu(dest, imm, dest, op_orr, sc, c);
}
void
MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
SetCond_ sc, Assembler::Condition c)
SetCond_ sc, Assembler::Condition c)
{
ma_alu(src1, imm, dest, op_orr, sc, c);
}
// arithmetic based ops
// add with carry
// Arithmetic-based ops.
// Add with carry.
void
MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
@ -462,7 +452,7 @@ MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_
as_alu(dest, src1, O2Reg(src2), op_adc, sc, c);
}
// add
// Add.
void
MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
@ -490,7 +480,7 @@ MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, C
ma_alu(src1, op, dest, op_add, sc, c);
}
// subtract with carry
// Subtract with carry.
void
MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
@ -507,7 +497,7 @@ MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_
as_alu(dest, src1, O2Reg(src2), op_sbc, sc, c);
}
// subtract
// Subtract.
void
MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
@ -534,7 +524,7 @@ MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, C
ma_alu(src1, op, dest, op_sub, sc, c);
}
// reverse subtract
// Reverse subtract.
void
MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
@ -556,7 +546,7 @@ MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc,
ma_alu(src1, op2, dest, op_rsb, sc, c);
}
// reverse subtract with carry
// Reverse subtract with carry.
void
MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
@ -573,8 +563,8 @@ MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_
as_alu(dest, src1, O2Reg(src2), op_rsc, sc, c);
}
// compares/tests
// compare negative (sets condition codes as src1 + src2 would)
// Compares/tests.
// Compare negative (sets condition codes as src1 + src2 would).
void
MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c)
{
@ -591,7 +581,7 @@ MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
JS_NOT_REACHED("Feature NYI");
}
// compare (src - src2)
// Compare (src - src2).
void
MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c)
{
@ -632,7 +622,7 @@ MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
as_cmp(src1, O2Reg(src2), c);
}
// test for equality, (src1^src2)
// Test for equality, (src1^src2).
void
MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c)
{
@ -650,7 +640,7 @@ MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
}
// test (src1 & src2)
// Test (src1 & src2).
void
MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c)
{
@ -688,11 +678,14 @@ MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Con
if (cond == Equal || cond == NotEqual) {
as_smull(ScratchRegister, dest, src1, src2, SetCond);
return cond;
} else if (cond == Overflow) {
}
if (cond == Overflow) {
as_smull(ScratchRegister, dest, src1, src2);
as_cmp(ScratchRegister, asr(dest, 31));
return NotEqual;
}
JS_NOT_REACHED("Condition NYI");
return Always;
@ -705,11 +698,14 @@ MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm, Register dest, Conditi
if (cond == Equal || cond == NotEqual) {
as_smull(ScratchRegister, dest, ScratchRegister, src1, SetCond);
return cond;
} else if (cond == Overflow) {
}
if (cond == Overflow) {
as_smull(ScratchRegister, dest, ScratchRegister, src1);
as_cmp(ScratchRegister, asr(dest, 31));
return NotEqual;
}
JS_NOT_REACHED("Condition NYI");
return Always;
}
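The Overflow path above relies on a classic smull identity; in portable C++ terms (assumes two's complement and arithmetic right shift, which ARM provides):

#include <cstdint>
#include <cassert>

static bool mulOverflows(int32_t a, int32_t b) {
    int64_t full = int64_t(a) * int64_t(b);  // what smull computes
    int32_t lo = int32_t(full);              // dest
    int32_t hi = int32_t(full >> 32);        // ScratchRegister
    // The product fits in 32 bits exactly when the high word equals the low
    // word's sign bits: the as_cmp(ScratchRegister, asr(dest, 31)) test above.
    return hi != (lo >> 31);
}

int main() {
    assert(!mulOverflows(1000, 1000));
    assert(mulOverflows(0x10000, 0x10000));  // 2^32 does not fit
    assert(!mulOverflows(-1, 1 << 30));
    return 0;
}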
@ -773,8 +769,8 @@ MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, int32
}
// memory
// shortcut for when we know we're transferring 32 bits of data
// Memory.
// Shortcut for when we know we're transferring 32 bits of data.
void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
Index mode, Assembler::Condition cc)
@ -850,7 +846,8 @@ MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode, Condition cc
as_extdtr(IsLoad, 8, true, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, Index mode, Condition cc)
MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2,
Index mode, Condition cc)
{
JS_ASSERT((rt.code() & 1) == 0);
JS_ASSERT(rt2.value.code() == rt.code() + 1);
@ -868,7 +865,7 @@ MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc)
as_dtr(IsStore, 8, mode, rt, addr, cc);
}
// specialty for moving N bits of data, where n == 8,16,32,64
// Specialty for moving N bits of data, where n == 8,16,32,64.
void
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Register rm, Register rt,
@ -891,6 +888,7 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
return;
}
// We cannot encode this offset in a single ldr. For mode == index,
// try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|.
// This does not work for mode == PreIndex or mode == PostIndex.
@ -929,6 +927,7 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc);
return;
}
int bottom = off & 0xfff;
int neg_bottom = 0x1000 - bottom;
// For a regular offset, base == ScratchRegister does what we want. Modify the
@ -1031,7 +1030,7 @@ MacroAssemblerARM::ma_pop(Register r)
void
MacroAssemblerARM::ma_push(Register r)
{
// pushing sp is not well defined, use two instructions
// Pushing sp is not well defined: use two instructions.
if (r == sp) {
ma_mov(sp, ScratchRegister);
r = ScratchRegister;
@ -1054,7 +1053,7 @@ MacroAssemblerARM::ma_vpush(VFPRegister r)
finishFloatTransfer();
}
// branches when done from within arm-specific code
// Branches when done from within arm-specific code.
void
MacroAssemblerARM::ma_b(Label *dest, Assembler::Condition c, bool isPatchable)
{
@ -1067,7 +1066,7 @@ MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c)
as_bx(dest, c);
}
Assembler::RelocBranchStyle
static Assembler::RelocBranchStyle
b_type()
{
return Assembler::B_LDR;
@ -1101,15 +1100,15 @@ MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc, Assembler::Conditi
}
}
// this is almost NEVER necessary, we'll basically never be calling a label
// except, possibly in the crazy bailout-table case.
// This is almost NEVER necessary: we'll basically never be calling a label,
// except possibly in the crazy bailout-table case.
void
MacroAssemblerARM::ma_bl(Label *dest, Assembler::Condition c)
{
as_bl(dest, c);
}
//VFP/ALU
// VFP/ALU
void
MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
@ -1166,14 +1165,16 @@ MacroAssemblerARM::ma_vimm(double value, FloatRegister dest)
double d;
} dpun;
dpun.d = value;
if ((dpun.s.lo) == 0) {
if (dpun.s.hi == 0) {
// to zero a register, load 1.0, then execute dN <- dN - dN
// To zero a register, load 1.0, then execute dN <- dN - dN
VFPImm dblEnc(0x3FF00000);
as_vimm(dest, dblEnc);
as_vsub(dest, dest, dest);
return;
}
VFPImm dblEnc(dpun.s.hi);
if (dblEnc.isValid()) {
as_vimm(dest, dblEnc);
@ -1181,7 +1182,7 @@ MacroAssemblerARM::ma_vimm(double value, FloatRegister dest)
}
}
// fall back to putting the value in a pool.
// Fall back to putting the value in a pool.
as_FImm64Pool(dest, value);
}
@ -1251,6 +1252,7 @@ MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Co
as_vdtr(ls, rt, addr.toVFPAddr(), cc);
return;
}
// We cannot encode this offset in a single ldr. Try to encode it as
// an add scratch, base, imm; ldr dest, [scratch, +offset].
int bottom = off & (0xff << 2);
@ -1535,6 +1537,7 @@ MacroAssemblerARMCompat::load8SignExtend(const BaseIndex &src, const Register &d
ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
index = ScratchRegister;
}
if (src.offset != 0) {
if (index != ScratchRegister) {
ma_mov(index, ScratchRegister);
@ -1568,6 +1571,7 @@ MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex &src, const Register &
ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
index = ScratchRegister;
}
if (src.offset != 0) {
if (index != ScratchRegister) {
ma_mov(index, ScratchRegister);
@ -1594,6 +1598,7 @@ MacroAssemblerARMCompat::load16SignExtend(const BaseIndex &src, const Register &
ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
index = ScratchRegister;
}
if (src.offset != 0) {
if (index != ScratchRegister) {
ma_mov(index, ScratchRegister);
@ -1766,6 +1771,7 @@ MacroAssemblerARMCompat::store16(const Register &src, const BaseIndex &address)
ma_lsl(Imm32::ShiftOf(address.scale), index, ScratchRegister);
index = ScratchRegister;
}
if (address.offset != 0) {
ma_add(index, Imm32(address.offset), ScratchRegister);
index = ScratchRegister;
@ -1951,6 +1957,7 @@ MacroAssemblerARMCompat::branchDouble(DoubleCondition cond, const FloatRegister
bind(&unordered);
return;
}
if (cond == DoubleEqualOrUnordered) {
ma_b(label, VFP_Unordered);
ma_b(label, VFP_Equal);
@ -1962,12 +1969,10 @@ MacroAssemblerARMCompat::branchDouble(DoubleCondition cond, const FloatRegister
// higher level tag testing code
Operand ToPayload(Operand base) {
return Operand(Register::FromCode(base.base()),
base.disp());
return Operand(Register::FromCode(base.base()), base.disp());
}
Operand ToType(Operand base) {
return Operand(Register::FromCode(base.base()),
base.disp() + sizeof(void *));
return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void *));
}
Assembler::Condition
@ -1989,9 +1994,7 @@ Assembler::Condition
MacroAssemblerARMCompat::testDouble(Assembler::Condition cond, const ValueOperand &value)
{
JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
Assembler::Condition actual = (cond == Equal)
? Below
: AboveOrEqual;
Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR));
return actual;
}
@ -2215,9 +2218,8 @@ MacroAssemblerARMCompat::unboxValue(const ValueOperand &src, AnyRegister dest)
bind(&notInt32);
unboxDouble(src, dest.fpu());
bind(&end);
} else {
if (src.payloadReg() != dest.gpr())
as_mov(dest.gpr(), O2Reg(src.payloadReg()));
} else if (src.payloadReg() != dest.gpr()) {
as_mov(dest.gpr(), O2Reg(src.payloadReg()));
}
}
@ -2230,8 +2232,7 @@ MacroAssemblerARMCompat::unboxPrivate(const ValueOperand &src, Register dest)
void
MacroAssemblerARMCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest)
{
as_vxfer(dest.payloadReg(), dest.typeReg(),
VFPRegister(src), FloatToCore);
as_vxfer(dest.payloadReg(), dest.typeReg(), VFPRegister(src), FloatToCore);
}
@ -2496,11 +2497,10 @@ void
MacroAssemblerARMCompat::storePayload(const Value &val, Operand dest)
{
jsval_layout jv = JSVAL_TO_IMPL(val);
if (val.isMarkable()) {
if (val.isMarkable())
ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), lr);
} else {
else
ma_mov(Imm32(jv.s.payload.i32), lr);
}
ma_str(lr, ToPayload(dest));
}
void
@ -2518,11 +2518,10 @@ void
MacroAssemblerARMCompat::storePayload(const Value &val, Register base, Register index, int32 shift)
{
jsval_layout jv = JSVAL_TO_IMPL(val);
if (val.isMarkable()) {
if (val.isMarkable())
ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), ScratchRegister);
} else {
else
ma_mov(Imm32(jv.s.payload.i32), ScratchRegister);
}
JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
// If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm]
// cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call.
@ -2711,11 +2710,11 @@ MacroAssemblerARMCompat::passABIArg(const FloatRegister &freg)
void MacroAssemblerARMCompat::checkStackAlignment()
{
#ifdef DEBUG
Label good;
ma_tst(sp, Imm32(StackAlignment - 1));
ma_b(&good, Equal);
breakpoint();
bind(&good);
Label good;
ma_tst(sp, Imm32(StackAlignment - 1));
ma_b(&good, Equal);
breakpoint();
bind(&good);
#endif
}
@ -2743,9 +2742,8 @@ MacroAssemblerARMCompat::callWithABI(void *fun, Result result)
emitter.finish();
}
for (int i = 0; i < 2; i++) {
if (!floatArgsInGPR[i].isInvalid()) {
if (!floatArgsInGPR[i].isInvalid())
ma_vxfer(floatArgsInGPR[i], Register::FromCode(i*2), Register::FromCode(i*2+1));
}
}
checkStackAlignment();
ma_call(fun);

View file

@ -20,12 +20,14 @@ namespace ion {
static Register CallReg = ip;
static const int defaultShift = 3;
JS_STATIC_ASSERT(1 << defaultShift == sizeof(jsval));
// MacroAssemblerARM inherits from Assembler, defined in Assembler-arm.{h,cpp}
class MacroAssemblerARM : public Assembler
{
public:
MacroAssemblerARM() {
}
MacroAssemblerARM()
{ }
void convertInt32ToDouble(const Register &src, const FloatRegister &dest);
void convertUInt32ToDouble(const Register &src, const FloatRegister &dest);
void convertDoubleToFloat(const FloatRegister &src, const FloatRegister &dest);
@ -48,7 +50,8 @@ class MacroAssemblerARM : public Assembler
void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
SetCond_ sc = NoSetCond, Condition c = Always);
void ma_nop();
void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c, RelocStyle rs, Instruction *i = NULL);
void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
RelocStyle rs, Instruction *i = NULL);
// These should likely be wrapped up as a set of macros
// or something like that. I cannot think of a good reason
// to explicitly have all of this code.

View file

@ -70,6 +70,7 @@ MoveEmitterARM::toOperand(const MoveOperand &operand, bool isFloat) const
// Otherwise, the stack offset may need to be adjusted.
return Operand(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
}
if (operand.isGeneralReg())
return Operand(operand.reg());
@ -212,11 +213,10 @@ void
MoveEmitterARM::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
{
if (from.isFloatReg()) {
if (to.isFloatReg()) {
if (to.isFloatReg())
masm.ma_vmov(from.floatReg(), to.floatReg());
} else {
else
masm.ma_vstr(from.floatReg(), toOperand(to, true));
}
} else if (to.isFloatReg()) {
masm.ma_vldr(toOperand(from, true), to.floatReg());
} else {
@ -262,8 +262,7 @@ MoveEmitterARM::finish()
{
assertDone();
if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg) {
if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg)
masm.ma_ldr(spillSlot(), spilledReg_);
}
masm.freeStack(masm.framePushed() - pushedAtStart_);
}

View file

@ -63,8 +63,8 @@ class MoveEmitterARM
typedef MoveEmitterARM MoveEmitter;
} // ion
} // js
} // namespace ion
} // namespace js
#endif // jsion_move_resolver_arm_shared_h__

View file

@ -547,31 +547,29 @@ IonCompartment::generateVMWrapper(JSContext *cx, const VMFunction &f)
size_t argDisp = 0;
// Copy arguments.
if (f.explicitArgs) {
for (uint32 explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
MoveOperand from;
switch (f.argProperties(explicitArg)) {
case VMFunction::WordByValue:
masm.passABIArg(MoveOperand(argsBase, argDisp));
argDisp += sizeof(void *);
break;
case VMFunction::DoubleByValue:
JS_NOT_REACHED("VMCalls with double-size value arguments is not supported.");
masm.passABIArg(MoveOperand(argsBase, argDisp));
argDisp += sizeof(void *);
masm.passABIArg(MoveOperand(argsBase, argDisp));
argDisp += sizeof(void *);
break;
case VMFunction::WordByRef:
masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE));
argDisp += sizeof(void *);
break;
case VMFunction::DoubleByRef:
masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE));
argDisp += 2 * sizeof(void *);
break;
}
// Copy any arguments.
for (uint32 explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
MoveOperand from;
switch (f.argProperties(explicitArg)) {
case VMFunction::WordByValue:
masm.passABIArg(MoveOperand(argsBase, argDisp));
argDisp += sizeof(void *);
break;
case VMFunction::DoubleByValue:
JS_NOT_REACHED("VMCalls with double-size value arguments is not supported.");
masm.passABIArg(MoveOperand(argsBase, argDisp));
argDisp += sizeof(void *);
masm.passABIArg(MoveOperand(argsBase, argDisp));
argDisp += sizeof(void *);
break;
case VMFunction::WordByRef:
masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE));
argDisp += sizeof(void *);
break;
case VMFunction::DoubleByRef:
masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE));
argDisp += 2 * sizeof(void *);
break;
}
}
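Each case above advances argDisp by a fixed number of machine words; a schematic tally (the enum mirrors VMFunction's argument classes; strides are those of the 32-bit target):

#include <cstddef>
#include <cstdio>

enum ArgClass { WordByValue, DoubleByValue, WordByRef, DoubleByRef };

static size_t wordsFor(ArgClass c) {
    switch (c) {
      case WordByValue:   return 1;  // one word, passed by value
      case DoubleByValue: return 2;  // two word-sized pieces (rejected above on ARM)
      case WordByRef:     return 1;  // the address points at a one-word slot
      case DoubleByRef:   return 2;  // a by-ref double still occupies two words
    }
    return 0;
}

int main() {
    const ArgClass args[] = { WordByValue, DoubleByRef, WordByValue };
    size_t argDisp = 0;
    for (ArgClass c : args)
        argDisp += wordsFor(c) * sizeof(void *);  // same stride as the loop above
    std::printf("explicit args span %zu bytes\n", argDisp);
    return 0;
}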