Mirror of https://github.com/mozilla/pjs.git
Backout alignment-breaking patches for Bug 493821.
Parent: 4b6aaa7edf
Commit: 803a4b60d9
@@ -306,7 +306,8 @@ namespace nanojit
         ensureRoom(1);
         LInsp l = _buf->next();
         l->initOpcodeAndClearResv(LIR_quad);
-        l->i64.imm64 = imm;
+        l->i64.imm64_0 = int32_t(imm);
+        l->i64.imm64_1 = int32_t(imm>>32);
         _buf->commit(1);
         _buf->_stats.lir++;
         return l;
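Below the hunk, a minimal standalone sketch of the two-word immediate scheme this change restores; the QuadImm struct and the splitQuad/joinQuad helpers are illustrative names, not nanojit API:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the LIns i64 payload; illustrative only, not the real layout.
struct QuadImm {
    int32_t lo;   // low  32 bits, analogous to imm64_0
    int32_t hi;   // high 32 bits, analogous to imm64_1
};

// Split a 64-bit immediate into two independently 4-byte-aligned words.
inline QuadImm splitQuad(uint64_t imm) {
    return QuadImm{ int32_t(imm), int32_t(imm >> 32) };
}

// Reassemble; casting through uint32_t keeps the low word from being sign-extended.
inline uint64_t joinQuad(const QuadImm& q) {
    return (uint64_t(uint32_t(q.hi)) << 32) | uint64_t(uint32_t(q.lo));
}

int main() {
    const uint64_t v = 0x123456789ABCDEF0ULL;
    assert(joinQuad(splitQuad(v)) == v);
    return 0;
}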
@@ -468,13 +469,17 @@ namespace nanojit
     uint64_t LIns::imm64() const
     {
         NanoAssert(isconstq());
-        return i64.imm64;
+        return (uint64_t(i64.imm64_1) << 32) | uint64_t(i64.imm64_0);
     }
 
     double LIns::imm64f() const
     {
-        NanoAssert(isconstq());
-        return i64.d;
+        union {
+            double f;
+            uint64_t q;
+        } u;
+        u.q = imm64();
+        return u.f;
     }
 
     inline uint32_t argSlots(uint32_t argc) {
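The restored imm64f() reassembles the integer bits and reinterprets them as a double through a union. A small sketch of the same reinterpretation step in isolation, using memcpy as the strictly portable spelling of that union pun (bitsToDouble is an illustrative name):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret a 64-bit pattern as an IEEE-754 double without changing any bits.
double bitsToDouble(uint64_t q) {
    static_assert(sizeof(double) == sizeof(uint64_t), "double must be 64 bits");
    double d;
    std::memcpy(&d, &q, sizeof d);
    return d;
}

int main() {
    // 0x3FF0000000000000 is the IEEE-754 bit pattern of 1.0.
    std::printf("%g\n", bitsToDouble(0x3FF0000000000000ULL));
    return 0;
}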
@@ -526,13 +531,13 @@ namespace nanojit
     {
         if (v == LIR_qlo) {
             if (i->isconstq())
-                return insImm(i->imm64lo());
+                return insImm(i->imm64_0());
             if (i->isop(LIR_qjoin))
                 return i->oprnd1();
         }
         else if (v == LIR_qhi) {
             if (i->isconstq())
-                return insImm(i->imm64hi());
+                return insImm(i->imm64_1());
             if (i->isop(LIR_qjoin))
                 return i->oprnd2();
         }
@@ -1513,10 +1518,10 @@ namespace nanojit
 #if defined NANOJIT_64BIT
                 sprintf(buf, "#0x%lx", (nj_printf_ld)ref->imm64());
 #else
-                formatImm(ref->imm64hi(), buf);
+                formatImm(ref->imm64_1(), buf);
                 buf += strlen(buf);
                 *buf++ = ':';
-                formatImm(ref->imm64lo(), buf);
+                formatImm(ref->imm64_0(), buf);
 #endif
             }
             else if (ref->isconst()) {
@@ -1564,7 +1569,7 @@ namespace nanojit
 
             case LIR_quad:
             {
-                sprintf(s, "#%X:%X /* %g */", i->imm64hi(), i->imm64lo(), i->imm64f());
+                sprintf(s, "#%X:%X /* %g */", i->imm64_1(), i->imm64_0(), i->imm64f());
                 break;
             }
 
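On 32-bit builds the debug printer above formats a quad as the high word, a colon, then the low word. A tiny sketch of the equivalent formatting outside nanojit:

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t q  = 0x0123456789ABCDEFULL;
    const uint32_t lo = uint32_t(q);         // imm64_0
    const uint32_t hi = uint32_t(q >> 32);   // imm64_1
    char buf[32];
    std::snprintf(buf, sizeof buf, "#%X:%X", hi, lo);
    std::puts(buf);                          // prints "#1234567:89ABCDEF"
    return 0;
}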
@@ -219,11 +219,11 @@ namespace nanojit
         };
 
         // Used for LIR_quad.
-        union i64_type
+        struct i64_type
         {
-            uint64_t imm64;
-            double d;
+            int32_t imm64_0;
+            int32_t imm64_1;
         };
 
 #undef _sign_int
 
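The motivation for the struct is alignment: a union holding uint64_t/double typically requires 8-byte alignment, while two int32_t fields only require 4, so the instruction record can stay 4-byte packed. A minimal sketch of that difference (the type names are illustrative; exact values depend on the ABI):

#include <cstdint>
#include <cstdio>

union WideImm   { uint64_t imm64; double d; };           // often needs 8-byte alignment
struct SplitImm { int32_t imm64_0; int32_t imm64_1; };   // only needs 4-byte alignment

int main() {
    std::printf("alignof(WideImm)  = %zu\n", alignof(WideImm));
    std::printf("alignof(SplitImm) = %zu\n", alignof(SplitImm));
    return 0;
}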
@@ -249,9 +249,9 @@ namespace nanojit
         inline uint8_t imm8() const { return c.imm8a; }
         inline uint8_t imm8b() const { return c.imm8b; }
         inline int32_t imm32() const { NanoAssert(isconst()); return i.imm32; }
+        inline int32_t imm64_0() const { NanoAssert(isconstq()); return i64.imm64_0; }
+        inline int32_t imm64_1() const { NanoAssert(isconstq()); return i64.imm64_1; }
         uint64_t imm64() const;
-        uint32_t imm64lo() const { return uint32_t(imm64()); }
-        uint32_t imm64hi() const { return uint32_t(imm64() >> 32); }
         double imm64f() const;
         Reservation* resv() { return &firstWord; }
         void* payload() const;
@@ -246,7 +246,7 @@ Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
     if (arg->isop(LIR_quad)) {
 
         // XXX use some load-multiple action here from our const pool?
-        int32_t v = arg->imm64lo(); // for the first iteration
+        int32_t v = arg->imm64_0(); // for the first iteration
         for (int k = 0; k < 2; k++) {
             if (r != UnknownReg) {
                 asm_ld_imm(r, v);
@@ -258,7 +258,7 @@ Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd)
                 asm_ld_imm(IP, v);
                 stkd += 4;
             }
-            v = arg->imm64hi(); // for the second iteration
+            v = arg->imm64_1(); // for the second iteration
         }
     } else {
         int d = findMemFor(arg);
@@ -691,9 +691,9 @@ Assembler::asm_store64(LInsp value, int dr, LInsp base)
 
         // XXX use another reg, get rid of dependency
         STR(IP, rb, dr);
-        LD32_nochk(IP, value->imm64lo());
+        LD32_nochk(IP, value->imm64_0());
         STR(IP, rb, dr+4);
-        LD32_nochk(IP, value->imm64hi());
+        LD32_nochk(IP, value->imm64_1());
 
         return;
     }
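This and the following back-end hunks store a quad constant as two 32-bit words, low half at the lower address. A small host-side sketch of that store pattern, assuming a little-endian target as in this ARM back end (store64AsWords is an illustrative helper, not nanojit code):

#include <cassert>
#include <cstdint>
#include <cstring>

// Store a 64-bit constant as two 32-bit words: low word at base+off,
// high word at base+off+4.
void store64AsWords(uint8_t* base, int off, uint64_t value) {
    const uint32_t lo = uint32_t(value);        // imm64_0
    const uint32_t hi = uint32_t(value >> 32);  // imm64_1
    std::memcpy(base + off,     &lo, sizeof lo);
    std::memcpy(base + off + 4, &hi, sizeof hi);
}

int main() {
    uint8_t buf[8];
    store64AsWords(buf, 0, 0x0123456789ABCDEFULL);
    uint64_t q;
    std::memcpy(&q, buf, sizeof q);
    assert(q == 0x0123456789ABCDEFULL);  // holds on a little-endian host
    return 0;
}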
@@ -721,7 +721,7 @@ Assembler::asm_store64(LInsp value, int dr, LInsp base)
             // has the right value
             if (value->isconstq()) {
                 underrunProtect(4*4);
-                asm_quad_nochk(rv, value->imm64lo(), value->imm64hi());
+                asm_quad_nochk(rv, value->imm64_0(), value->imm64_1());
             }
         } else {
             int da = findMemFor(value);
@@ -735,7 +735,7 @@ Assembler::asm_store64(LInsp value, int dr, LInsp base)
 // stick a quad into register rr, where p points to the two
 // 32-bit parts of the quad, optinally also storing at FP+d
 void
-Assembler::asm_quad_nochk(Register rr, int32_t imm64lo, int32_t imm64hi)
+Assembler::asm_quad_nochk(Register rr, int32_t imm64_0, int32_t imm64_1)
 {
     // We're not going to use a slot, because it might be too far
     // away. Instead, we're going to stick a branch in the stream to
@@ -744,14 +744,14 @@ Assembler::asm_quad_nochk(Register rr, int32_t imm64lo, int32_t imm64hi)
 
     // stream should look like:
     //    branch A
-    //    imm64lo
-    //    imm64hi
+    //    imm64_0
+    //    imm64_1
     // A: FLDD PC-16
 
     FLDD(rr, PC, -16);
 
-    *(--_nIns) = (NIns) imm64hi;
-    *(--_nIns) = (NIns) imm64lo;
+    *(--_nIns) = (NIns) imm64_1;
+    *(--_nIns) = (NIns) imm64_0;
 
     JMP_nochk(_nIns+2);
 }
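asm_quad_nochk emits code backwards, so the two literal words land between the branch and the FLDD that loads them. A toy simulation of that backwards emission order into a plain array (the opcode constants are placeholders, not real ARM encodings):

#include <cstdint>
#include <cstdio>

int main() {
    // Four pretend instruction/literal slots; nIns walks backwards like _nIns does.
    uint32_t stream[4] = {0, 0, 0, 0};
    uint32_t* nIns = stream + 4;

    const uint32_t FAKE_FLDD   = 0xF00D0001;  // placeholder for "FLDD rr, PC, -16"
    const uint32_t FAKE_BRANCH = 0xF00D0002;  // placeholder for "branch over the literals"

    *(--nIns) = FAKE_FLDD;      // emitted first, ends up last in the stream
    *(--nIns) = 0x11223344;     // imm64_1 (high word)
    *(--nIns) = 0xAABBCCDD;     // imm64_0 (low word)
    *(--nIns) = FAKE_BRANCH;    // emitted last, ends up first: it jumps over the words

    for (int i = 0; i < 4; ++i)  // prints: branch, imm64_0, imm64_1, FLDD
        std::printf("slot %d: 0x%08X\n", i, (unsigned)stream[i]);
    return 0;
}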
@@ -772,13 +772,13 @@ Assembler::asm_quad(LInsp ins)
         FSTD(rr, FP, d);
 
         underrunProtect(4*4);
-        asm_quad_nochk(rr, ins->imm64lo(), ins->imm64hi());
+        asm_quad_nochk(rr, ins->imm64_0(), ins->imm64_1());
     } else {
         NanoAssert(d);
         STR(IP, FP, d+4);
-        asm_ld_imm(IP, ins->imm64hi());
+        asm_ld_imm(IP, ins->imm64_1());
         STR(IP, FP, d);
-        asm_ld_imm(IP, ins->imm64lo());
+        asm_ld_imm(IP, ins->imm64_0());
     }
 }
 
@@ -407,9 +407,9 @@ namespace nanojit
             // generating a pointless store/load/store sequence
             Register rb = findRegFor(base, GpRegs);
             STW32(L0, dr+4, rb);
-            SET32(value->imm64lo(), L0);
+            SET32(value->imm64_0(), L0);
             STW32(L0, dr, rb);
-            SET32(value->imm64hi(), L0);
+            SET32(value->imm64_1(), L0);
             return;
         }
 
@@ -876,9 +876,9 @@ namespace nanojit
             Register r = registerAlloc(GpRegs);
             _allocator.addFree(r);
             STW32(r, d+4, FP);
-            SET32(ins->imm64lo(), r);
+            SET32(ins->imm64_0(), r);
             STW32(r, d, FP);
-            SET32(ins->imm64hi(), r);
+            SET32(ins->imm64_1(), r);
         }
     }
 
@@ -566,8 +566,8 @@ namespace nanojit
         } else {
             rb = findRegFor(base, GpRegs);
         }
-        STi(rb, dr+4, value->imm64hi());
-        STi(rb, dr, value->imm64lo());
+        STi(rb, dr+4, value->imm64_1());
+        STi(rb, dr, value->imm64_0());
         return;
     }
 
@@ -1172,8 +1172,8 @@ namespace nanojit
         freeRsrcOf(ins, false);
         if (d)
         {
-            STi(FP,d+4,ins->imm64hi());
-            STi(FP,d, ins->imm64lo());
+            STi(FP,d+4,ins->imm64_1());
+            STi(FP,d, ins->imm64_0());
         }
     }
 