Bug 613125 - nanojit: improve codegen for LIR_q2i in X64 back-end. r=edwsmith.

--HG--
extra : convert_revision : 1f90e61950c44193ea5a1800c06d7dba8240cfd9
Nicholas Nethercote 2010-11-18 19:36:11 -08:00
Parent ea3911e8f5
Commit 5c86e6881e
1 changed file with 10 additions and 3 deletions


@@ -1061,11 +1061,11 @@ namespace nanojit
         Register r = findRegFor(p, GpRegs);
         MOVQSPR(stk_off, r);    // movq [rsp+d8], r
         if (ty == ARGTYPE_I) {
-            // extend int32 to int64
+            // sign extend int32 to int64
             NanoAssert(p->isI());
             MOVSXDR(r, r);
         } else if (ty == ARGTYPE_UI) {
-            // extend uint32 to uint64
+            // zero extend uint32 to uint64
             NanoAssert(p->isI());
             MOVLR(r, r);
         } else {
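
For readers unfamiliar with the x86-64 widening rules these comments refer to: movsxd sign-extends the low 32 bits of the source into the 64-bit destination, while any 32-bit mov implicitly zeroes the destination's upper 32 bits. The following standalone C++ sketch (not from the nanojit tree; the variable names are invented for illustration) shows the two semantics side by side:

    // widen_sketch.cpp -- illustrative only, not part of nanojit.
    #include <cassert>
    #include <cstdint>

    int main() {
        // A 64-bit "register" whose upper 32 bits hold garbage.
        uint64_t r = 0xDEADBEEF80000001ull;

        // MOVSXDR r, r (movsxd): sign-extend the low 32 bits to 64 bits
        // (two's-complement wrap on the narrowing cast, as on mainstream compilers).
        int64_t  sign_ext = (int64_t)(int32_t)r;    // 0xFFFFFFFF80000001
        // MOVLR r, r (32-bit mov): writes the low 32 bits and zeroes the rest.
        uint64_t zero_ext = (uint64_t)(uint32_t)r;  // 0x0000000080000001

        assert(sign_ext == -2147483647LL);
        assert(zero_ext == 0x80000001ull);
        return 0;
    }

This is why asm_stkarg must pick MOVSXDR for ARGTYPE_I and MOVLR for ARGTYPE_UI.
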
@@ -1081,7 +1081,14 @@ namespace nanojit
         Register rr, ra;
         beginOp1Regs(ins, GpRegs, rr, ra);
         NanoAssert(IsGpReg(ra));
-        MOVLR(rr, ra);  // 32bit mov zeros the upper 32bits of the target
+        // If ra==rr we do nothing. This is valid because we don't assume the
+        // upper 32-bits of a 64-bit GPR are zero when doing a 32-bit
+        // operation. More specifically, we widen 32-bit to 64-bit in three
+        // places, all of which explicitly sign- or zero-extend: asm_ui2uq(),
+        // asm_regarg() and asm_stkarg(). For the first this is required, for
+        // the latter two it's unclear if this is required, but it can't hurt.
+        if (ra != rr)
+            MOVLR(rr, ra);
         endOpRegs(ins, rr, ra);
     }
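
The effect of this hunk: LIR_q2i is just a truncation, so only the low 32 bits of its result matter, and (per the new comment) no consumer assumes the upper 32 bits of a GPR are already zero after a 32-bit operation. When the register allocator assigns the same register to the operand and the result, the old code emitted a redundant mov-to-self; the new code emits nothing. A minimal standalone sketch of that decision follows; the Register enum, the name table and the asm_q2i_like helper are invented here, only MOVLR's role mirrors the patch:

    // q2i_sketch.cpp -- standalone illustration, not nanojit's real emitter.
    #include <cstdio>

    enum Register { RAX = 0, RCX, RDX };
    static const char* const gpname32[] = { "eax", "ecx", "edx" };

    // Stand-in for nanojit's MOVLR: a 32-bit reg-to-reg mov, which on x86-64
    // also zeroes the upper 32 bits of the destination register.
    static void MOVLR(Register rd, Register rs) {
        std::printf("mov %s, %s\n", gpname32[rd], gpname32[rs]);
    }

    // Shape of the patched asm_q2i(): skip the mov when source and result
    // share a register, since only the low 32 bits of the result matter and
    // the explicit widening sites (asm_ui2uq, asm_regarg, asm_stkarg) never
    // rely on the upper 32 bits already being zero.
    static void asm_q2i_like(Register rr, Register ra) {
        if (ra != rr)
            MOVLR(rr, ra);
    }

    int main() {
        asm_q2i_like(RAX, RAX);  // emits nothing after this patch
        asm_q2i_like(RAX, RCX);  // emits: mov eax, ecx
        return 0;
    }
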