Bug 1125202 - SpiderMonkey: Reorganize the x86 BaseAssembler codebase r=jandem

Dan Gohman 2015-01-30 16:05:56 -08:00
Parent ea5f7326a0
Commit 3e84a842b9
17 changed files with 1224 additions and 1192 deletions

View File

@ -784,13 +784,13 @@ AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared *> heap, JSContext *cx
// i.e. ptr >= heapLength + 1 - data-type-byte-size
// (Note that we need >= as this is what codegen uses.)
size_t scalarByteSize = TypedArrayElemSize(access.type());
X86Assembler::setPointer(access.patchLengthAt(code_),
(void*)(heap->byteLength() + 1 - scalarByteSize));
X86Encoding::SetPointer(access.patchLengthAt(code_),
(void*)(heap->byteLength() + 1 - scalarByteSize));
}
void *addr = access.patchOffsetAt(code_);
uint32_t disp = reinterpret_cast<uint32_t>(X86Assembler::getPointer(addr));
uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
MOZ_ASSERT(disp <= INT32_MAX);
X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
X86Encoding::SetPointer(addr, (void *)(heapOffset + disp));
}
#elif defined(JS_CODEGEN_X64)
// Even with signal handling being used for most bounds checks, there may be
@ -806,7 +806,7 @@ AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared *> heap, JSContext *cx
if (access.hasLengthCheck()) {
// See comment above for x86 codegen.
size_t scalarByteSize = TypedArrayElemSize(access.type());
X86Assembler::setInt32(access.patchLengthAt(code_), heapLength + 1 - scalarByteSize);
X86Encoding::SetInt32(access.patchLengthAt(code_), heapLength + 1 - scalarByteSize);
}
}
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
@ -828,9 +828,9 @@ AsmJSModule::restoreHeapToInitialState(ArrayBufferObjectMaybeShared *maybePrevBu
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
const jit::AsmJSHeapAccess &access = heapAccesses_[i];
void *addr = access.patchOffsetAt(code_);
uint8_t *ptr = reinterpret_cast<uint8_t*>(X86Assembler::getPointer(addr));
uint8_t *ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
MOZ_ASSERT(ptr >= ptrBase);
X86Assembler::setPointer(addr, (void *)(ptr - ptrBase));
X86Encoding::SetPointer(addr, (void *)(ptr - ptrBase));
}
}
#endif
@ -1678,7 +1678,7 @@ AsmJSModule::setProfilingEnabled(bool enabled, JSContext *cx)
uint8_t *callerRetAddr = code_ + cs.returnAddressOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
void *callee = X86Assembler::getRel32Target(callerRetAddr);
void *callee = X86Encoding::GetRel32Target(callerRetAddr);
#elif defined(JS_CODEGEN_ARM)
uint8_t *caller = callerRetAddr - 4;
Instruction *callerInsn = reinterpret_cast<Instruction*>(caller);
@ -1706,7 +1706,7 @@ AsmJSModule::setProfilingEnabled(bool enabled, JSContext *cx)
uint8_t *newCallee = enabled ? profilingEntry : entry;
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
X86Assembler::setRel32(callerRetAddr, newCallee);
X86Encoding::SetRel32(callerRetAddr, newCallee);
#elif defined(JS_CODEGEN_ARM)
new (caller) InstBLImm(BOffImm(newCallee - caller), Assembler::Always);
#elif defined(JS_CODEGEN_MIPS)
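For context on the patched length values above (an illustration only, not part of the commit): the immediate that SetInt32/SetPointer write is heapLength + 1 - scalarByteSize, so the codegen's ">= limit" test faults exactly the accesses that would run past the end of the heap. A small worked instance, with the heap size and access width assumed here:

    #include <cstdint>

    // Hedged worked example of the patched bounds-check limit (values are assumptions).
    constexpr uint32_t heapLength     = 0x10000;  // example heap size
    constexpr uint32_t scalarByteSize = 8;        // e.g. a Float64 access
    constexpr uint32_t limit          = heapLength + 1 - scalarByteSize;  // 0xFFF9

    // Codegen faults when ptr >= limit, i.e. exactly when ptr + scalarByteSize > heapLength.
    static_assert(limit == 0xFFF9, "worked example");
    static_assert(0xFFF8 + scalarByteSize <= heapLength, "last in-bounds offset");
    static_assert(0xFFF9 + scalarByteSize >  heapLength, "first out-of-bounds offset");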

View File

@ -382,42 +382,42 @@ SetRegisterToCoercedUndefined(CONTEXT *context, Scalar::Type viewType, AnyRegist
{
if (reg.isFloat()) {
switch (reg.fpu().code()) {
case X86Registers::xmm0: SetXMMRegToNaN(viewType, &XMM_sig(context, 0)); break;
case X86Registers::xmm1: SetXMMRegToNaN(viewType, &XMM_sig(context, 1)); break;
case X86Registers::xmm2: SetXMMRegToNaN(viewType, &XMM_sig(context, 2)); break;
case X86Registers::xmm3: SetXMMRegToNaN(viewType, &XMM_sig(context, 3)); break;
case X86Registers::xmm4: SetXMMRegToNaN(viewType, &XMM_sig(context, 4)); break;
case X86Registers::xmm5: SetXMMRegToNaN(viewType, &XMM_sig(context, 5)); break;
case X86Registers::xmm6: SetXMMRegToNaN(viewType, &XMM_sig(context, 6)); break;
case X86Registers::xmm7: SetXMMRegToNaN(viewType, &XMM_sig(context, 7)); break;
case X86Registers::xmm8: SetXMMRegToNaN(viewType, &XMM_sig(context, 8)); break;
case X86Registers::xmm9: SetXMMRegToNaN(viewType, &XMM_sig(context, 9)); break;
case X86Registers::xmm10: SetXMMRegToNaN(viewType, &XMM_sig(context, 10)); break;
case X86Registers::xmm11: SetXMMRegToNaN(viewType, &XMM_sig(context, 11)); break;
case X86Registers::xmm12: SetXMMRegToNaN(viewType, &XMM_sig(context, 12)); break;
case X86Registers::xmm13: SetXMMRegToNaN(viewType, &XMM_sig(context, 13)); break;
case X86Registers::xmm14: SetXMMRegToNaN(viewType, &XMM_sig(context, 14)); break;
case X86Registers::xmm15: SetXMMRegToNaN(viewType, &XMM_sig(context, 15)); break;
case X86Encoding::xmm0: SetXMMRegToNaN(viewType, &XMM_sig(context, 0)); break;
case X86Encoding::xmm1: SetXMMRegToNaN(viewType, &XMM_sig(context, 1)); break;
case X86Encoding::xmm2: SetXMMRegToNaN(viewType, &XMM_sig(context, 2)); break;
case X86Encoding::xmm3: SetXMMRegToNaN(viewType, &XMM_sig(context, 3)); break;
case X86Encoding::xmm4: SetXMMRegToNaN(viewType, &XMM_sig(context, 4)); break;
case X86Encoding::xmm5: SetXMMRegToNaN(viewType, &XMM_sig(context, 5)); break;
case X86Encoding::xmm6: SetXMMRegToNaN(viewType, &XMM_sig(context, 6)); break;
case X86Encoding::xmm7: SetXMMRegToNaN(viewType, &XMM_sig(context, 7)); break;
case X86Encoding::xmm8: SetXMMRegToNaN(viewType, &XMM_sig(context, 8)); break;
case X86Encoding::xmm9: SetXMMRegToNaN(viewType, &XMM_sig(context, 9)); break;
case X86Encoding::xmm10: SetXMMRegToNaN(viewType, &XMM_sig(context, 10)); break;
case X86Encoding::xmm11: SetXMMRegToNaN(viewType, &XMM_sig(context, 11)); break;
case X86Encoding::xmm12: SetXMMRegToNaN(viewType, &XMM_sig(context, 12)); break;
case X86Encoding::xmm13: SetXMMRegToNaN(viewType, &XMM_sig(context, 13)); break;
case X86Encoding::xmm14: SetXMMRegToNaN(viewType, &XMM_sig(context, 14)); break;
case X86Encoding::xmm15: SetXMMRegToNaN(viewType, &XMM_sig(context, 15)); break;
default: MOZ_CRASH();
}
} else {
switch (reg.gpr().code()) {
case X86Registers::eax: RAX_sig(context) = 0; break;
case X86Registers::ecx: RCX_sig(context) = 0; break;
case X86Registers::edx: RDX_sig(context) = 0; break;
case X86Registers::ebx: RBX_sig(context) = 0; break;
case X86Registers::esp: RSP_sig(context) = 0; break;
case X86Registers::ebp: RBP_sig(context) = 0; break;
case X86Registers::esi: RSI_sig(context) = 0; break;
case X86Registers::edi: RDI_sig(context) = 0; break;
case X86Registers::r8: R8_sig(context) = 0; break;
case X86Registers::r9: R9_sig(context) = 0; break;
case X86Registers::r10: R10_sig(context) = 0; break;
case X86Registers::r11: R11_sig(context) = 0; break;
case X86Registers::r12: R12_sig(context) = 0; break;
case X86Registers::r13: R13_sig(context) = 0; break;
case X86Registers::r14: R14_sig(context) = 0; break;
case X86Registers::r15: R15_sig(context) = 0; break;
case X86Encoding::rax: RAX_sig(context) = 0; break;
case X86Encoding::rcx: RCX_sig(context) = 0; break;
case X86Encoding::rdx: RDX_sig(context) = 0; break;
case X86Encoding::rbx: RBX_sig(context) = 0; break;
case X86Encoding::rsp: RSP_sig(context) = 0; break;
case X86Encoding::rbp: RBP_sig(context) = 0; break;
case X86Encoding::rsi: RSI_sig(context) = 0; break;
case X86Encoding::rdi: RDI_sig(context) = 0; break;
case X86Encoding::r8: R8_sig(context) = 0; break;
case X86Encoding::r9: R9_sig(context) = 0; break;
case X86Encoding::r10: R10_sig(context) = 0; break;
case X86Encoding::r11: R11_sig(context) = 0; break;
case X86Encoding::r12: R12_sig(context) = 0; break;
case X86Encoding::r13: R13_sig(context) = 0; break;
case X86Encoding::r14: R14_sig(context) = 0; break;
case X86Encoding::r15: R15_sig(context) = 0; break;
default: MOZ_CRASH();
}
}
@ -551,22 +551,22 @@ SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
Scalar::Type viewType = heapAccess.type();
switch (heapAccess.loadedReg().fpu().code()) {
case X86Registers::xmm0: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm0); break;
case X86Registers::xmm1: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm1); break;
case X86Registers::xmm2: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm2); break;
case X86Registers::xmm3: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm3); break;
case X86Registers::xmm4: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm4); break;
case X86Registers::xmm5: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm5); break;
case X86Registers::xmm6: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm6); break;
case X86Registers::xmm7: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm7); break;
case X86Registers::xmm8: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm8); break;
case X86Registers::xmm9: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm9); break;
case X86Registers::xmm10: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm10); break;
case X86Registers::xmm11: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm11); break;
case X86Registers::xmm12: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm12); break;
case X86Registers::xmm13: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm13); break;
case X86Registers::xmm14: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm14); break;
case X86Registers::xmm15: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm15); break;
case X86Encoding::xmm0: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm0); break;
case X86Encoding::xmm1: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm1); break;
case X86Encoding::xmm2: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm2); break;
case X86Encoding::xmm3: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm3); break;
case X86Encoding::xmm4: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm4); break;
case X86Encoding::xmm5: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm5); break;
case X86Encoding::xmm6: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm6); break;
case X86Encoding::xmm7: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm7); break;
case X86Encoding::xmm8: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm8); break;
case X86Encoding::xmm9: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm9); break;
case X86Encoding::xmm10: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm10); break;
case X86Encoding::xmm11: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm11); break;
case X86Encoding::xmm12: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm12); break;
case X86Encoding::xmm13: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm13); break;
case X86Encoding::xmm14: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm14); break;
case X86Encoding::xmm15: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm15); break;
default: MOZ_CRASH();
}
@ -575,22 +575,22 @@ SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
return false;
} else {
switch (heapAccess.loadedReg().gpr().code()) {
case X86Registers::eax: state.__rax = 0; break;
case X86Registers::ecx: state.__rcx = 0; break;
case X86Registers::edx: state.__rdx = 0; break;
case X86Registers::ebx: state.__rbx = 0; break;
case X86Registers::esp: state.__rsp = 0; break;
case X86Registers::ebp: state.__rbp = 0; break;
case X86Registers::esi: state.__rsi = 0; break;
case X86Registers::edi: state.__rdi = 0; break;
case X86Registers::r8: state.__r8 = 0; break;
case X86Registers::r9: state.__r9 = 0; break;
case X86Registers::r10: state.__r10 = 0; break;
case X86Registers::r11: state.__r11 = 0; break;
case X86Registers::r12: state.__r12 = 0; break;
case X86Registers::r13: state.__r13 = 0; break;
case X86Registers::r14: state.__r14 = 0; break;
case X86Registers::r15: state.__r15 = 0; break;
case X86Encoding::rax: state.__rax = 0; break;
case X86Encoding::rcx: state.__rcx = 0; break;
case X86Encoding::rdx: state.__rdx = 0; break;
case X86Encoding::rbx: state.__rbx = 0; break;
case X86Encoding::rsp: state.__rsp = 0; break;
case X86Encoding::rbp: state.__rbp = 0; break;
case X86Encoding::rsi: state.__rsi = 0; break;
case X86Encoding::rdi: state.__rdi = 0; break;
case X86Encoding::r8: state.__r8 = 0; break;
case X86Encoding::r9: state.__r9 = 0; break;
case X86Encoding::r10: state.__r10 = 0; break;
case X86Encoding::r11: state.__r11 = 0; break;
case X86Encoding::r12: state.__r12 = 0; break;
case X86Encoding::r13: state.__r13 = 0; break;
case X86Encoding::r14: state.__r14 = 0; break;
case X86Encoding::r15: state.__r15 = 0; break;
default: MOZ_CRASH();
}
}

View File

@ -50,7 +50,7 @@ TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader
{
while (reader.more()) {
size_t offset = reader.readUnsigned();
void **ptr = X86Assembler::getPointerRef(buffer + offset);
void **ptr = X86Encoding::GetPointerRef(buffer + offset);
#ifdef JS_PUNBOX64
// All pointers on x64 will have the top bits cleared. If those bits

View File

@ -68,7 +68,7 @@ class Operand
{ }
explicit Operand(AbsoluteAddress address)
: kind_(MEM_ADDRESS32),
disp_(X86Assembler::addressImmediate(address.addr))
disp_(X86Encoding::AddressImmediate(address.addr))
{ }
Address toAddress() const {
@ -216,10 +216,10 @@ class AssemblerX86Shared : public AssemblerShared
}
protected:
X86Assembler masm;
X86Encoding::BaseAssembler masm;
typedef X86Assembler::JmpSrc JmpSrc;
typedef X86Assembler::JmpDst JmpDst;
typedef X86Encoding::JmpSrc JmpSrc;
typedef X86Encoding::JmpDst JmpDst;
public:
AssemblerX86Shared()
@ -229,23 +229,23 @@ class AssemblerX86Shared : public AssemblerShared
}
enum Condition {
Equal = X86Assembler::ConditionE,
NotEqual = X86Assembler::ConditionNE,
Above = X86Assembler::ConditionA,
AboveOrEqual = X86Assembler::ConditionAE,
Below = X86Assembler::ConditionB,
BelowOrEqual = X86Assembler::ConditionBE,
GreaterThan = X86Assembler::ConditionG,
GreaterThanOrEqual = X86Assembler::ConditionGE,
LessThan = X86Assembler::ConditionL,
LessThanOrEqual = X86Assembler::ConditionLE,
Overflow = X86Assembler::ConditionO,
Signed = X86Assembler::ConditionS,
NotSigned = X86Assembler::ConditionNS,
Zero = X86Assembler::ConditionE,
NonZero = X86Assembler::ConditionNE,
Parity = X86Assembler::ConditionP,
NoParity = X86Assembler::ConditionNP
Equal = X86Encoding::ConditionE,
NotEqual = X86Encoding::ConditionNE,
Above = X86Encoding::ConditionA,
AboveOrEqual = X86Encoding::ConditionAE,
Below = X86Encoding::ConditionB,
BelowOrEqual = X86Encoding::ConditionBE,
GreaterThan = X86Encoding::ConditionG,
GreaterThanOrEqual = X86Encoding::ConditionGE,
LessThan = X86Encoding::ConditionL,
LessThanOrEqual = X86Encoding::ConditionLE,
Overflow = X86Encoding::ConditionO,
Signed = X86Encoding::ConditionS,
NotSigned = X86Encoding::ConditionNS,
Zero = X86Encoding::ConditionE,
NonZero = X86Encoding::ConditionNE,
Parity = X86Encoding::ConditionP,
NoParity = X86Encoding::ConditionNP
};
// If this bit is set, the vucomisd operands have to be inverted.
@ -788,10 +788,10 @@ class AssemblerX86Shared : public AssemblerShared
void jSrc(Condition cond, Label *label) {
if (label->bound()) {
// The jump can be immediately encoded to the correct destination.
masm.jCC_i(static_cast<X86Assembler::Condition>(cond), JmpDst(label->offset()));
masm.jCC_i(static_cast<X86Encoding::Condition>(cond), JmpDst(label->offset()));
} else {
// Thread the jump list through the unpatched jump targets.
JmpSrc j = masm.jCC(static_cast<X86Assembler::Condition>(cond));
JmpSrc j = masm.jCC(static_cast<X86Encoding::Condition>(cond));
JmpSrc prev = JmpSrc(label->use(j.offset()));
masm.setNextJump(j, prev);
}
@ -823,7 +823,7 @@ class AssemblerX86Shared : public AssemblerShared
}
JmpSrc jSrc(Condition cond, RepatchLabel *label) {
JmpSrc j = masm.jCC(static_cast<X86Assembler::Condition>(cond));
JmpSrc j = masm.jCC(static_cast<X86Encoding::Condition>(cond));
if (label->bound()) {
// The jump can be immediately patched to the correct destination.
masm.linkJump(j, JmpDst(label->offset()));
@ -869,12 +869,12 @@ class AssemblerX86Shared : public AssemblerShared
}
void cmpEAX(Label *label) { cmpSrc(label); }
void bind(Label *label) {
X86Assembler::JmpDst dst(masm.label());
JmpDst dst(masm.label());
if (label->used()) {
bool more;
X86Assembler::JmpSrc jmp(label->offset());
JmpSrc jmp(label->offset());
do {
X86Assembler::JmpSrc next;
JmpSrc next;
more = masm.nextJump(jmp, &next);
masm.linkJump(jmp, dst);
jmp = next;
@ -883,9 +883,9 @@ class AssemblerX86Shared : public AssemblerShared
label->bind(dst.offset());
}
void bind(RepatchLabel *label) {
X86Assembler::JmpDst dst(masm.label());
JmpDst dst(masm.label());
if (label->used()) {
X86Assembler::JmpSrc jmp(label->offset());
JmpSrc jmp(label->offset());
masm.linkJump(jmp, dst);
}
label->bind(dst.offset());
@ -898,9 +898,9 @@ class AssemblerX86Shared : public AssemblerShared
void retarget(Label *label, Label *target) {
if (label->used()) {
bool more;
X86Assembler::JmpSrc jmp(label->offset());
JmpSrc jmp(label->offset());
do {
X86Assembler::JmpSrc next;
JmpSrc next;
more = masm.nextJump(jmp, &next);
if (target->bound()) {
@ -922,15 +922,15 @@ class AssemblerX86Shared : public AssemblerShared
if (label->used()) {
intptr_t src = label->offset();
do {
intptr_t next = reinterpret_cast<intptr_t>(X86Assembler::getPointer(raw + src));
X86Assembler::setPointer(raw + src, address);
intptr_t next = reinterpret_cast<intptr_t>(X86Encoding::GetPointer(raw + src));
X86Encoding::SetPointer(raw + src, address);
src = next;
} while (src != AbsoluteLabel::INVALID_OFFSET);
}
label->bind();
}
// See Bind and X86Assembler::setPointer.
// See Bind and X86Encoding::SetPointer.
size_t labelOffsetToPatchOffset(size_t offset) {
return offset - sizeof(void*);
}
@ -1040,7 +1040,7 @@ class AssemblerX86Shared : public AssemblerShared
masm.cmpw_rr(rhs.code(), lhs.code());
}
void setCC(Condition cond, Register r) {
masm.setCC_r(static_cast<X86Assembler::Condition>(cond), r.code());
masm.setCC_r(static_cast<X86Encoding::Condition>(cond), r.code());
}
void testb(Register rhs, Register lhs) {
MOZ_ASSERT(GeneralRegisterSet(Registers::SingleByteRegs).has(rhs));
@ -1694,19 +1694,19 @@ class AssemblerX86Shared : public AssemblerShared
}
}
void vcmpeqps(const Operand &src1, FloatRegister src0, FloatRegister dest) {
vcmpps(X86Assembler::ConditionCmp_EQ, src1, src0, dest);
vcmpps(X86Encoding::ConditionCmp_EQ, src1, src0, dest);
}
void vcmpltps(const Operand &src1, FloatRegister src0, FloatRegister dest) {
vcmpps(X86Assembler::ConditionCmp_LT, src1, src0, dest);
vcmpps(X86Encoding::ConditionCmp_LT, src1, src0, dest);
}
void vcmpleps(const Operand &src1, FloatRegister src0, FloatRegister dest) {
vcmpps(X86Assembler::ConditionCmp_LE, src1, src0, dest);
vcmpps(X86Encoding::ConditionCmp_LE, src1, src0, dest);
}
void vcmpunordps(const Operand &src1, FloatRegister src0, FloatRegister dest) {
vcmpps(X86Assembler::ConditionCmp_UNORD, src1, src0, dest);
vcmpps(X86Encoding::ConditionCmp_UNORD, src1, src0, dest);
}
void vcmpneqps(const Operand &src1, FloatRegister src0, FloatRegister dest) {
vcmpps(X86Assembler::ConditionCmp_NEQ, src1, src0, dest);
vcmpps(X86Encoding::ConditionCmp_NEQ, src1, src0, dest);
}
void vrcpps(const Operand &src, FloatRegister dest) {
MOZ_ASSERT(HasSSE2());
@ -2333,11 +2333,11 @@ class AssemblerX86Shared : public AssemblerShared
MOZ_ASSERT(HasSSE2());
masm.vsqrtss_rr(src1.code(), src0.code(), dest.code());
}
void vroundsd(X86Assembler::RoundingMode mode, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
void vroundsd(X86Encoding::RoundingMode mode, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
MOZ_ASSERT(HasSSE41());
masm.vroundsd_irr(mode, src1.code(), src0.code(), dest.code());
}
void vroundss(X86Assembler::RoundingMode mode, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
void vroundss(X86Encoding::RoundingMode mode, FloatRegister src1, FloatRegister src0, FloatRegister dest) {
MOZ_ASSERT(HasSSE41());
masm.vroundss_irr(mode, src1.code(), src0.code(), dest.code());
}
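The jSrc/bind/retarget changes above only rename types; the underlying scheme is unchanged: while a Label is unbound, each new jump records the offset of the previous unpatched jump in its own rel32 field, so the label's offset is the head of a linked list that bind() later walks with nextJump()/linkJump(). A standalone hedged sketch of that idea (toy types of my own, not the BaseAssembler API):

    #include <cstdint>
    #include <vector>

    // Toy model of threading a jump list through unpatched jump fields.
    struct JumpListSketch {
        static const int32_t INVALID = -1;
        std::vector<int32_t> field;     // stand-in for each jump's rel32 slot
        int32_t head = INVALID;         // like label->offset() while unbound

        int32_t emitJump() {            // like masm.jCC() + setNextJump()
            field.push_back(head);      // remember the previous unpatched jump
            head = int32_t(field.size()) - 1;
            return head;
        }
        void bind(int32_t target) {     // like AssemblerX86Shared::bind()
            for (int32_t jmp = head; jmp != INVALID; ) {
                int32_t next = field[jmp];   // nextJump()
                field[jmp] = target - jmp;   // linkJump(): write the real displacement
                jmp = next;
            }
            head = INVALID;
        }
    };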

The diff for this file was not shown because it is too large.

View File

@ -1601,7 +1601,7 @@ CodeGeneratorX86Shared::visitFloor(LFloor *lir)
bailoutFrom(&bailout, lir->snapshot());
// Round toward -Infinity.
masm.vroundsd(X86Assembler::RoundDown, input, scratch, scratch);
masm.vroundsd(X86Encoding::RoundDown, input, scratch, scratch);
bailoutCvttsd2si(scratch, output, lir->snapshot());
} else {
@ -1658,7 +1658,7 @@ CodeGeneratorX86Shared::visitFloorF(LFloorF *lir)
bailoutFrom(&bailout, lir->snapshot());
// Round toward -Infinity.
masm.vroundss(X86Assembler::RoundDown, input, scratch, scratch);
masm.vroundss(X86Encoding::RoundDown, input, scratch, scratch);
bailoutCvttss2si(scratch, output, lir->snapshot());
} else {
@ -1723,7 +1723,7 @@ CodeGeneratorX86Shared::visitCeil(LCeil *lir)
// x <= -1 or x > -0
masm.bind(&lessThanMinusOne);
// Round toward +Infinity.
masm.vroundsd(X86Assembler::RoundUp, input, scratch, scratch);
masm.vroundsd(X86Encoding::RoundUp, input, scratch, scratch);
bailoutCvttsd2si(scratch, output, lir->snapshot());
return;
}
@ -1775,7 +1775,7 @@ CodeGeneratorX86Shared::visitCeilF(LCeilF *lir)
// x <= -1 or x > -0
masm.bind(&lessThanMinusOne);
// Round toward +Infinity.
masm.vroundss(X86Assembler::RoundUp, input, scratch, scratch);
masm.vroundss(X86Encoding::RoundUp, input, scratch, scratch);
bailoutCvttss2si(scratch, output, lir->snapshot());
return;
}
@ -1850,7 +1850,7 @@ CodeGeneratorX86Shared::visitRound(LRound *lir)
// Add 0.5 and round toward -Infinity. The result is stored in the temp
// register (currently contains 0.5).
masm.addDouble(input, temp);
masm.vroundsd(X86Assembler::RoundDown, temp, scratch, scratch);
masm.vroundsd(X86Encoding::RoundDown, temp, scratch, scratch);
// Truncate.
bailoutCvttsd2si(scratch, output, lir->snapshot());
@ -1933,7 +1933,7 @@ CodeGeneratorX86Shared::visitRoundF(LRoundF *lir)
// Add 0.5 and round toward -Infinity. The result is stored in the temp
// register (currently contains 0.5).
masm.addFloat32(input, temp);
masm.vroundss(X86Assembler::RoundDown, temp, scratch, scratch);
masm.vroundss(X86Encoding::RoundDown, temp, scratch, scratch);
// Truncate.
bailoutCvttss2si(scratch, output, lir->snapshot());

View File

@ -0,0 +1,222 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_shared_Constants_x86_shared_h
#define jit_shared_Constants_x86_shared_h
namespace js {
namespace jit {
namespace X86Encoding {
enum RegisterID {
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi
#ifdef JS_CODEGEN_X64
,r8, r9, r10, r11, r12, r13, r14, r15
#endif
,invalid_reg
};
enum HRegisterID {
ah = rsp,
ch = rbp,
dh = rsi,
bh = rdi
};
enum XMMRegisterID {
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
#ifdef JS_CODEGEN_X64
,xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
#endif
,invalid_xmm
};
inline const char *XMMRegName(XMMRegisterID reg)
{
static const char *const names[] = {
"%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7"
#ifdef JS_CODEGEN_X64
,"%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15"
#endif
};
MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
return names[reg];
}
#ifdef JS_CODEGEN_X64
inline const char *GPReg64Name(RegisterID reg)
{
static const char *const names[] = {
"%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi"
#ifdef JS_CODEGEN_X64
,"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
#endif
};
MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
return names[reg];
}
#endif
inline const char *GPReg32Name(RegisterID reg)
{
static const char *const names[] = {
"%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi"
#ifdef JS_CODEGEN_X64
,"%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d"
#endif
};
MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
return names[reg];
}
inline const char *GPReg16Name(RegisterID reg)
{
static const char *const names[] = {
"%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di"
#ifdef JS_CODEGEN_X64
,"%r8w", "%r9w", "%r10w", "%r11w", "%r12w", "%r13w", "%r14w", "%r15w"
#endif
};
MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
return names[reg];
}
inline const char *GPReg8Name(RegisterID reg)
{
static const char *const names[] = {
"%al", "%cl", "%dl", "%bl"
#ifdef JS_CODEGEN_X64
,"%spl", "%bpl", "%sil", "%dil",
"%r8b", "%r9b", "%r10b", "%r11b", "%r12b", "%r13b", "%r14b", "%r15b"
#endif
};
MOZ_ASSERT(size_t(reg) < mozilla::ArrayLength(names));
return names[reg];
}
inline const char *GPRegName(RegisterID reg)
{
#ifdef JS_CODEGEN_X64
return GPReg64Name(reg);
#else
return GPReg32Name(reg);
#endif
}
inline bool HasSubregL(RegisterID reg)
{
#ifdef JS_CODEGEN_X64
// In 64-bit mode, all registers have an 8-bit lo subreg.
return true;
#else
// In 32-bit mode, only the first four registers do.
return reg <= rbx;
#endif
}
inline bool HasSubregH(RegisterID reg)
{
// The first four registers always have h registers. However, note that
// on x64, h registers may not be used in instructions using REX
// prefixes. Also note that this may depend on what other registers are
// used!
return reg <= rbx;
}
inline HRegisterID GetSubregH(RegisterID reg)
{
MOZ_ASSERT(HasSubregH(reg));
return HRegisterID(reg + 4);
}
inline const char *HRegName8(HRegisterID reg)
{
static const char *const names[] = {
"%ah", "%ch", "%dh", "%bh"
};
size_t index = reg - GetSubregH(rax);
MOZ_ASSERT(index < mozilla::ArrayLength(names));
return names[index];
}
enum Condition {
ConditionO,
ConditionNO,
ConditionB,
ConditionAE,
ConditionE,
ConditionNE,
ConditionBE,
ConditionA,
ConditionS,
ConditionNS,
ConditionP,
ConditionNP,
ConditionL,
ConditionGE,
ConditionLE,
ConditionG,
ConditionC = ConditionB,
ConditionNC = ConditionAE
};
inline const char *CCName(Condition cc)
{
static const char *const names[] = {
"o ", "no", "b ", "ae", "e ", "ne", "be", "a ",
"s ", "ns", "p ", "np", "l ", "ge", "le", "g "
};
MOZ_ASSERT(size_t(cc) < mozilla::ArrayLength(names));
return names[cc];
}
// Conditions for CMP instructions (CMPSS, CMPSD, CMPPS, CMPPD, etc).
enum ConditionCmp {
ConditionCmp_EQ = 0x0,
ConditionCmp_LT = 0x1,
ConditionCmp_LE = 0x2,
ConditionCmp_UNORD = 0x3,
ConditionCmp_NEQ = 0x4,
ConditionCmp_NLT = 0x5,
ConditionCmp_NLE = 0x6,
ConditionCmp_ORD = 0x7,
};
// Rounding modes for ROUNDSD.
enum RoundingMode {
RoundToNearest = 0x0,
RoundDown = 0x1,
RoundUp = 0x2,
RoundToZero = 0x3
};
// Test whether the given address will fit in an address immediate field.
// This is always true on x86, but on x64 it's only true for addresses which
// fit in the 32-bit immediate field.
inline bool IsAddressImmediate(const void *address)
{
intptr_t value = reinterpret_cast<intptr_t>(address);
int32_t immediate = static_cast<int32_t>(value);
return value == immediate;
}
// Convert the given address to a 32-bit immediate field value. This is a
// no-op on x86, but on x64 it asserts that the address is actually a valid
// address immediate.
inline int32_t AddressImmediate(const void *address)
{
MOZ_ASSERT(IsAddressImmediate(address));
return static_cast<int32_t>(reinterpret_cast<intptr_t>(address));
}
} // namespace X86Encoding
} // namespace jit
} // namespace js
#endif /* jit_shared_Constants_x86_shared_h */
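As a quick sanity illustration of the new shared constants (a sketch under the assumption that the header is usable stand-alone with the usual Mozilla headers; none of this is in the commit):

    #include <cstdint>
    #include <cstring>
    #include "jit/shared/Constants-x86-shared.h"   // assumed include path
    #include "mozilla/Assertions.h"

    // Hedged usage sketch of the new X86Encoding helpers.
    static void ConstantsExample() {
        using namespace js::jit::X86Encoding;
        MOZ_ASSERT(strcmp(GPReg32Name(rax), "%eax") == 0);
        MOZ_ASSERT(GetSubregH(rbx) == bh);        // %bh shares encoding 7 with rdi
        void *p = reinterpret_cast<void *>(uintptr_t(0x1000));
        MOZ_ASSERT(IsAddressImmediate(p));        // always true on x86; on x64 only if it fits in 32 bits
        MOZ_ASSERT(AddressImmediate(p) == 0x1000);
    }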

View File

@ -0,0 +1,317 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_shared_Encoding_x86_shared_h
#define jit_shared_Encoding_x86_shared_h
#include "jit/shared/Constants-x86-shared.h"
namespace js {
namespace jit {
namespace X86Encoding {
static const size_t MaxInstructionSize = 16;
enum OneByteOpcodeID {
OP_ADD_EbGb = 0x00,
OP_ADD_EvGv = 0x01,
OP_ADD_GvEv = 0x03,
OP_ADD_EAXIv = 0x05,
OP_OR_EbGb = 0x08,
OP_OR_EvGv = 0x09,
OP_OR_GvEv = 0x0B,
OP_OR_EAXIv = 0x0D,
OP_2BYTE_ESCAPE = 0x0F,
OP_AND_EbGb = 0x20,
OP_AND_EvGv = 0x21,
OP_AND_GvEv = 0x23,
OP_AND_EAXIv = 0x25,
OP_SUB_EbGb = 0x28,
OP_SUB_EvGv = 0x29,
OP_SUB_GvEv = 0x2B,
OP_SUB_EAXIv = 0x2D,
PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
OP_XOR_EbGb = 0x30,
OP_XOR_EvGv = 0x31,
OP_XOR_GvEv = 0x33,
OP_XOR_EAXIv = 0x35,
OP_CMP_EvGv = 0x39,
OP_CMP_GvEv = 0x3B,
OP_CMP_EAXIv = 0x3D,
#ifdef JS_CODEGEN_X64
PRE_REX = 0x40,
#endif
OP_PUSH_EAX = 0x50,
OP_POP_EAX = 0x58,
#ifdef JS_CODEGEN_X86
OP_PUSHA = 0x60,
OP_POPA = 0x61,
#endif
#ifdef JS_CODEGEN_X64
OP_MOVSXD_GvEv = 0x63,
#endif
PRE_OPERAND_SIZE = 0x66,
PRE_SSE_66 = 0x66,
OP_PUSH_Iz = 0x68,
OP_IMUL_GvEvIz = 0x69,
OP_PUSH_Ib = 0x6a,
OP_IMUL_GvEvIb = 0x6b,
OP_JCC_rel8 = 0x70,
OP_GROUP1_EbIb = 0x80,
OP_GROUP1_EvIz = 0x81,
OP_GROUP1_EvIb = 0x83,
OP_TEST_EbGb = 0x84,
OP_TEST_EvGv = 0x85,
OP_XCHG_GvEv = 0x87,
OP_MOV_EbGv = 0x88,
OP_MOV_EvGv = 0x89,
OP_MOV_GvEb = 0x8A,
OP_MOV_GvEv = 0x8B,
OP_LEA = 0x8D,
OP_GROUP1A_Ev = 0x8F,
OP_NOP = 0x90,
OP_PUSHFLAGS = 0x9C,
OP_POPFLAGS = 0x9D,
OP_CDQ = 0x99,
OP_MOV_EAXOv = 0xA1,
OP_MOV_OvEAX = 0xA3,
OP_TEST_EAXIb = 0xA8,
OP_TEST_EAXIv = 0xA9,
OP_MOV_EAXIv = 0xB8,
OP_GROUP2_EvIb = 0xC1,
OP_RET_Iz = 0xC2,
PRE_VEX_C4 = 0xC4,
PRE_VEX_C5 = 0xC5,
OP_RET = 0xC3,
OP_GROUP11_EvIb = 0xC6,
OP_GROUP11_EvIz = 0xC7,
OP_INT3 = 0xCC,
OP_GROUP2_Ev1 = 0xD1,
OP_GROUP2_EvCL = 0xD3,
OP_FPU6 = 0xDD,
OP_FPU6_F32 = 0xD9,
OP_CALL_rel32 = 0xE8,
OP_JMP_rel32 = 0xE9,
OP_JMP_rel8 = 0xEB,
PRE_LOCK = 0xF0,
PRE_SSE_F2 = 0xF2,
PRE_SSE_F3 = 0xF3,
OP_HLT = 0xF4,
OP_GROUP3_EbIb = 0xF6,
OP_GROUP3_Ev = 0xF7,
OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
OP_GROUP5_Ev = 0xFF
};
enum class ShiftID {
vpsrld = 2,
vpsrlq = 2,
vpsrldq = 3,
vpsrad = 4,
vpslld = 6,
vpsllq = 6
};
enum TwoByteOpcodeID {
OP2_UD2 = 0x0B,
OP2_MOVSD_VsdWsd = 0x10,
OP2_MOVPS_VpsWps = 0x10,
OP2_MOVSD_WsdVsd = 0x11,
OP2_MOVPS_WpsVps = 0x11,
OP2_MOVHLPS_VqUq = 0x12,
OP2_MOVSLDUP_VpsWps = 0x12,
OP2_UNPCKLPS_VsdWsd = 0x14,
OP2_UNPCKHPS_VsdWsd = 0x15,
OP2_MOVLHPS_VqUq = 0x16,
OP2_MOVSHDUP_VpsWps = 0x16,
OP2_MOVAPD_VsdWsd = 0x28,
OP2_MOVAPS_VsdWsd = 0x28,
OP2_MOVAPS_WsdVsd = 0x29,
OP2_CVTSI2SD_VsdEd = 0x2A,
OP2_CVTTSD2SI_GdWsd = 0x2C,
OP2_UCOMISD_VsdWsd = 0x2E,
OP2_MOVMSKPD_EdVd = 0x50,
OP2_ANDPS_VpsWps = 0x54,
OP2_ANDNPS_VpsWps = 0x55,
OP2_ORPS_VpsWps = 0x56,
OP2_XORPS_VpsWps = 0x57,
OP2_ADDSD_VsdWsd = 0x58,
OP2_ADDPS_VpsWps = 0x58,
OP2_MULSD_VsdWsd = 0x59,
OP2_MULPS_VpsWps = 0x59,
OP2_CVTSS2SD_VsdEd = 0x5A,
OP2_CVTSD2SS_VsdEd = 0x5A,
OP2_CVTTPS2DQ_VdqWps = 0x5B,
OP2_CVTDQ2PS_VpsWdq = 0x5B,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_SUBPS_VpsWps = 0x5C,
OP2_MINSD_VsdWsd = 0x5D,
OP2_MINSS_VssWss = 0x5D,
OP2_MINPS_VpsWps = 0x5D,
OP2_DIVSD_VsdWsd = 0x5E,
OP2_DIVPS_VpsWps = 0x5E,
OP2_MAXSD_VsdWsd = 0x5F,
OP2_MAXSS_VssWss = 0x5F,
OP2_MAXPS_VpsWps = 0x5F,
OP2_SQRTSD_VsdWsd = 0x51,
OP2_SQRTSS_VssWss = 0x51,
OP2_SQRTPS_VpsWps = 0x51,
OP2_RSQRTPS_VpsWps = 0x52,
OP2_RCPPS_VpsWps = 0x53,
OP2_ANDPD_VpdWpd = 0x54,
OP2_ORPD_VpdWpd = 0x56,
OP2_XORPD_VpdWpd = 0x57,
OP2_PCMPGTD_VdqWdq = 0x66,
OP2_MOVD_VdEd = 0x6E,
OP2_MOVDQ_VsdWsd = 0x6F,
OP2_MOVDQ_VdqWdq = 0x6F,
OP2_PSHUFD_VdqWdqIb = 0x70,
OP2_PSLLD_UdqIb = 0x72,
OP2_PSRAD_UdqIb = 0x72,
OP2_PSRLD_UdqIb = 0x72,
OP2_PSRLDQ_Vd = 0x73,
OP2_PCMPEQW = 0x75,
OP2_PCMPEQD_VdqWdq = 0x76,
OP2_MOVD_EdVd = 0x7E,
OP2_MOVDQ_WdqVdq = 0x7F,
OP2_JCC_rel32 = 0x80,
OP_SETCC = 0x90,
OP_FENCE = 0xAE,
OP2_IMUL_GvEv = 0xAF,
OP2_CMPXCHG_GvEb = 0xB0,
OP2_CMPXCHG_GvEw = 0xB1,
OP2_BSR_GvEv = 0xBD,
OP2_MOVSX_GvEb = 0xBE,
OP2_MOVSX_GvEw = 0xBF,
OP2_MOVZX_GvEb = 0xB6,
OP2_MOVZX_GvEw = 0xB7,
OP2_XADD_EbGb = 0xC0,
OP2_XADD_EvGv = 0xC1,
OP2_CMPPS_VpsWps = 0xC2,
OP2_PEXTRW_GdUdIb = 0xC5,
OP2_SHUFPS_VpsWpsIb = 0xC6,
OP2_PSRLD_VdqWdq = 0xD2,
OP2_PANDDQ_VdqWdq = 0xDB,
OP2_PANDNDQ_VdqWdq = 0xDF,
OP2_PSRAD_VdqWdq = 0xE2,
OP2_PORDQ_VdqWdq = 0xEB,
OP2_PXORDQ_VdqWdq = 0xEF,
OP2_PSLLD_VdqWdq = 0xF2,
OP2_PMULUDQ_VdqWdq = 0xF4,
OP2_PSUBD_VdqWdq = 0xFA,
OP2_PADDD_VdqWdq = 0xFE
};
enum ThreeByteOpcodeID {
OP3_ROUNDSS_VsdWsd = 0x0A,
OP3_ROUNDSD_VsdWsd = 0x0B,
OP3_BLENDVPS_VdqWdq = 0x14,
OP3_PEXTRD_EdVdqIb = 0x16,
OP3_BLENDPS_VpsWpsIb = 0x0C,
OP3_PTEST_VdVd = 0x17,
OP3_INSERTPS_VpsUps = 0x21,
OP3_PINSRD_VdqEdIb = 0x22,
OP3_PMULLD_VdqWdq = 0x40,
OP3_VBLENDVPS_VdqWdq = 0x4A
};
// Test whether the given opcode should be printed with its operands reversed.
inline bool IsXMMReversedOperands(TwoByteOpcodeID opcode)
{
switch (opcode) {
case OP2_MOVSD_WsdVsd: // also OP2_MOVPS_WpsVps
case OP2_MOVAPS_WsdVsd:
case OP2_MOVDQ_WdqVdq:
case OP3_PEXTRD_EdVdqIb:
return true;
default:
break;
}
return false;
}
enum ThreeByteEscape {
ESCAPE_38 = 0x38,
ESCAPE_3A = 0x3A
};
enum VexOperandType {
VEX_PS = 0,
VEX_PD = 1,
VEX_SS = 2,
VEX_SD = 3
};
inline OneByteOpcodeID jccRel8(Condition cond)
{
return OneByteOpcodeID(OP_JCC_rel8 + cond);
}
inline TwoByteOpcodeID jccRel32(Condition cond)
{
return TwoByteOpcodeID(OP2_JCC_rel32 + cond);
}
inline TwoByteOpcodeID setccOpcode(Condition cond)
{
return TwoByteOpcodeID(OP_SETCC + cond);
}
enum GroupOpcodeID {
GROUP1_OP_ADD = 0,
GROUP1_OP_OR = 1,
GROUP1_OP_ADC = 2,
GROUP1_OP_AND = 4,
GROUP1_OP_SUB = 5,
GROUP1_OP_XOR = 6,
GROUP1_OP_CMP = 7,
GROUP1A_OP_POP = 0,
GROUP2_OP_SHL = 4,
GROUP2_OP_SHR = 5,
GROUP2_OP_SAR = 7,
GROUP3_OP_TEST = 0,
GROUP3_OP_NOT = 2,
GROUP3_OP_NEG = 3,
GROUP3_OP_IMUL = 5,
GROUP3_OP_DIV = 6,
GROUP3_OP_IDIV = 7,
GROUP5_OP_INC = 0,
GROUP5_OP_DEC = 1,
GROUP5_OP_CALLN = 2,
GROUP5_OP_JMPN = 4,
GROUP5_OP_PUSH = 6,
FPU6_OP_FLD = 0,
FPU6_OP_FISTTP = 1,
FPU6_OP_FSTP = 3,
GROUP11_MOV = 0
};
static const RegisterID noBase = rbp;
static const RegisterID hasSib = rsp;
static const RegisterID noIndex = rsp;
#ifdef JS_CODEGEN_X64
static const RegisterID noBase2 = r13;
static const RegisterID hasSib2 = r12;
#endif
enum ModRmMode {
ModRmMemoryNoDisp,
ModRmMemoryDisp8,
ModRmMemoryDisp32,
ModRmRegister
};
} // namespace X86Encoding
} // namespace jit
} // namespace js
#endif /* jit_shared_Encoding_x86_shared_h */
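The jccRel8/jccRel32/setccOpcode helpers above lean on the x86 encoding packing the condition code into the low nibble of the opcode. A brief illustration (the checks are mine, not part of the commit):

    #include "jit/shared/Encoding-x86-shared.h"   // assumed include path
    #include "mozilla/Assertions.h"

    static void OpcodeExample() {
        using namespace js::jit::X86Encoding;
        MOZ_ASSERT(jccRel8(ConditionE)     == 0x74);  // "je rel8"
        MOZ_ASSERT(jccRel32(ConditionNE)   == 0x85);  // 0F 85: "jne rel32"
        MOZ_ASSERT(setccOpcode(ConditionA) == 0x97);  // 0F 97: "seta"
    }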

View File

@ -213,17 +213,17 @@ class MacroAssemblerX86Shared : public Assembler
}
void atomic_cmpxchg8(Register newval, const Operand &addr, Register oldval_and_result) {
// %eax must be explicitly provided for calling clarity.
MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
MOZ_ASSERT(oldval_and_result.code() == X86Encoding::rax);
lock_cmpxchg8(newval, addr);
}
void atomic_cmpxchg16(Register newval, const Operand &addr, Register oldval_and_result) {
// %eax must be explicitly provided for calling clarity.
MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
MOZ_ASSERT(oldval_and_result.code() == X86Encoding::rax);
lock_cmpxchg16(newval, addr);
}
void atomic_cmpxchg32(Register newval, const Operand &addr, Register oldval_and_result) {
// %eax must be explicitly provided for calling clarity.
MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
MOZ_ASSERT(oldval_and_result.code() == X86Encoding::rax);
lock_cmpxchg32(newval, addr);
}
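Background on the rax assertions above (not something this commit changes): lock cmpxchg hard-wires EAX/RAX as both the expected old value and the result, so asserting oldval_and_result.code() == X86Encoding::rax documents an ISA constraint rather than a register-allocation choice. A rough single-threaded model of the semantics, ignoring atomicity:

    #include <cstdint>

    // Hedged model of cmpxchg semantics (illustrative; the real instruction is atomic).
    static bool CmpxchgModel(uint32_t *addr, uint32_t &eax, uint32_t newval) {
        if (*addr == eax) {      // compare memory with the accumulator
            *addr = newval;      // equal: store newval, ZF=1
            return true;
        }
        eax = *addr;             // not equal: old value lands in eax, ZF=0
        return false;
    }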

View File

@ -0,0 +1,137 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_shared_Patching_x86_shared_h
#define jit_shared_Patching_x86_shared_h
namespace js {
namespace jit {
namespace X86Encoding {
inline void *
GetPointer(const void* where)
{
return reinterpret_cast<void *const *>(where)[-1];
}
inline void **
GetPointerRef(void* where)
{
return &reinterpret_cast<void **>(where)[-1];
}
inline void
SetPointer(void* where, const void* value)
{
reinterpret_cast<const void**>(where)[-1] = value;
}
inline int32_t
GetInt32(const void* where)
{
return reinterpret_cast<const int32_t*>(where)[-1];
}
inline void
SetInt32(void* where, int32_t value)
{
reinterpret_cast<int32_t*>(where)[-1] = value;
}
inline void
AddInt32(void* where, int32_t value)
{
#ifdef DEBUG
uint32_t x = reinterpret_cast<uint32_t*>(where)[-1];
uint32_t y = x + uint32_t(value);
MOZ_ASSERT(value >= 0 ? (int32_t(y) >= int32_t(x)) : (int32_t(y) < int32_t(x)));
#endif
reinterpret_cast<uint32_t*>(where)[-1] += uint32_t(value);
}
inline void
SetRel32(void* from, void* to)
{
intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
MOZ_ASSERT(offset == static_cast<int32_t>(offset),
"offset is too great for a 32-bit relocation");
if (offset != static_cast<int32_t>(offset))
MOZ_CRASH("offset is too great for a 32-bit relocation");
SetInt32(from, offset);
}
inline void *
GetRel32Target(void* where)
{
int32_t rel = GetInt32(where);
return (char *)where + rel;
}
class JmpSrc {
public:
JmpSrc()
: offset_(-1)
{
}
explicit JmpSrc(int32_t offset)
: offset_(offset)
{
}
int32_t offset() const {
return offset_;
}
bool isSet() const {
return offset_ != -1;
}
private:
int offset_;
};
class JmpDst {
public:
JmpDst()
: offset_(-1)
, used_(false)
{
}
bool isUsed() const { return used_; }
void used() { used_ = true; }
bool isValid() const { return offset_ != -1; }
explicit JmpDst(int32_t offset)
: offset_(offset)
, used_(false)
{
MOZ_ASSERT(offset_ == offset);
}
int32_t offset() const {
return offset_;
}
private:
int32_t offset_ : 31;
bool used_ : 1;
};
inline bool
CanRelinkJump(void* from, void* to)
{
intptr_t offset = static_cast<char *>(to) - static_cast<char *>(from);
return (offset == static_cast<int32_t>(offset));
}
} // namespace X86Encoding
} // namespace jit
} // namespace js
#endif /* jit_shared_Patching_x86_shared_h */
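One thing worth calling out about the helpers above: the "where" pointer consistently refers to the end of the 32-bit field (i.e. the next instruction), which is why the accessors index with [-1] and why GetRel32Target adds the displacement to "where" itself, matching how x86 defines rel32. A self-check sketch (the buffer layout is an assumption of mine, not from the commit):

    #include <cstdint>
    #include "jit/shared/Patching-x86-shared.h"   // assumed include path
    #include "mozilla/Assertions.h"

    static void PatchingExample() {
        using namespace js::jit::X86Encoding;
        alignas(8) unsigned char buf[32] = {};
        void *end = buf + 8;                       // pretend a rel32 field ends at buf+8
        SetInt32(end, 42);                         // writes buf[4..7]
        MOZ_ASSERT(GetInt32(end) == 42);
        SetRel32(end, buf + 20);                   // stores 12 == (buf+20) - (buf+8)
        MOZ_ASSERT(GetRel32Target(end) == static_cast<void *>(buf + 20));
    }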

View File

@ -7,7 +7,7 @@
#ifndef jit_x64_Architecture_x64_h
#define jit_x64_Architecture_x64_h
#include "jit/shared/BaseAssembler-x86-shared.h"
#include "jit/shared/Constants-x86-shared.h"
namespace js {
namespace jit {
@ -26,7 +26,7 @@ static const uint32_t ShadowStackSpace = 0;
class Registers {
public:
typedef X86Registers::RegisterID Code;
typedef X86Encoding::RegisterID Code;
typedef uint32_t SetType;
static uint32_t SetSize(SetType x) {
static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
@ -54,8 +54,8 @@ class Registers {
return Invalid;
}
static const Code StackPointer = X86Registers::esp;
static const Code Invalid = X86Registers::invalid_reg;
static const Code StackPointer = X86Encoding::rsp;
static const Code Invalid = X86Encoding::invalid_reg;
static const uint32_t Total = 16;
static const uint32_t TotalPhys = 16;
@ -65,46 +65,46 @@ class Registers {
static const uint32_t ArgRegMask =
# if !defined(_WIN64)
(1 << X86Registers::edi) |
(1 << X86Registers::esi) |
(1 << X86Encoding::rdi) |
(1 << X86Encoding::rsi) |
# endif
(1 << X86Registers::edx) |
(1 << X86Registers::ecx) |
(1 << X86Registers::r8) |
(1 << X86Registers::r9);
(1 << X86Encoding::rdx) |
(1 << X86Encoding::rcx) |
(1 << X86Encoding::r8) |
(1 << X86Encoding::r9);
static const uint32_t VolatileMask =
(1 << X86Registers::eax) |
(1 << X86Registers::ecx) |
(1 << X86Registers::edx) |
(1 << X86Encoding::rax) |
(1 << X86Encoding::rcx) |
(1 << X86Encoding::rdx) |
# if !defined(_WIN64)
(1 << X86Registers::esi) |
(1 << X86Registers::edi) |
(1 << X86Encoding::rsi) |
(1 << X86Encoding::rdi) |
# endif
(1 << X86Registers::r8) |
(1 << X86Registers::r9) |
(1 << X86Registers::r10) |
(1 << X86Registers::r11);
(1 << X86Encoding::r8) |
(1 << X86Encoding::r9) |
(1 << X86Encoding::r10) |
(1 << X86Encoding::r11);
static const uint32_t NonVolatileMask =
(1 << X86Registers::ebx) |
(1 << X86Encoding::rbx) |
#if defined(_WIN64)
(1 << X86Registers::esi) |
(1 << X86Registers::edi) |
(1 << X86Encoding::rsi) |
(1 << X86Encoding::rdi) |
#endif
(1 << X86Registers::ebp) |
(1 << X86Registers::r12) |
(1 << X86Registers::r13) |
(1 << X86Registers::r14) |
(1 << X86Registers::r15);
(1 << X86Encoding::rbp) |
(1 << X86Encoding::r12) |
(1 << X86Encoding::r13) |
(1 << X86Encoding::r14) |
(1 << X86Encoding::r15);
static const uint32_t WrapperMask = VolatileMask;
static const uint32_t SingleByteRegs = VolatileMask | NonVolatileMask;
static const uint32_t NonAllocatableMask =
(1 << X86Registers::esp) |
(1 << X86Registers::r11); // This is ScratchReg.
(1 << X86Encoding::rsp) |
(1 << X86Encoding::r11); // This is ScratchReg.
static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
@ -113,11 +113,11 @@ class Registers {
// Registers returned from a JS -> JS call.
static const uint32_t JSCallMask =
(1 << X86Registers::ecx);
(1 << X86Encoding::rcx);
// Registers returned from a JS -> C call.
static const uint32_t CallMask =
(1 << X86Registers::eax);
(1 << X86Encoding::rax);
};
// Smallest integer type that can hold a register bitmask.
@ -125,14 +125,10 @@ typedef uint16_t PackedRegisterMask;
class FloatRegisters {
public:
typedef X86Registers::XMMRegisterID Code;
typedef X86Encoding::XMMRegisterID Code;
typedef uint32_t SetType;
static const char *GetName(Code code) {
static const char * const Names[] = { "xmm0", "xmm1", "xmm2", "xmm3",
"xmm4", "xmm5", "xmm6", "xmm7",
"xmm8", "xmm9", "xmm10", "xmm11",
"xmm12", "xmm13", "xmm14", "xmm15" };
return Names[code];
return X86Encoding::XMMRegName(code);
}
static Code FromName(const char *name) {
@ -143,7 +139,7 @@ class FloatRegisters {
return Invalid;
}
static const Code Invalid = X86Registers::invalid_xmm;
static const Code Invalid = X86Encoding::invalid_xmm;
static const uint32_t Total = 16;
static const uint32_t TotalPhys = 16;
@ -154,12 +150,12 @@ class FloatRegisters {
static const uint32_t AllDoubleMask = AllMask;
static const uint32_t VolatileMask =
#if defined(_WIN64)
(1 << X86Registers::xmm0) |
(1 << X86Registers::xmm1) |
(1 << X86Registers::xmm2) |
(1 << X86Registers::xmm3) |
(1 << X86Registers::xmm4) |
(1 << X86Registers::xmm5);
(1 << X86Encoding::xmm0) |
(1 << X86Encoding::xmm1) |
(1 << X86Encoding::xmm2) |
(1 << X86Encoding::xmm3) |
(1 << X86Encoding::xmm4) |
(1 << X86Encoding::xmm5);
#else
AllMask;
#endif
@ -169,7 +165,7 @@ class FloatRegisters {
static const uint32_t WrapperMask = VolatileMask;
static const uint32_t NonAllocatableMask =
(1 << X86Registers::xmm15); // This is ScratchDoubleReg.
(1 << X86Encoding::xmm15); // This is ScratchDoubleReg.
static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
};

View File

@ -101,11 +101,11 @@ ABIArgGenerator::next(MIRType type)
}
// Avoid r11, which is the MacroAssembler's ScratchReg.
const Register ABIArgGenerator::NonArgReturnReg0 = r10;
const Register ABIArgGenerator::NonArgReturnReg1 = r12;
const Register ABIArgGenerator::NonVolatileReg = r13;
const Register ABIArgGenerator::NonArg_VolatileReg = rax;
const Register ABIArgGenerator::NonReturn_VolatileReg0 = rcx;
const Register ABIArgGenerator::NonArgReturnReg0 = jit::r10;
const Register ABIArgGenerator::NonArgReturnReg1 = jit::r12;
const Register ABIArgGenerator::NonVolatileReg = jit::r13;
const Register ABIArgGenerator::NonArg_VolatileReg = jit::rax;
const Register ABIArgGenerator::NonReturn_VolatileReg0 = jit::rcx;
void
Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
@ -216,8 +216,8 @@ Assembler::executableCopy(uint8_t *buffer)
// to jump to a different code block.
continue;
}
if (X86Assembler::canRelinkJump(src, rp.target)) {
X86Assembler::setRel32(src, rp.target);
if (X86Encoding::CanRelinkJump(src, rp.target)) {
X86Encoding::SetRel32(src, rp.target);
} else {
// An extended jump table must exist, and its offset must be in
// range.
@ -226,11 +226,11 @@ Assembler::executableCopy(uint8_t *buffer)
// Patch the jump to go to the extended jump entry.
uint8_t *entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
X86Assembler::setRel32(src, entry);
X86Encoding::SetRel32(src, entry);
// Now patch the pointer, note that we need to align it to
// *after* the extended jump, i.e. after the 64-bit immediate.
X86Assembler::setPointer(entry + SizeOfExtendedJump, rp.target);
X86Encoding::SetPointer(entry + SizeOfExtendedJump, rp.target);
}
}
}
@ -268,13 +268,13 @@ class RelocationIterator
JitCode *
Assembler::CodeFromJump(JitCode *code, uint8_t *jump)
{
uint8_t *target = (uint8_t *)X86Assembler::getRel32Target(jump);
uint8_t *target = (uint8_t *)X86Encoding::GetRel32Target(jump);
if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
// This jump is within the code buffer, so it has been redirected to
// the extended jump table.
MOZ_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());
target = (uint8_t *)X86Assembler::getPointer(target + SizeOfExtendedJump);
target = (uint8_t *)X86Encoding::GetPointer(target + SizeOfExtendedJump);
}
return JitCode::FromExecutable(target);

View File

@ -16,39 +16,39 @@
namespace js {
namespace jit {
static MOZ_CONSTEXPR_VAR Register rax = { X86Registers::eax };
static MOZ_CONSTEXPR_VAR Register rbx = { X86Registers::ebx };
static MOZ_CONSTEXPR_VAR Register rcx = { X86Registers::ecx };
static MOZ_CONSTEXPR_VAR Register rdx = { X86Registers::edx };
static MOZ_CONSTEXPR_VAR Register rsi = { X86Registers::esi };
static MOZ_CONSTEXPR_VAR Register rdi = { X86Registers::edi };
static MOZ_CONSTEXPR_VAR Register rbp = { X86Registers::ebp };
static MOZ_CONSTEXPR_VAR Register r8 = { X86Registers::r8 };
static MOZ_CONSTEXPR_VAR Register r9 = { X86Registers::r9 };
static MOZ_CONSTEXPR_VAR Register r10 = { X86Registers::r10 };
static MOZ_CONSTEXPR_VAR Register r11 = { X86Registers::r11 };
static MOZ_CONSTEXPR_VAR Register r12 = { X86Registers::r12 };
static MOZ_CONSTEXPR_VAR Register r13 = { X86Registers::r13 };
static MOZ_CONSTEXPR_VAR Register r14 = { X86Registers::r14 };
static MOZ_CONSTEXPR_VAR Register r15 = { X86Registers::r15 };
static MOZ_CONSTEXPR_VAR Register rsp = { X86Registers::esp };
static MOZ_CONSTEXPR_VAR Register rax = { X86Encoding::rax };
static MOZ_CONSTEXPR_VAR Register rbx = { X86Encoding::rbx };
static MOZ_CONSTEXPR_VAR Register rcx = { X86Encoding::rcx };
static MOZ_CONSTEXPR_VAR Register rdx = { X86Encoding::rdx };
static MOZ_CONSTEXPR_VAR Register rsi = { X86Encoding::rsi };
static MOZ_CONSTEXPR_VAR Register rdi = { X86Encoding::rdi };
static MOZ_CONSTEXPR_VAR Register rbp = { X86Encoding::rbp };
static MOZ_CONSTEXPR_VAR Register r8 = { X86Encoding::r8 };
static MOZ_CONSTEXPR_VAR Register r9 = { X86Encoding::r9 };
static MOZ_CONSTEXPR_VAR Register r10 = { X86Encoding::r10 };
static MOZ_CONSTEXPR_VAR Register r11 = { X86Encoding::r11 };
static MOZ_CONSTEXPR_VAR Register r12 = { X86Encoding::r12 };
static MOZ_CONSTEXPR_VAR Register r13 = { X86Encoding::r13 };
static MOZ_CONSTEXPR_VAR Register r14 = { X86Encoding::r14 };
static MOZ_CONSTEXPR_VAR Register r15 = { X86Encoding::r15 };
static MOZ_CONSTEXPR_VAR Register rsp = { X86Encoding::rsp };
static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { X86Registers::xmm0 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { X86Registers::xmm1 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { X86Registers::xmm2 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { X86Registers::xmm3 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { X86Registers::xmm4 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { X86Registers::xmm5 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { X86Registers::xmm6 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { X86Registers::xmm7 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm8 = { X86Registers::xmm8 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm9 = { X86Registers::xmm9 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm10 = { X86Registers::xmm10 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm11 = { X86Registers::xmm11 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm12 = { X86Registers::xmm12 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm13 = { X86Registers::xmm13 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm14 = { X86Registers::xmm14 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm15 = { X86Registers::xmm15 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { X86Encoding::xmm0 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { X86Encoding::xmm1 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { X86Encoding::xmm2 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { X86Encoding::xmm3 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { X86Encoding::xmm4 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { X86Encoding::xmm5 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { X86Encoding::xmm6 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { X86Encoding::xmm7 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm8 = { X86Encoding::xmm8 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm9 = { X86Encoding::xmm9 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm10 = { X86Encoding::xmm10 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm11 = { X86Encoding::xmm11 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm12 = { X86Encoding::xmm12 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm13 = { X86Encoding::xmm13 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm14 = { X86Encoding::xmm14 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm15 = { X86Encoding::xmm15 };
// X86-common synonyms.
static MOZ_CONSTEXPR_VAR Register eax = rax;
@ -60,8 +60,8 @@ static MOZ_CONSTEXPR_VAR Register edi = rdi;
static MOZ_CONSTEXPR_VAR Register ebp = rbp;
static MOZ_CONSTEXPR_VAR Register esp = rsp;
static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { X86Registers::invalid_xmm };
static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Encoding::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { X86Encoding::invalid_xmm };
static MOZ_CONSTEXPR_VAR Register StackPointer = rsp;
static MOZ_CONSTEXPR_VAR Register FramePointer = rbp;
@ -717,7 +717,7 @@ class Assembler : public AssemblerX86Shared
}
void j(Condition cond, ImmPtr target,
Relocation::Kind reloc = Relocation::HARDCODED) {
JmpSrc src = masm.jCC(static_cast<X86Assembler::Condition>(cond));
JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
addPendingJump(src, target, reloc);
}
@ -767,10 +767,10 @@ class Assembler : public AssemblerX86Shared
static inline void
PatchJump(CodeLocationJump jump, CodeLocationLabel label)
{
if (X86Assembler::canRelinkJump(jump.raw(), label.raw())) {
X86Assembler::setRel32(jump.raw(), label.raw());
if (X86Encoding::CanRelinkJump(jump.raw(), label.raw())) {
X86Encoding::SetRel32(jump.raw(), label.raw());
} else {
X86Assembler::setRel32(jump.raw(), jump.jumpTableEntry());
X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw());
}
}

View File

@ -635,7 +635,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
}
void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label *label) {
if (X86Assembler::isAddressImmediate(lhs.addr)) {
if (X86Encoding::IsAddressImmediate(lhs.addr)) {
branch32(cond, Operand(lhs), rhs, label);
} else {
mov(ImmPtr(lhs.addr), ScratchReg);
@ -647,7 +647,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
branch32(cond, Address(ScratchReg, 0), rhs, label);
}
void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label *label) {
if (X86Assembler::isAddressImmediate(lhs.addr)) {
if (X86Encoding::IsAddressImmediate(lhs.addr)) {
branch32(cond, Operand(lhs), rhs, label);
} else {
mov(ImmPtr(lhs.addr), ScratchReg);
@ -655,7 +655,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
}
}
void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label *label) {
if (X86Assembler::isAddressImmediate(address.addr)) {
if (X86Encoding::IsAddressImmediate(address.addr)) {
test32(Operand(address), imm);
} else {
mov(ImmPtr(address.addr), ScratchReg);
@ -667,7 +667,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
// Specialization for AbsoluteAddress.
void branchPtr(Condition cond, AbsoluteAddress addr, Register ptr, Label *label) {
MOZ_ASSERT(ptr != ScratchReg);
if (X86Assembler::isAddressImmediate(addr.addr)) {
if (X86Encoding::IsAddressImmediate(addr.addr)) {
branchPtr(cond, Operand(addr), ptr, label);
} else {
mov(ImmPtr(addr.addr), ScratchReg);
@ -675,7 +675,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
}
}
void branchPtr(Condition cond, AbsoluteAddress addr, ImmWord ptr, Label *label) {
if (X86Assembler::isAddressImmediate(addr.addr)) {
if (X86Encoding::IsAddressImmediate(addr.addr)) {
branchPtr(cond, Operand(addr), ptr, label);
} else {
mov(ImmPtr(addr.addr), ScratchReg);
@ -767,7 +767,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
movePtr(noteMaybeNurseryPtr(imm), dest);
}
void loadPtr(AbsoluteAddress address, Register dest) {
if (X86Assembler::isAddressImmediate(address.addr)) {
if (X86Encoding::IsAddressImmediate(address.addr)) {
movq(Operand(address), dest);
} else {
mov(ImmPtr(address.addr), ScratchReg);
@ -788,7 +788,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
shlq(Imm32(1), dest);
}
void load32(AbsoluteAddress address, Register dest) {
if (X86Assembler::isAddressImmediate(address.addr)) {
if (X86Encoding::IsAddressImmediate(address.addr)) {
movl(Operand(address), dest);
} else {
mov(ImmPtr(address.addr), ScratchReg);
@ -823,7 +823,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
movq(src, dest);
}
void storePtr(Register src, AbsoluteAddress address) {
if (X86Assembler::isAddressImmediate(address.addr)) {
if (X86Encoding::IsAddressImmediate(address.addr)) {
movq(src, Operand(address));
} else {
mov(ImmPtr(address.addr), ScratchReg);
@ -831,7 +831,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
}
}
void store32(Register src, AbsoluteAddress address) {
if (X86Assembler::isAddressImmediate(address.addr)) {
if (X86Encoding::IsAddressImmediate(address.addr)) {
movl(src, Operand(address));
} else {
mov(ImmPtr(address.addr), ScratchReg);
@ -1355,7 +1355,7 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
}
void inc64(AbsoluteAddress dest) {
if (X86Assembler::isAddressImmediate(dest.addr)) {
if (X86Encoding::IsAddressImmediate(dest.addr)) {
addPtr(Imm32(1), Operand(dest));
} else {
mov(ImmPtr(dest.addr), ScratchReg);

View File

@ -7,7 +7,7 @@
#ifndef jit_x86_Architecture_x86_h
#define jit_x86_Architecture_x86_h
#include "jit/shared/BaseAssembler-x86-shared.h"
#include "jit/shared/Constants-x86-shared.h"
namespace js {
namespace jit {
@ -35,7 +35,7 @@ static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 5;
class Registers {
public:
typedef X86Registers::RegisterID Code;
typedef X86Encoding::RegisterID Code;
typedef uint8_t SetType;
static uint32_t SetSize(SetType x) {
static_assert(sizeof(SetType) == 1, "SetType must be 8 bits");
@ -48,9 +48,7 @@ class Registers {
return 31 - mozilla::CountLeadingZeroes32(x);
}
static const char *GetName(Code code) {
static const char * const Names[] = { "eax", "ecx", "edx", "ebx",
"esp", "ebp", "esi", "edi" };
return Names[code];
return X86Encoding::GPRegName(code);
}
static Code FromName(const char *name) {
@ -61,8 +59,8 @@ class Registers {
return Invalid;
}
static const Code StackPointer = X86Registers::esp;
static const Code Invalid = X86Registers::invalid_reg;
static const Code StackPointer = X86Encoding::rsp;
static const Code Invalid = X86Encoding::invalid_reg;
static const uint32_t Total = 8;
static const uint32_t TotalPhys = 8;
@ -73,28 +71,28 @@ class Registers {
static const uint32_t ArgRegMask = 0;
static const uint32_t VolatileMask =
(1 << X86Registers::eax) |
(1 << X86Registers::ecx) |
(1 << X86Registers::edx);
(1 << X86Encoding::rax) |
(1 << X86Encoding::rcx) |
(1 << X86Encoding::rdx);
static const uint32_t NonVolatileMask =
(1 << X86Registers::ebx) |
(1 << X86Registers::esi) |
(1 << X86Registers::edi) |
(1 << X86Registers::ebp);
(1 << X86Encoding::rbx) |
(1 << X86Encoding::rsi) |
(1 << X86Encoding::rdi) |
(1 << X86Encoding::rbp);
static const uint32_t WrapperMask =
VolatileMask |
(1 << X86Registers::ebx);
(1 << X86Encoding::rbx);
static const uint32_t SingleByteRegs =
(1 << X86Registers::eax) |
(1 << X86Registers::ecx) |
(1 << X86Registers::edx) |
(1 << X86Registers::ebx);
(1 << X86Encoding::rax) |
(1 << X86Encoding::rcx) |
(1 << X86Encoding::rdx) |
(1 << X86Encoding::rbx);
static const uint32_t NonAllocatableMask =
(1 << X86Registers::esp);
(1 << X86Encoding::rsp);
static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
@ -103,12 +101,12 @@ class Registers {
// Registers returned from a JS -> JS call.
static const uint32_t JSCallMask =
(1 << X86Registers::ecx) |
(1 << X86Registers::edx);
(1 << X86Encoding::rcx) |
(1 << X86Encoding::rdx);
// Registers returned from a JS -> C call.
static const uint32_t CallMask =
(1 << X86Registers::eax);
(1 << X86Encoding::rax);
};
// Smallest integer type that can hold a register bitmask.
@ -116,12 +114,10 @@ typedef uint8_t PackedRegisterMask;
class FloatRegisters {
public:
typedef X86Registers::XMMRegisterID Code;
typedef X86Encoding::XMMRegisterID Code;
typedef uint32_t SetType;
static const char *GetName(Code code) {
static const char * const Names[] = { "xmm0", "xmm1", "xmm2", "xmm3",
"xmm4", "xmm5", "xmm6", "xmm7" };
return Names[code];
return X86Encoding::XMMRegName(code);
}
static Code FromName(const char *name) {
@ -132,7 +128,7 @@ class FloatRegisters {
return Invalid;
}
static const Code Invalid = X86Registers::invalid_xmm;
static const Code Invalid = X86Encoding::invalid_xmm;
static const uint32_t Total = 8;
static const uint32_t TotalPhys = 8;
@ -146,7 +142,7 @@ class FloatRegisters {
static const uint32_t WrapperMask = VolatileMask;
static const uint32_t NonAllocatableMask =
(1 << X86Registers::xmm7); // This is ScratchDoubleReg.
(1 << X86Encoding::xmm7); // This is ScratchDoubleReg.
static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
};

View File

@ -58,7 +58,7 @@ Assembler::executableCopy(uint8_t *buffer)
for (size_t i = 0; i < jumps_.length(); i++) {
RelativePatch &rp = jumps_[i];
X86Assembler::setRel32(buffer + rp.offset, rp.target);
X86Encoding::SetRel32(buffer + rp.offset, rp.target);
}
}
@ -87,7 +87,7 @@ class RelocationIterator
static inline JitCode *
CodeFromJump(uint8_t *jump)
{
uint8_t *target = (uint8_t *)X86Assembler::getRel32Target(jump);
uint8_t *target = (uint8_t *)X86Encoding::GetRel32Target(jump);
return JitCode::FromExecutable(target);
}

View File

@ -13,31 +13,31 @@
#include "jit/IonCode.h"
#include "jit/JitCompartment.h"
#include "jit/shared/Assembler-shared.h"
#include "jit/shared/BaseAssembler-x86-shared.h"
#include "jit/shared/Constants-x86-shared.h"
namespace js {
namespace jit {
static MOZ_CONSTEXPR_VAR Register eax = { X86Registers::eax };
static MOZ_CONSTEXPR_VAR Register ecx = { X86Registers::ecx };
static MOZ_CONSTEXPR_VAR Register edx = { X86Registers::edx };
static MOZ_CONSTEXPR_VAR Register ebx = { X86Registers::ebx };
static MOZ_CONSTEXPR_VAR Register esp = { X86Registers::esp };
static MOZ_CONSTEXPR_VAR Register ebp = { X86Registers::ebp };
static MOZ_CONSTEXPR_VAR Register esi = { X86Registers::esi };
static MOZ_CONSTEXPR_VAR Register edi = { X86Registers::edi };
static MOZ_CONSTEXPR_VAR Register eax = { X86Encoding::rax };
static MOZ_CONSTEXPR_VAR Register ecx = { X86Encoding::rcx };
static MOZ_CONSTEXPR_VAR Register edx = { X86Encoding::rdx };
static MOZ_CONSTEXPR_VAR Register ebx = { X86Encoding::rbx };
static MOZ_CONSTEXPR_VAR Register esp = { X86Encoding::rsp };
static MOZ_CONSTEXPR_VAR Register ebp = { X86Encoding::rbp };
static MOZ_CONSTEXPR_VAR Register esi = { X86Encoding::rsi };
static MOZ_CONSTEXPR_VAR Register edi = { X86Encoding::rdi };
static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { X86Registers::xmm0 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { X86Registers::xmm1 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { X86Registers::xmm2 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { X86Registers::xmm3 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { X86Registers::xmm4 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { X86Registers::xmm5 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { X86Registers::xmm6 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { X86Registers::xmm7 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { X86Encoding::xmm0 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { X86Encoding::xmm1 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { X86Encoding::xmm2 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { X86Encoding::xmm3 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { X86Encoding::xmm4 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { X86Encoding::xmm5 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { X86Encoding::xmm6 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { X86Encoding::xmm7 };
static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { X86Registers::invalid_xmm };
static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Encoding::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { X86Encoding::invalid_xmm };
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
@ -164,7 +164,7 @@ PatchJump(CodeLocationJump jump, CodeLocationLabel label)
MOZ_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
(*x == 0xE9));
#endif
X86Assembler::setRel32(jump.raw(), label.raw());
X86Encoding::SetRel32(jump.raw(), label.raw());
}
static inline void
PatchBackedge(CodeLocationJump &jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
@ -381,7 +381,7 @@ class Assembler : public AssemblerX86Shared
}
void j(Condition cond, ImmPtr target,
Relocation::Kind reloc = Relocation::HARDCODED) {
JmpSrc src = masm.jCC(static_cast<X86Assembler::Condition>(cond));
JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
addPendingJump(src, target, reloc);
}
@ -423,9 +423,9 @@ class Assembler : public AssemblerX86Shared
void retarget(Label *label, ImmPtr target, Relocation::Kind reloc) {
if (label->used()) {
bool more;
X86Assembler::JmpSrc jmp(label->offset());
X86Encoding::JmpSrc jmp(label->offset());
do {
X86Assembler::JmpSrc next;
X86Encoding::JmpSrc next;
more = masm.nextJump(jmp, &next);
addPendingJump(jmp, target, reloc);
jmp = next;
@ -630,7 +630,7 @@ class Assembler : public AssemblerX86Shared
}
static bool canUseInSingleByteInstruction(Register reg) {
return !ByteRegRequiresRex(reg.code());
return X86Encoding::HasSubregL(reg.code());
}
};