Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1322093 part 1 - Split up BaselineCacheIR.{cpp,h}. r=h4writer
This commit is contained in:
Parent: 47afdb3f0c
Commit: 52611926d0
js/src/jit/BaselineCacheIR.cpp
@@ -17,432 +17,6 @@ using namespace js::jit;
using mozilla::Maybe;

// OperandLocation represents the location of an OperandId. The operand is
// either in a register or on the stack, and is either boxed or unboxed.
class OperandLocation
{
  public:
    enum Kind {
        Uninitialized = 0,
        PayloadReg,
        ValueReg,
        PayloadStack,
        ValueStack,
    };

  private:
    Kind kind_;

    union Data {
        struct {
            Register reg;
            JSValueType type;
        } payloadReg;
        ValueOperand valueReg;
        struct {
            uint32_t stackPushed;
            JSValueType type;
        } payloadStack;
        uint32_t valueStackPushed;

        Data() : valueStackPushed(0) {}
    };
    Data data_;

  public:
    OperandLocation() : kind_(Uninitialized) {}

    Kind kind() const { return kind_; }

    void setUninitialized() {
        kind_ = Uninitialized;
    }

    ValueOperand valueReg() const {
        MOZ_ASSERT(kind_ == ValueReg);
        return data_.valueReg;
    }
    Register payloadReg() const {
        MOZ_ASSERT(kind_ == PayloadReg);
        return data_.payloadReg.reg;
    }
    uint32_t payloadStack() const {
        MOZ_ASSERT(kind_ == PayloadStack);
        return data_.payloadStack.stackPushed;
    }
    uint32_t valueStack() const {
        MOZ_ASSERT(kind_ == ValueStack);
        return data_.valueStackPushed;
    }
    JSValueType payloadType() const {
        if (kind_ == PayloadReg)
            return data_.payloadReg.type;
        MOZ_ASSERT(kind_ == PayloadStack);
        return data_.payloadStack.type;
    }
    void setPayloadReg(Register reg, JSValueType type) {
        kind_ = PayloadReg;
        data_.payloadReg.reg = reg;
        data_.payloadReg.type = type;
    }
    void setValueReg(ValueOperand reg) {
        kind_ = ValueReg;
        data_.valueReg = reg;
    }
    void setPayloadStack(uint32_t stackPushed, JSValueType type) {
        kind_ = PayloadStack;
        data_.payloadStack.stackPushed = stackPushed;
        data_.payloadStack.type = type;
    }
    void setValueStack(uint32_t stackPushed) {
        kind_ = ValueStack;
        data_.valueStackPushed = stackPushed;
    }

    bool aliasesReg(Register reg) {
        if (kind_ == PayloadReg)
            return payloadReg() == reg;
        if (kind_ == ValueReg)
            return valueReg().aliases(reg);
        return false;
    }
    bool aliasesReg(ValueOperand reg) {
#if defined(JS_NUNBOX32)
        return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg());
#else
        return aliasesReg(reg.valueReg());
#endif
    }

    bool operator==(const OperandLocation& other) const {
        if (kind_ != other.kind_)
            return false;
        switch (kind()) {
          case Uninitialized:
            return true;
          case PayloadReg:
            return payloadReg() == other.payloadReg() && payloadType() == other.payloadType();
          case ValueReg:
            return valueReg() == other.valueReg();
          case PayloadStack:
            return payloadStack() == other.payloadStack() && payloadType() == other.payloadType();
          case ValueStack:
            return valueStack() == other.valueStack();
        }
        MOZ_CRASH("Invalid OperandLocation kind");
    }
    bool operator!=(const OperandLocation& other) const { return !operator==(other); }
};

// Class to track and allocate registers while emitting IC code.
class MOZ_RAII CacheRegisterAllocator
{
    // The original location of the inputs to the cache.
    Vector<OperandLocation, 4, SystemAllocPolicy> origInputLocations_;

    // The current location of each operand.
    Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;

    // The registers allocated while emitting the current CacheIR op.
    // This prevents us from allocating a register and then immediately
    // clobbering it for something else, while we're still holding on to it.
    LiveGeneralRegisterSet currentOpRegs_;

    const AllocatableGeneralRegisterSet allocatableRegs_;

    // Registers that are currently unused and available.
    AllocatableGeneralRegisterSet availableRegs_;

    // The number of bytes pushed on the native stack.
    uint32_t stackPushed_;

    // The index of the CacheIR instruction we're currently emitting.
    uint32_t currentInstruction_;

    const CacheIRWriter& writer_;

    CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
    CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;

    void freeDeadOperandRegisters();

  public:
    friend class AutoScratchRegister;
    friend class AutoScratchRegisterExcluding;

    explicit CacheRegisterAllocator(const CacheIRWriter& writer)
      : allocatableRegs_(GeneralRegisterSet::All()),
        stackPushed_(0),
        currentInstruction_(0),
        writer_(writer)
    {}

    MOZ_MUST_USE bool init(const AllocatableGeneralRegisterSet& available) {
        availableRegs_ = available;
        if (!origInputLocations_.resize(writer_.numInputOperands()))
            return false;
        if (!operandLocations_.resize(writer_.numOperandIds()))
            return false;
        return true;
    }

    OperandLocation operandLocation(size_t i) const {
        return operandLocations_[i];
    }
    OperandLocation origInputLocation(size_t i) const {
        return origInputLocations_[i];
    }
    void initInputLocation(size_t i, ValueOperand reg) {
        origInputLocations_[i].setValueReg(reg);
        operandLocations_[i] = origInputLocations_[i];
    }

    void nextOp() {
        currentOpRegs_.clear();
        currentInstruction_++;
    }

    uint32_t stackPushed() const {
        return stackPushed_;
    }

    bool isAllocatable(Register reg) const {
        return allocatableRegs_.has(reg);
    }

    // Allocates a new register.
    Register allocateRegister(MacroAssembler& masm);
    ValueOperand allocateValueRegister(MacroAssembler& masm);
    void allocateFixedRegister(MacroAssembler& masm, Register reg);

    // Releases a register so it can be reused later.
    void releaseRegister(Register reg) {
        MOZ_ASSERT(currentOpRegs_.has(reg));
        availableRegs_.add(reg);
    }

    // Removes spilled values from the native stack. This should only be
    // called after all registers have been allocated.
    void discardStack(MacroAssembler& masm);

    // Returns the register for the given operand. If the operand is currently
    // not in a register, it will load it into one.
    ValueOperand useValueRegister(MacroAssembler& masm, ValOperandId val);
    Register useRegister(MacroAssembler& masm, TypedOperandId typedId);

    // Allocates an output register for the given operand.
    Register defineRegister(MacroAssembler& masm, TypedOperandId typedId);
    ValueOperand defineValueRegister(MacroAssembler& masm, ValOperandId val);
};

// RAII class to allocate a scratch register and release it when we're done
// with it.
class MOZ_RAII AutoScratchRegister
{
    CacheRegisterAllocator& alloc_;
    Register reg_;

  public:
    AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
                        Register reg = InvalidReg)
      : alloc_(alloc)
    {
        if (reg != InvalidReg) {
            alloc.allocateFixedRegister(masm, reg);
            reg_ = reg;
        } else {
            reg_ = alloc.allocateRegister(masm);
        }
        MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
    }
    ~AutoScratchRegister() {
        alloc_.releaseRegister(reg_);
    }
    operator Register() const { return reg_; }
};

// Like AutoScratchRegister, but lets the caller specify a register that should
// not be allocated here.
class MOZ_RAII AutoScratchRegisterExcluding
{
    CacheRegisterAllocator& alloc_;
    Register reg_;

  public:
    AutoScratchRegisterExcluding(CacheRegisterAllocator& alloc, MacroAssembler& masm,
                                 Register excluding)
      : alloc_(alloc)
    {
        MOZ_ASSERT(excluding != InvalidReg);

        reg_ = alloc.allocateRegister(masm);

        if (reg_ == excluding) {
            // We need a different register, so try again.
            reg_ = alloc.allocateRegister(masm);
            MOZ_ASSERT(reg_ != excluding);
            alloc_.releaseRegister(excluding);
        }

        MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
    }
    ~AutoScratchRegisterExcluding() {
        alloc_.releaseRegister(reg_);
    }
    operator Register() const { return reg_; }
};

// The FailurePath class stores everything we need to generate a failure path
// at the end of the IC code. The failure path restores the input registers, if
// needed, and jumps to the next stub.
class FailurePath
{
    Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
    NonAssertingLabel label_;
    uint32_t stackPushed_;

  public:
    FailurePath() = default;

    FailurePath(FailurePath&& other)
      : inputs_(Move(other.inputs_)),
        label_(other.label_),
        stackPushed_(other.stackPushed_)
    {}

    Label* label() { return &label_; }

    void setStackPushed(uint32_t i) { stackPushed_ = i; }
    uint32_t stackPushed() const { return stackPushed_; }

    bool appendInput(OperandLocation loc) {
        return inputs_.append(loc);
    }
    OperandLocation input(size_t i) const {
        return inputs_[i];
    }

    // If canShareFailurePath(other) returns true, the same machine code will
    // be emitted for two failure paths, so we can share them.
    bool canShareFailurePath(const FailurePath& other) const {
        if (stackPushed_ != other.stackPushed_)
            return false;

        MOZ_ASSERT(inputs_.length() == other.inputs_.length());

        for (size_t i = 0; i < inputs_.length(); i++) {
            if (inputs_[i] != other.inputs_[i])
                return false;
        }
        return true;
    }
};

// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
class MOZ_RAII CacheIRCompiler
{
  protected:
    JSContext* cx_;
    CacheIRReader reader;
    const CacheIRWriter& writer_;
    MacroAssembler masm;

    CacheRegisterAllocator allocator;
    Vector<FailurePath, 4, SystemAllocPolicy> failurePaths;

    CacheIRCompiler(JSContext* cx, const CacheIRWriter& writer)
      : cx_(cx),
        reader(writer),
        writer_(writer),
        allocator(writer_)
    {}

    void emitFailurePath(size_t i);
};

void
CacheIRCompiler::emitFailurePath(size_t i)
{
    FailurePath& failure = failurePaths[i];

    masm.bind(failure.label());

    uint32_t stackPushed = failure.stackPushed();
    size_t numInputOperands = writer_.numInputOperands();

    for (size_t j = 0; j < numInputOperands; j++) {
        OperandLocation orig = allocator.origInputLocation(j);
        OperandLocation cur = failure.input(j);

        MOZ_ASSERT(orig.kind() == OperandLocation::ValueReg);

        // We have a cycle if a destination register will be used later
        // as source register. If that happens, just push the current value
        // on the stack and later get it from there.
        for (size_t k = j + 1; k < numInputOperands; k++) {
            OperandLocation laterSource = failure.input(k);
            switch (laterSource.kind()) {
              case OperandLocation::ValueReg:
                if (orig.aliasesReg(laterSource.valueReg())) {
                    stackPushed += sizeof(js::Value);
                    masm.pushValue(laterSource.valueReg());
                    laterSource.setValueStack(stackPushed);
                }
                break;
              case OperandLocation::PayloadReg:
                if (orig.aliasesReg(laterSource.payloadReg())) {
                    stackPushed += sizeof(uintptr_t);
                    masm.push(laterSource.payloadReg());
                    laterSource.setPayloadStack(stackPushed, laterSource.payloadType());
                }
                break;
              case OperandLocation::PayloadStack:
              case OperandLocation::ValueStack:
              case OperandLocation::Uninitialized:
                break;
            }
        }

        switch (cur.kind()) {
          case OperandLocation::ValueReg:
            masm.moveValue(cur.valueReg(), orig.valueReg());
            break;
          case OperandLocation::PayloadReg:
            masm.tagValue(cur.payloadType(), cur.payloadReg(), orig.valueReg());
            break;
          case OperandLocation::PayloadStack: {
            MOZ_ASSERT(stackPushed >= sizeof(uintptr_t));
            Register scratch = orig.valueReg().scratchReg();
            if (cur.payloadStack() == stackPushed) {
                masm.pop(scratch);
                stackPushed -= sizeof(uintptr_t);
            } else {
                MOZ_ASSERT(cur.payloadStack() < stackPushed);
                masm.loadPtr(Address(masm.getStackPointer(), stackPushed - cur.payloadStack()),
                             scratch);
            }
            masm.tagValue(cur.payloadType(), scratch, orig.valueReg());
            break;
          }
          case OperandLocation::ValueStack:
            MOZ_ASSERT(stackPushed >= sizeof(js::Value));
            if (cur.valueStack() == stackPushed) {
                masm.popValue(orig.valueReg());
                stackPushed -= sizeof(js::Value);
            } else {
                MOZ_ASSERT(cur.valueStack() < stackPushed);
                masm.loadValue(Address(masm.getStackPointer(), stackPushed - cur.valueStack()),
                               orig.valueReg());
            }
            break;
          default:
            MOZ_CRASH();
        }
    }

    allocator.discardStack(masm);
}

// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
{
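To make the OperandLocation idea above concrete, here is a standalone, simplified model of its tagged union and a spill transition. The Register and JSValueType aliases are stand-ins, not the real jit types; this is illustrative code, not part of the patch.

// Standalone model of OperandLocation: a Kind tag plus a union, where each
// setter rewrites both. Spilling moves an operand from a register to a stack
// slot recorded by its byte depth.
#include <cassert>
#include <cstdint>
#include <cstdio>

using Register = int;        // stand-in for jit::Register
using JSValueType = uint8_t; // stand-in for the real enum

class Location {
  public:
    enum Kind { Uninitialized, PayloadReg, PayloadStack };

  private:
    struct PayloadRegData { Register reg; JSValueType type; };
    struct PayloadStackData { uint32_t stackPushed; JSValueType type; };

    Kind kind_ = Uninitialized;
    union {
        PayloadRegData payloadReg_;
        PayloadStackData payloadStack_;
    };

  public:
    Location() {}
    Kind kind() const { return kind_; }
    void setPayloadReg(Register reg, JSValueType type) {
        kind_ = PayloadReg;
        payloadReg_ = PayloadRegData{reg, type};
    }
    void setPayloadStack(uint32_t stackPushed, JSValueType type) {
        kind_ = PayloadStack;
        payloadStack_ = PayloadStackData{stackPushed, type};
    }
    Register payloadReg() const { assert(kind_ == PayloadReg); return payloadReg_.reg; }
    uint32_t payloadStack() const { assert(kind_ == PayloadStack); return payloadStack_.stackPushed; }
};

int main() {
    Location loc;
    loc.setPayloadReg(/* reg */ 3, /* type, e.g. an object */ 7);
    // Spilling: the allocator pushes the register and records the byte depth
    // so the operand can be reloaded (or popped) later.
    loc.setPayloadStack(/* stackPushed */ 8, 7);
    assert(loc.kind() == Location::PayloadStack);
    printf("operand now at stack depth %u\n", loc.payloadStack());
}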
@@ -483,28 +57,6 @@ class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
    Address stubAddress(uint32_t offset) const {
        return Address(ICStubReg, stubDataOffset_ + offset);
    }

    bool addFailurePath(FailurePath** failure) {
        FailurePath newFailure;
        for (size_t i = 0; i < writer_.numInputOperands(); i++) {
            if (!newFailure.appendInput(allocator.operandLocation(i)))
                return false;
        }
        newFailure.setStackPushed(allocator.stackPushed());

        // Reuse the previous failure path if the current one is the same, to
        // avoid emitting duplicate code.
        if (failurePaths.length() > 0 && failurePaths.back().canShareFailurePath(newFailure)) {
            *failure = &failurePaths.back();
            return true;
        }

        if (!failurePaths.append(Move(newFailure)))
            return false;

        *failure = &failurePaths.back();
        return true;
    }
};

// Instructions that have to perform a callVM require a stub frame. Use
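The guard emitters elsewhere in this file tie addFailurePath and the allocator together. As a sketch of the shape such an emitter takes (reconstructed here for illustration; see the full patch for the checked-in version), emitGuardIsObject boils down to: fetch the input's current register, snapshot a failure path, and branch to it when the type test fails.

bool
BaselineCacheIRCompiler::emitGuardIsObject()
{
    // Current location of the operand; loads it into a register if needed.
    ValueOperand input = allocator.useValueRegister(masm, reader.valOperandId());

    // Snapshot the operand locations and stack depth for the bailout path.
    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.branchTestObject(Assembler::NotEqual, input, failure->label());
    return true;
}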
@@ -643,274 +195,6 @@ BaselineCacheIRCompiler::compile()
    return newStubCode;
}

ValueOperand
CacheRegisterAllocator::useValueRegister(MacroAssembler& masm, ValOperandId op)
{
    OperandLocation& loc = operandLocations_[op.id()];

    switch (loc.kind()) {
      case OperandLocation::ValueReg:
        currentOpRegs_.add(loc.valueReg());
        return loc.valueReg();

      case OperandLocation::ValueStack: {
        // The Value is on the stack. If it's on top of the stack, unbox and
        // then pop it. If we need the registers later, we can always spill
        // back. If it's not on the top of the stack, just unbox.
        ValueOperand reg = allocateValueRegister(masm);
        if (loc.valueStack() == stackPushed_) {
            masm.popValue(reg);
            MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
            stackPushed_ -= sizeof(js::Value);
        } else {
            MOZ_ASSERT(loc.valueStack() < stackPushed_);
            masm.loadValue(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()), reg);
        }
        loc.setValueReg(reg);
        return reg;
      }

      // The operand should never be unboxed.
      case OperandLocation::PayloadStack:
      case OperandLocation::PayloadReg:
      case OperandLocation::Uninitialized:
        break;
    }

    MOZ_CRASH();
}

Register
CacheRegisterAllocator::useRegister(MacroAssembler& masm, TypedOperandId typedId)
{
    OperandLocation& loc = operandLocations_[typedId.id()];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        currentOpRegs_.add(loc.payloadReg());
        return loc.payloadReg();

      case OperandLocation::ValueReg: {
        // It's possible the value is still boxed: as an optimization, we unbox
        // the first time we use a value as object.
        ValueOperand val = loc.valueReg();
        availableRegs_.add(val);
        Register reg = val.scratchReg();
        availableRegs_.take(reg);
        masm.unboxObject(val, reg);
        loc.setPayloadReg(reg, typedId.type());
        currentOpRegs_.add(reg);
        return reg;
      }

      case OperandLocation::PayloadStack: {
        // The payload is on the stack. If it's on top of the stack we can just
        // pop it, else we emit a load.
        Register reg = allocateRegister(masm);
        if (loc.payloadStack() == stackPushed_) {
            masm.pop(reg);
            MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
            stackPushed_ -= sizeof(uintptr_t);
        } else {
            MOZ_ASSERT(loc.payloadStack() < stackPushed_);
            masm.loadPtr(Address(masm.getStackPointer(), stackPushed_ - loc.payloadStack()), reg);
        }
        loc.setPayloadReg(reg, loc.payloadType());
        return reg;
      }

      case OperandLocation::ValueStack: {
        // The value is on the stack, but boxed. If it's on top of the stack we
        // unbox it and then remove it from the stack, else we just unbox.
        Register reg = allocateRegister(masm);
        if (loc.valueStack() == stackPushed_) {
            masm.unboxObject(Address(masm.getStackPointer(), 0), reg);
            masm.addToStackPtr(Imm32(sizeof(js::Value)));
            MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
            stackPushed_ -= sizeof(js::Value);
        } else {
            MOZ_ASSERT(loc.valueStack() < stackPushed_);
            masm.unboxObject(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
                             reg);
        }
        loc.setPayloadReg(reg, typedId.type());
        return reg;
      }

      case OperandLocation::Uninitialized:
        break;
    }

    MOZ_CRASH();
}

Register
CacheRegisterAllocator::defineRegister(MacroAssembler& masm, TypedOperandId typedId)
{
    OperandLocation& loc = operandLocations_[typedId.id()];
    MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

    Register reg = allocateRegister(masm);
    loc.setPayloadReg(reg, typedId.type());
    return reg;
}

ValueOperand
CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm, ValOperandId val)
{
    OperandLocation& loc = operandLocations_[val.id()];
    MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

    ValueOperand reg = allocateValueRegister(masm);
    loc.setValueReg(reg);
    return reg;
}

void
CacheRegisterAllocator::freeDeadOperandRegisters()
{
    // See if any operands are dead so we can reuse their registers. Note that
    // we skip the input operands, as those are also used by failure paths, and
    // we currently don't track those uses.
    for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
        if (!writer_.operandIsDead(i, currentInstruction_))
            continue;

        OperandLocation& loc = operandLocations_[i];
        switch (loc.kind()) {
          case OperandLocation::PayloadReg:
            availableRegs_.add(loc.payloadReg());
            break;
          case OperandLocation::ValueReg:
            availableRegs_.add(loc.valueReg());
            break;
          case OperandLocation::Uninitialized:
          case OperandLocation::PayloadStack:
          case OperandLocation::ValueStack:
            break;
        }
        loc.setUninitialized();
    }
}

void
CacheRegisterAllocator::discardStack(MacroAssembler& masm)
{
    // This should only be called when we are no longer using the operands,
    // as we're discarding everything from the native stack. Set all operand
    // locations to Uninitialized to catch bugs.
    for (size_t i = 0; i < operandLocations_.length(); i++)
        operandLocations_[i].setUninitialized();

    if (stackPushed_ > 0) {
        masm.addToStackPtr(Imm32(stackPushed_));
        stackPushed_ = 0;
    }
}

Register
CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
{
    if (availableRegs_.empty())
        freeDeadOperandRegisters();

    if (availableRegs_.empty()) {
        // Still no registers available, try to spill unused operands to
        // the stack.
        for (size_t i = 0; i < operandLocations_.length(); i++) {
            OperandLocation& loc = operandLocations_[i];
            if (loc.kind() == OperandLocation::PayloadReg) {
                Register reg = loc.payloadReg();
                if (currentOpRegs_.has(reg))
                    continue;

                masm.push(reg);
                stackPushed_ += sizeof(uintptr_t);
                loc.setPayloadStack(stackPushed_, loc.payloadType());
                availableRegs_.add(reg);
                break; // We got a register, so break out of the loop.
            }
            if (loc.kind() == OperandLocation::ValueReg) {
                ValueOperand reg = loc.valueReg();
                if (currentOpRegs_.aliases(reg))
                    continue;

                masm.pushValue(reg);
                stackPushed_ += sizeof(js::Value);
                loc.setValueStack(stackPushed_);
                availableRegs_.add(reg);
                break; // Break out of the loop.
            }
        }
    }

    // At this point, there must be a free register. (Ion ICs don't have as
    // many registers available, so once we support Ion code generation, we may
    // have to spill some unrelated registers.)
    MOZ_RELEASE_ASSERT(!availableRegs_.empty());

    Register reg = availableRegs_.takeAny();
    currentOpRegs_.add(reg);
    return reg;
}

void
CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm, Register reg)
{
    // Fixed registers should be allocated first, to ensure they're
    // still available.
    MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

    freeDeadOperandRegisters();

    if (availableRegs_.has(reg)) {
        availableRegs_.take(reg);
        currentOpRegs_.add(reg);
        return;
    }

    // The register must be used by some operand. Spill it to the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
        OperandLocation& loc = operandLocations_[i];
        if (loc.kind() == OperandLocation::PayloadReg) {
            if (loc.payloadReg() != reg)
                continue;

            masm.push(reg);
            stackPushed_ += sizeof(uintptr_t);
            loc.setPayloadStack(stackPushed_, loc.payloadType());
            currentOpRegs_.add(reg);
            return;
        }
        if (loc.kind() == OperandLocation::ValueReg) {
            if (!loc.valueReg().aliases(reg))
                continue;

            masm.pushValue(loc.valueReg());
            stackPushed_ += sizeof(js::Value);
            loc.setValueStack(stackPushed_);
            availableRegs_.add(loc.valueReg());
            availableRegs_.take(reg);
            currentOpRegs_.add(reg);
            return;
        }
    }

    MOZ_CRASH("Invalid register");
}

ValueOperand
CacheRegisterAllocator::allocateValueRegister(MacroAssembler& masm)
{
#ifdef JS_NUNBOX32
    Register reg1 = allocateRegister(masm);
    Register reg2 = allocateRegister(masm);
    return ValueOperand(reg1, reg2);
#else
    Register reg = allocateRegister(masm);
    return ValueOperand(reg);
#endif
}

bool
BaselineCacheIRCompiler::emitGuardIsObject()
{
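The spill bookkeeping above is all driven by stackPushed_: a slot spilled when the depth was D lives at [sp + stackPushed_ - D], and an operand whose recorded depth equals the current stackPushed_ is on top and can simply be popped. A standalone model of that arithmetic, with a vector standing in for the machine stack (illustrative only, not jit code):

// stackPushed counts bytes pushed; an operand spilled at depth D is reloaded
// from byte offset (stackPushed - D) below the stack pointer, exactly as in
// useRegister()/useValueRegister() above. Assumes 8-byte slots.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    std::vector<uint64_t> stack;   // stack.back() is the top slot
    uint32_t stackPushed = 0;      // bytes pushed so far

    auto push = [&](uint64_t v) {
        stack.push_back(v);
        stackPushed += 8;
        return stackPushed;        // depth recorded in the OperandLocation
    };
    auto load = [&](uint32_t depth) {
        uint32_t byteOffset = stackPushed - depth;   // offset from sp
        return stack[stack.size() - 1 - byteOffset / 8];
    };

    uint32_t a = push(0xa);        // operand A spilled at depth 8
    uint32_t b = push(0xb);        // operand B spilled at depth 16
    assert(load(a) == 0xa);        // A sits 8 bytes below the top
    assert(b == stackPushed);      // B is on top: it could be popped instead
    assert(load(b) == 0xb);
}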
@@ -1938,156 +1222,6 @@ BaselineCacheIRCompiler::init(CacheKind kind)
    return true;
}

template <typename T>
static GCPtr<T>*
AsGCPtr(uintptr_t* ptr)
{
    return reinterpret_cast<GCPtr<T>*>(ptr);
}

template<class T>
GCPtr<T>&
CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const
{
    uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
    MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);

    return *AsGCPtr<T>((uintptr_t*)(stubData + offset));
}

template GCPtr<Shape*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<JSObject*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;

template <typename T, typename V>
static void
InitGCPtr(uintptr_t* ptr, V val)
{
    AsGCPtr<T>(ptr)->init(mozilla::BitwiseCast<T>(val));
}

void
CacheIRWriter::copyStubData(uint8_t* dest) const
{
    uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);

    for (const StubField& field : stubFields_) {
        switch (field.type()) {
          case StubField::Type::RawWord:
            *destWords = field.asWord();
            break;
          case StubField::Type::Shape:
            InitGCPtr<Shape*>(destWords, field.asWord());
            break;
          case StubField::Type::JSObject:
            InitGCPtr<JSObject*>(destWords, field.asWord());
            break;
          case StubField::Type::ObjectGroup:
            InitGCPtr<ObjectGroup*>(destWords, field.asWord());
            break;
          case StubField::Type::Symbol:
            InitGCPtr<JS::Symbol*>(destWords, field.asWord());
            break;
          case StubField::Type::String:
            InitGCPtr<JSString*>(destWords, field.asWord());
            break;
          case StubField::Type::Id:
            InitGCPtr<jsid>(destWords, field.asWord());
            break;
          case StubField::Type::RawInt64:
            *reinterpret_cast<uint64_t*>(destWords) = field.asInt64();
            break;
          case StubField::Type::Value:
            InitGCPtr<JS::Value>(destWords, field.asInt64());
            break;
          case StubField::Type::Limit:
            MOZ_CRASH("Invalid type");
        }
        destWords += StubField::sizeInBytes(field.type()) / sizeof(uintptr_t);
    }
}

bool
CacheIRWriter::stubDataEquals(const uint8_t* stubData) const
{
    const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

    for (const StubField& field : stubFields_) {
        if (field.sizeIsWord()) {
            if (field.asWord() != *stubDataWords)
                return false;
            stubDataWords++;
            continue;
        }

        if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords))
            return false;
        stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
    }

    return true;
}

HashNumber
CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l)
{
    HashNumber hash = mozilla::HashBytes(l.code, l.length);
    hash = mozilla::AddToHash(hash, uint32_t(l.kind));
    hash = mozilla::AddToHash(hash, uint32_t(l.engine));
    return hash;
}

bool
CacheIRStubKey::match(const CacheIRStubKey& entry, const CacheIRStubKey::Lookup& l)
{
    if (entry.stubInfo->kind() != l.kind)
        return false;

    if (entry.stubInfo->engine() != l.engine)
        return false;

    if (entry.stubInfo->codeLength() != l.length)
        return false;

    if (!mozilla::PodEqual(entry.stubInfo->code(), l.code, l.length))
        return false;

    return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
  : CacheIRReader(stubInfo->code(), stubInfo->code() + stubInfo->codeLength())
{}

CacheIRStubInfo*
CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
                     uint32_t stubDataOffset, const CacheIRWriter& writer)
{
    size_t numStubFields = writer.numStubFields();
    size_t bytesNeeded = sizeof(CacheIRStubInfo) +
                         writer.codeLength() +
                         (numStubFields + 1); // +1 for the GCType::Limit terminator.
    uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
    if (!p)
        return nullptr;

    // Copy the CacheIR code.
    uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
    mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

    static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                  "StubField::Type must fit in uint8_t");

    // Copy the stub field types.
    uint8_t* fieldTypes = codeStart + writer.codeLength();
    for (size_t i = 0; i < numStubFields; i++)
        fieldTypes[i] = uint8_t(writer.stubFieldType(i));
    fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

    return new(p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset, codeStart,
                                  writer.codeLength(), fieldTypes);
}

static const size_t MaxOptimizedCacheIRStubs = 16;

ICStub*
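CacheIRStubKey::hash and match together define the key for the stub-code sharing table: two stubs share compiled code exactly when kind, engine, and the raw CacheIR byte sequence all agree. A standalone model of that lookup discipline, with std::map standing in for the real hash set and an int standing in for the JitCode pointer (illustrative only):

// Stubs keyed by (kind, engine, CacheIR bytes); "compile" only on a miss.
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <tuple>

using Key = std::tuple<uint32_t /*kind*/, uint32_t /*engine*/, std::string /*CacheIR bytes*/>;

int main() {
    std::map<Key, int> stubCodes;  // value stands in for the compiled JitCode*
    int nextCode = 1;

    auto getOrCompile = [&](uint32_t kind, uint32_t engine, std::string ir) {
        auto [it, fresh] = stubCodes.try_emplace(Key{kind, engine, std::move(ir)}, 0);
        if (fresh)
            it->second = nextCode++;   // compile only when no existing stub matches
        return it->second;
    };

    int a = getOrCompile(0, 0, "\x01\x02");  // compiles new code
    int b = getOrCompile(0, 0, "\x01\x02");  // identical IR: shares a's code
    int c = getOrCompile(0, 0, "\x01\x03");  // different IR: compiles again
    printf("a=%d b=%d c=%d\n", a, b, c);     // prints a=1 b=1 c=2
}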
@@ -2227,67 +1361,6 @@ jit::TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo
    }
}

size_t
CacheIRStubInfo::stubDataSize() const
{
    size_t field = 0;
    size_t size = 0;
    while (true) {
        StubField::Type type = fieldType(field++);
        if (type == StubField::Type::Limit)
            return size;
        size += StubField::sizeInBytes(type);
    }
}

void
CacheIRStubInfo::copyStubData(ICStub* src, ICStub* dest) const
{
    uint8_t* srcBytes = reinterpret_cast<uint8_t*>(src);
    uint8_t* destBytes = reinterpret_cast<uint8_t*>(dest);

    size_t field = 0;
    size_t offset = 0;
    while (true) {
        StubField::Type type = fieldType(field);
        switch (type) {
          case StubField::Type::RawWord:
            *reinterpret_cast<uintptr_t*>(destBytes + offset) =
                *reinterpret_cast<uintptr_t*>(srcBytes + offset);
            break;
          case StubField::Type::RawInt64:
            *reinterpret_cast<uint64_t*>(destBytes + offset) =
                *reinterpret_cast<uint64_t*>(srcBytes + offset);
            break;
          case StubField::Type::Shape:
            getStubField<Shape*>(dest, offset).init(getStubField<Shape*>(src, offset));
            break;
          case StubField::Type::JSObject:
            getStubField<JSObject*>(dest, offset).init(getStubField<JSObject*>(src, offset));
            break;
          case StubField::Type::ObjectGroup:
            getStubField<ObjectGroup*>(dest, offset).init(getStubField<ObjectGroup*>(src, offset));
            break;
          case StubField::Type::Symbol:
            getStubField<JS::Symbol*>(dest, offset).init(getStubField<JS::Symbol*>(src, offset));
            break;
          case StubField::Type::String:
            getStubField<JSString*>(dest, offset).init(getStubField<JSString*>(src, offset));
            break;
          case StubField::Type::Id:
            getStubField<jsid>(dest, offset).init(getStubField<jsid>(src, offset));
            break;
          case StubField::Type::Value:
            getStubField<Value>(dest, offset).init(getStubField<Value>(src, offset));
            break;
          case StubField::Type::Limit:
            return; // Done.
        }
        field++;
        offset += StubField::sizeInBytes(type);
    }
}

uint8_t*
ICCacheIR_Monitored::stubDataStart()
{
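stubDataSize() and copyStubData() both walk the same structure: a byte array of field types terminated by Type::Limit, where each field's byte size is fixed by its type. A standalone model of that walk, with hypothetical sizes assuming a 64-bit build (not the real StubField enum):

// The field-type array is Limit-terminated; the stub data size is the sum of
// the per-field sizes, mirroring stubDataSize() above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum class Type : uint8_t { RawWord, Shape, Value, RawInt64, Limit };

static size_t sizeInBytes(Type t) {
    switch (t) {
      case Type::RawWord:
      case Type::Shape:    return sizeof(uintptr_t); // GC pointers are word-sized
      case Type::Value:
      case Type::RawInt64: return sizeof(uint64_t);
      case Type::Limit:    break;
    }
    return 0;
}

int main() {
    // Layout for a hypothetical stub: a Shape guard, a raw word, and a Value.
    const Type fieldTypes[] = { Type::Shape, Type::RawWord, Type::Value, Type::Limit };

    size_t size = 0;
    for (size_t field = 0; fieldTypes[field] != Type::Limit; field++)
        size += sizeInBytes(fieldTypes[field]);

    printf("stub data size: %zu bytes\n", size); // 24 on a 64-bit build
}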
js/src/jit/BaselineCacheIR.h
@@ -9,6 +9,7 @@
#include "gc/Barrier.h"
#include "jit/CacheIR.h"
#include "jit/CacheIRCompiler.h"

namespace js {
namespace jit {
@@ -16,62 +17,6 @@ namespace jit {
class ICFallbackStub;
class ICStub;

// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
// of this class.
class CacheIRStubInfo
{
    // These fields don't require 8 bits, but GCC complains if these fields are
    // smaller than the size of the enums.
    CacheKind kind_ : 8;
    ICStubEngine engine_ : 8;
    bool makesGCCalls_ : 1;
    uint8_t stubDataOffset_;

    const uint8_t* code_;
    uint32_t length_;
    const uint8_t* fieldTypes_;

    CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
                    uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength,
                    const uint8_t* fieldTypes)
      : kind_(kind),
        engine_(engine),
        makesGCCalls_(makesGCCalls),
        stubDataOffset_(stubDataOffset),
        code_(code),
        length_(codeLength),
        fieldTypes_(fieldTypes)
    {
        MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
        MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
        MOZ_ASSERT(stubDataOffset_ == stubDataOffset, "stubDataOffset must fit in uint8_t");
    }

    CacheIRStubInfo(const CacheIRStubInfo&) = delete;
    CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;

  public:
    CacheKind kind() const { return kind_; }
    ICStubEngine engine() const { return engine_; }
    bool makesGCCalls() const { return makesGCCalls_; }

    const uint8_t* code() const { return code_; }
    uint32_t codeLength() const { return length_; }
    uint32_t stubDataOffset() const { return stubDataOffset_; }

    size_t stubDataSize() const;

    StubField::Type fieldType(uint32_t i) const { return (StubField::Type)fieldTypes_[i]; }

    static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine, bool canMakeCalls,
                                uint32_t stubDataOffset, const CacheIRWriter& writer);

    template <class T>
    js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const;

    void copyStubData(ICStub* src, ICStub* dest) const;
};

void TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo);

ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
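CacheIRStubInfo::New (shown in the .cpp hunks above) packs the info struct, the CacheIR code bytes, and the Limit-terminated field-type bytes into one allocation, with code_ and fieldTypes_ pointing into the trailing storage. A standalone model of that single-allocation layout, with a plain struct standing in for the real class (illustrative only):

// One malloc holds: [Info][CacheIR code bytes][field types + terminator].
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <new>

struct Info {
    const uint8_t* code;
    uint32_t codeLength;
    const uint8_t* fieldTypes;
};

int main() {
    const uint8_t irCode[] = {0x10, 0x20, 0x30}; // hypothetical CacheIR bytes
    const uint8_t types[]  = {1, 2};             // two stub fields
    const uint8_t kLimit   = 0xff;               // stand-in terminator byte

    size_t bytes = sizeof(Info) + sizeof(irCode) + sizeof(types) + 1;
    uint8_t* p = static_cast<uint8_t*>(malloc(bytes));

    // Trailing storage: code first, then the field-type bytes.
    uint8_t* codeStart = p + sizeof(Info);
    memcpy(codeStart, irCode, sizeof(irCode));
    uint8_t* fieldTypes = codeStart + sizeof(irCode);
    memcpy(fieldTypes, types, sizeof(types));
    fieldTypes[sizeof(types)] = kLimit;

    Info* info = new (p) Info{codeStart, sizeof(irCode), fieldTypes};
    printf("code[0]=%#x, fieldTypes[2]=%#x\n", info->code[0], info->fieldTypes[2]);
    free(p);
}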
@ -0,0 +1,650 @@
|
||||||
|
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
||||||
|
* vim: set ts=8 sts=4 et sw=4 tw=99:
|
||||||
|
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||||
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||||
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||||
|
|
||||||
|
#include "jit/CacheIRCompiler.h"
|
||||||
|
|
||||||
|
using namespace js;
|
||||||
|
using namespace js::jit;
|
||||||
|
|
||||||
|
using mozilla::Maybe;
|
||||||
|
|
||||||
|
ValueOperand
|
||||||
|
CacheRegisterAllocator::useValueRegister(MacroAssembler& masm, ValOperandId op)
|
||||||
|
{
|
||||||
|
OperandLocation& loc = operandLocations_[op.id()];
|
||||||
|
|
||||||
|
switch (loc.kind()) {
|
||||||
|
case OperandLocation::ValueReg:
|
||||||
|
currentOpRegs_.add(loc.valueReg());
|
||||||
|
return loc.valueReg();
|
||||||
|
|
||||||
|
case OperandLocation::ValueStack: {
|
||||||
|
// The Value is on the stack. If it's on top of the stack, unbox and
|
||||||
|
// then pop it. If we need the registers later, we can always spill
|
||||||
|
// back. If it's not on the top of the stack, just unbox.
|
||||||
|
ValueOperand reg = allocateValueRegister(masm);
|
||||||
|
if (loc.valueStack() == stackPushed_) {
|
||||||
|
masm.popValue(reg);
|
||||||
|
MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
|
||||||
|
stackPushed_ -= sizeof(js::Value);
|
||||||
|
} else {
|
||||||
|
MOZ_ASSERT(loc.valueStack() < stackPushed_);
|
||||||
|
masm.loadValue(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()), reg);
|
||||||
|
}
|
||||||
|
loc.setValueReg(reg);
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The operand should never be unboxed.
|
||||||
|
case OperandLocation::PayloadStack:
|
||||||
|
case OperandLocation::PayloadReg:
|
||||||
|
case OperandLocation::Uninitialized:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
MOZ_CRASH();
|
||||||
|
}
|
||||||
|
|
||||||
|
Register
|
||||||
|
CacheRegisterAllocator::useRegister(MacroAssembler& masm, TypedOperandId typedId)
|
||||||
|
{
|
||||||
|
OperandLocation& loc = operandLocations_[typedId.id()];
|
||||||
|
switch (loc.kind()) {
|
||||||
|
case OperandLocation::PayloadReg:
|
||||||
|
currentOpRegs_.add(loc.payloadReg());
|
||||||
|
return loc.payloadReg();
|
||||||
|
|
||||||
|
case OperandLocation::ValueReg: {
|
||||||
|
// It's possible the value is still boxed: as an optimization, we unbox
|
||||||
|
// the first time we use a value as object.
|
||||||
|
ValueOperand val = loc.valueReg();
|
||||||
|
availableRegs_.add(val);
|
||||||
|
Register reg = val.scratchReg();
|
||||||
|
availableRegs_.take(reg);
|
||||||
|
masm.unboxObject(val, reg);
|
||||||
|
loc.setPayloadReg(reg, typedId.type());
|
||||||
|
currentOpRegs_.add(reg);
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
case OperandLocation::PayloadStack: {
|
||||||
|
// The payload is on the stack. If it's on top of the stack we can just
|
||||||
|
// pop it, else we emit a load.
|
||||||
|
Register reg = allocateRegister(masm);
|
||||||
|
if (loc.payloadStack() == stackPushed_) {
|
||||||
|
masm.pop(reg);
|
||||||
|
MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
|
||||||
|
stackPushed_ -= sizeof(uintptr_t);
|
||||||
|
} else {
|
||||||
|
MOZ_ASSERT(loc.payloadStack() < stackPushed_);
|
||||||
|
masm.loadPtr(Address(masm.getStackPointer(), stackPushed_ - loc.payloadStack()), reg);
|
||||||
|
}
|
||||||
|
loc.setPayloadReg(reg, loc.payloadType());
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
case OperandLocation::ValueStack: {
|
||||||
|
// The value is on the stack, but boxed. If it's on top of the stack we
|
||||||
|
// unbox it and then remove it from the stack, else we just unbox.
|
||||||
|
Register reg = allocateRegister(masm);
|
||||||
|
if (loc.valueStack() == stackPushed_) {
|
||||||
|
masm.unboxObject(Address(masm.getStackPointer(), 0), reg);
|
||||||
|
masm.addToStackPtr(Imm32(sizeof(js::Value)));
|
||||||
|
MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
|
||||||
|
stackPushed_ -= sizeof(js::Value);
|
||||||
|
} else {
|
||||||
|
MOZ_ASSERT(loc.valueStack() < stackPushed_);
|
||||||
|
masm.unboxObject(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
|
||||||
|
reg);
|
||||||
|
}
|
||||||
|
loc.setPayloadReg(reg, typedId.type());
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
case OperandLocation::Uninitialized:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
MOZ_CRASH();
|
||||||
|
}
|
||||||
|
|
||||||
|
Register
|
||||||
|
CacheRegisterAllocator::defineRegister(MacroAssembler& masm, TypedOperandId typedId)
|
||||||
|
{
|
||||||
|
OperandLocation& loc = operandLocations_[typedId.id()];
|
||||||
|
MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
|
||||||
|
|
||||||
|
Register reg = allocateRegister(masm);
|
||||||
|
loc.setPayloadReg(reg, typedId.type());
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
ValueOperand
|
||||||
|
CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm, ValOperandId val)
|
||||||
|
{
|
||||||
|
OperandLocation& loc = operandLocations_[val.id()];
|
||||||
|
MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
|
||||||
|
|
||||||
|
ValueOperand reg = allocateValueRegister(masm);
|
||||||
|
loc.setValueReg(reg);
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
CacheRegisterAllocator::freeDeadOperandRegisters()
|
||||||
|
{
|
||||||
|
// See if any operands are dead so we can reuse their registers. Note that
|
||||||
|
// we skip the input operands, as those are also used by failure paths, and
|
||||||
|
// we currently don't track those uses.
|
||||||
|
for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
|
||||||
|
if (!writer_.operandIsDead(i, currentInstruction_))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
OperandLocation& loc = operandLocations_[i];
|
||||||
|
switch (loc.kind()) {
|
||||||
|
case OperandLocation::PayloadReg:
|
||||||
|
availableRegs_.add(loc.payloadReg());
|
||||||
|
break;
|
||||||
|
case OperandLocation::ValueReg:
|
||||||
|
availableRegs_.add(loc.valueReg());
|
||||||
|
break;
|
||||||
|
case OperandLocation::Uninitialized:
|
||||||
|
case OperandLocation::PayloadStack:
|
||||||
|
case OperandLocation::ValueStack:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
loc.setUninitialized();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
CacheRegisterAllocator::discardStack(MacroAssembler& masm)
|
||||||
|
{
|
||||||
|
// This should only be called when we are no longer using the operands,
|
||||||
|
// as we're discarding everything from the native stack. Set all operand
|
||||||
|
// locations to Uninitialized to catch bugs.
|
||||||
|
for (size_t i = 0; i < operandLocations_.length(); i++)
|
||||||
|
operandLocations_[i].setUninitialized();
|
||||||
|
|
||||||
|
if (stackPushed_ > 0) {
|
||||||
|
masm.addToStackPtr(Imm32(stackPushed_));
|
||||||
|
stackPushed_ = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Register
|
||||||
|
CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
|
||||||
|
{
|
||||||
|
if (availableRegs_.empty())
|
||||||
|
freeDeadOperandRegisters();
|
||||||
|
|
||||||
|
if (availableRegs_.empty()) {
|
||||||
|
// Still no registers available, try to spill unused operands to
|
||||||
|
// the stack.
|
||||||
|
for (size_t i = 0; i < operandLocations_.length(); i++) {
|
||||||
|
OperandLocation& loc = operandLocations_[i];
|
||||||
|
if (loc.kind() == OperandLocation::PayloadReg) {
|
||||||
|
Register reg = loc.payloadReg();
|
||||||
|
if (currentOpRegs_.has(reg))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
masm.push(reg);
|
||||||
|
stackPushed_ += sizeof(uintptr_t);
|
||||||
|
loc.setPayloadStack(stackPushed_, loc.payloadType());
|
||||||
|
availableRegs_.add(reg);
|
||||||
|
break; // We got a register, so break out of the loop.
|
||||||
|
}
|
||||||
|
if (loc.kind() == OperandLocation::ValueReg) {
|
||||||
|
ValueOperand reg = loc.valueReg();
|
||||||
|
if (currentOpRegs_.aliases(reg))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
masm.pushValue(reg);
|
||||||
|
stackPushed_ += sizeof(js::Value);
|
||||||
|
loc.setValueStack(stackPushed_);
|
||||||
|
availableRegs_.add(reg);
|
||||||
|
break; // Break out of the loop.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point, there must be a free register. (Ion ICs don't have as
|
||||||
|
// many registers available, so once we support Ion code generation, we may
|
||||||
|
// have to spill some unrelated registers.)
|
||||||
|
MOZ_RELEASE_ASSERT(!availableRegs_.empty());
|
||||||
|
|
||||||
|
Register reg = availableRegs_.takeAny();
|
||||||
|
currentOpRegs_.add(reg);
|
||||||
|
return reg;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm, Register reg)
|
||||||
|
{
|
||||||
|
// Fixed registers should be allocated first, to ensure they're
|
||||||
|
// still available.
|
||||||
|
MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");
|
||||||
|
|
||||||
|
freeDeadOperandRegisters();
|
||||||
|
|
||||||
|
if (availableRegs_.has(reg)) {
|
||||||
|
availableRegs_.take(reg);
|
||||||
|
currentOpRegs_.add(reg);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The register must be used by some operand. Spill it to the stack.
|
||||||
|
for (size_t i = 0; i < operandLocations_.length(); i++) {
|
||||||
|
OperandLocation& loc = operandLocations_[i];
|
||||||
|
if (loc.kind() == OperandLocation::PayloadReg) {
|
||||||
|
if (loc.payloadReg() != reg)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
masm.push(reg);
|
||||||
|
stackPushed_ += sizeof(uintptr_t);
|
||||||
|
loc.setPayloadStack(stackPushed_, loc.payloadType());
|
||||||
|
currentOpRegs_.add(reg);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (loc.kind() == OperandLocation::ValueReg) {
|
||||||
|
if (!loc.valueReg().aliases(reg))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
masm.pushValue(loc.valueReg());
|
||||||
|
stackPushed_ += sizeof(js::Value);
|
||||||
|
loc.setValueStack(stackPushed_);
|
||||||
|
availableRegs_.add(loc.valueReg());
|
||||||
|
availableRegs_.take(reg);
|
||||||
|
currentOpRegs_.add(reg);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
MOZ_CRASH("Invalid register");
|
||||||
|
}
|
||||||
|
|
||||||
|
ValueOperand
|
||||||
|
CacheRegisterAllocator::allocateValueRegister(MacroAssembler& masm)
|
||||||
|
{
|
||||||
|
#ifdef JS_NUNBOX32
|
||||||
|
Register reg1 = allocateRegister(masm);
|
||||||
|
Register reg2 = allocateRegister(masm);
|
||||||
|
return ValueOperand(reg1, reg2);
|
||||||
|
#else
|
||||||
|
Register reg = allocateRegister(masm);
|
||||||
|
return ValueOperand(reg);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
CacheRegisterAllocator::init(const AllocatableGeneralRegisterSet& available)
|
||||||
|
{
|
||||||
|
availableRegs_ = available;
|
||||||
|
if (!origInputLocations_.resize(writer_.numInputOperands()))
|
||||||
|
return false;
|
||||||
|
if (!operandLocations_.resize(writer_.numOperandIds()))
|
||||||
|
return false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t
|
||||||
|
CacheIRStubInfo::stubDataSize() const
|
||||||
|
{
|
||||||
|
size_t field = 0;
|
||||||
|
size_t size = 0;
|
||||||
|
while (true) {
|
||||||
|
StubField::Type type = fieldType(field++);
|
||||||
|
if (type == StubField::Type::Limit)
|
||||||
|
return size;
|
||||||
|
size += StubField::sizeInBytes(type);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
CacheIRStubInfo::copyStubData(ICStub* src, ICStub* dest) const
|
||||||
|
{
|
||||||
|
uint8_t* srcBytes = reinterpret_cast<uint8_t*>(src);
|
||||||
|
uint8_t* destBytes = reinterpret_cast<uint8_t*>(dest);
|
||||||
|
|
||||||
|
size_t field = 0;
|
||||||
|
size_t offset = 0;
|
||||||
|
while (true) {
|
||||||
|
StubField::Type type = fieldType(field);
|
||||||
|
switch (type) {
|
||||||
|
case StubField::Type::RawWord:
|
||||||
|
*reinterpret_cast<uintptr_t*>(destBytes + offset) =
|
||||||
|
*reinterpret_cast<uintptr_t*>(srcBytes + offset);
|
||||||
|
break;
|
||||||
|
case StubField::Type::RawInt64:
|
||||||
|
*reinterpret_cast<uint64_t*>(destBytes + offset) =
|
||||||
|
*reinterpret_cast<uint64_t*>(srcBytes + offset);
|
||||||
|
break;
|
||||||
|
case StubField::Type::Shape:
|
||||||
|
getStubField<Shape*>(dest, offset).init(getStubField<Shape*>(src, offset));
|
||||||
|
break;
|
||||||
|
case StubField::Type::JSObject:
|
||||||
|
getStubField<JSObject*>(dest, offset).init(getStubField<JSObject*>(src, offset));
|
||||||
|
break;
|
||||||
|
case StubField::Type::ObjectGroup:
|
||||||
|
getStubField<ObjectGroup*>(dest, offset).init(getStubField<ObjectGroup*>(src, offset));
|
||||||
|
break;
|
||||||
|
case StubField::Type::Symbol:
|
||||||
|
getStubField<JS::Symbol*>(dest, offset).init(getStubField<JS::Symbol*>(src, offset));
|
||||||
|
break;
|
||||||
|
case StubField::Type::String:
|
||||||
|
getStubField<JSString*>(dest, offset).init(getStubField<JSString*>(src, offset));
|
||||||
|
break;
|
||||||
|
case StubField::Type::Id:
|
||||||
|
getStubField<jsid>(dest, offset).init(getStubField<jsid>(src, offset));
|
||||||
|
break;
|
||||||
|
case StubField::Type::Value:
|
||||||
|
getStubField<Value>(dest, offset).init(getStubField<Value>(src, offset));
|
||||||
|
break;
|
||||||
|
case StubField::Type::Limit:
|
||||||
|
return; // Done.
|
||||||
|
}
|
||||||
|
field++;
|
||||||
|
offset += StubField::sizeInBytes(type);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
static GCPtr<T>*
|
||||||
|
AsGCPtr(uintptr_t* ptr)
|
||||||
|
{
|
||||||
|
return reinterpret_cast<GCPtr<T>*>(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class T>
|
||||||
|
GCPtr<T>&
|
||||||
|
CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const
|
||||||
|
{
|
||||||
|
uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
|
||||||
|
MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);
|
||||||
|
|
||||||
|
return *AsGCPtr<T>((uintptr_t*)(stubData + offset));
|
||||||
|
}
|
||||||
|
|
||||||
|
template GCPtr<Shape*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
|
||||||
|
template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
|
||||||
|
template GCPtr<JSObject*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
|
||||||
|
template GCPtr<JSString*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
|
||||||
|
template GCPtr<JS::Symbol*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
|
||||||
|
template GCPtr<JS::Value>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
|
||||||
|
template GCPtr<jsid>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
|
||||||
|
|
||||||
|
template <typename T, typename V>
|
||||||
|
static void
|
||||||
|
InitGCPtr(uintptr_t* ptr, V val)
|
||||||
|
{
|
||||||
|
AsGCPtr<T>(ptr)->init(mozilla::BitwiseCast<T>(val));
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
CacheIRWriter::copyStubData(uint8_t* dest) const
|
||||||
|
{
|
||||||
|
uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);
|
||||||
|
|
||||||
|
for (const StubField& field : stubFields_) {
|
||||||
|
switch (field.type()) {
|
||||||
|
case StubField::Type::RawWord:
|
||||||
|
*destWords = field.asWord();
|
||||||
|
break;
|
||||||
|
case StubField::Type::Shape:
|
||||||
|
InitGCPtr<Shape*>(destWords, field.asWord());
|
||||||
|
break;
|
||||||
|
case StubField::Type::JSObject:
|
||||||
|
InitGCPtr<JSObject*>(destWords, field.asWord());
|
||||||
|
break;
|
||||||
|
case StubField::Type::ObjectGroup:
|
||||||
|
InitGCPtr<ObjectGroup*>(destWords, field.asWord());
|
||||||
|
break;
|
||||||
|
case StubField::Type::Symbol:
|
||||||
|
InitGCPtr<JS::Symbol*>(destWords, field.asWord());
|
||||||
|
break;
|
||||||
|
case StubField::Type::String:
|
||||||
|
InitGCPtr<JSString*>(destWords, field.asWord());
|
||||||
|
break;
|
||||||
|
case StubField::Type::Id:
|
||||||
|
InitGCPtr<jsid>(destWords, field.asWord());
|
||||||
|
break;
|
||||||
|
case StubField::Type::RawInt64:
|
||||||
|
*reinterpret_cast<uint64_t*>(destWords) = field.asInt64();
|
||||||
|
break;
|
||||||
|
case StubField::Type::Value:
|
||||||
|
InitGCPtr<JS::Value>(destWords, field.asInt64());
|
||||||
|
break;
|
||||||
|
case StubField::Type::Limit:
|
||||||
|
MOZ_CRASH("Invalid type");
|
||||||
|
}
|
||||||
|
destWords += StubField::sizeInBytes(field.type()) / sizeof(uintptr_t);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
bool
CacheIRWriter::stubDataEquals(const uint8_t* stubData) const
{
    const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

    for (const StubField& field : stubFields_) {
        if (field.sizeIsWord()) {
            if (field.asWord() != *stubDataWords)
                return false;
            stubDataWords++;
            continue;
        }

        if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords))
            return false;
        stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
    }

    return true;
}

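// Generated stub code is shared between ICs that emit the same CacheIR. The
// key for that sharing is the CacheIR bytes together with the cache kind and
// stub engine; hash() and match() implement the hash-table protocol over
// that key.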
HashNumber
CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l)
{
    HashNumber hash = mozilla::HashBytes(l.code, l.length);
    hash = mozilla::AddToHash(hash, uint32_t(l.kind));
    hash = mozilla::AddToHash(hash, uint32_t(l.engine));
    return hash;
}

bool
CacheIRStubKey::match(const CacheIRStubKey& entry, const CacheIRStubKey::Lookup& l)
{
    if (entry.stubInfo->kind() != l.kind)
        return false;

    if (entry.stubInfo->engine() != l.engine)
        return false;

    if (entry.stubInfo->codeLength() != l.length)
        return false;

    if (!mozilla::PodEqual(entry.stubInfo->code(), l.code, l.length))
        return false;

    return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
  : CacheIRReader(stubInfo->code(), stubInfo->code() + stubInfo->codeLength())
{}

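// CacheIRStubInfo::New packs everything into a single allocation:
//
//   | CacheIRStubInfo | CacheIR code | field types (1 byte each) | Limit |
//
// The trailing StubField::Type::Limit byte terminates the field-type list,
// so no separate field count has to be stored.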
CacheIRStubInfo*
CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
                     uint32_t stubDataOffset, const CacheIRWriter& writer)
{
    size_t numStubFields = writer.numStubFields();
    size_t bytesNeeded = sizeof(CacheIRStubInfo) +
                         writer.codeLength() +
                         (numStubFields + 1); // +1 for the StubField::Type::Limit terminator.
    uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
    if (!p)
        return nullptr;

    // Copy the CacheIR code.
    uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
    mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

    static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                  "StubField::Type must fit in uint8_t");

    // Copy the stub field types.
    uint8_t* fieldTypes = codeStart + writer.codeLength();
    for (size_t i = 0; i < numStubFields; i++)
        fieldTypes[i] = uint8_t(writer.stubFieldType(i));
    fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

    return new(p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset, codeStart,
                                  writer.codeLength(), fieldTypes);
}

bool
OperandLocation::operator==(const OperandLocation& other) const
{
    if (kind_ != other.kind_)
        return false;

    switch (kind()) {
      case Uninitialized:
        return true;
      case PayloadReg:
        return payloadReg() == other.payloadReg() && payloadType() == other.payloadType();
      case ValueReg:
        return valueReg() == other.valueReg();
      case PayloadStack:
        return payloadStack() == other.payloadStack() && payloadType() == other.payloadType();
      case ValueStack:
        return valueStack() == other.valueStack();
    }

    MOZ_CRASH("Invalid OperandLocation kind");
}

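// Two failure paths can share code only if every input is in the same
// location and the same amount of stack has been pushed: the emitted
// restore sequence depends on exactly that state.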
bool
FailurePath::canShareFailurePath(const FailurePath& other) const
{
    if (stackPushed_ != other.stackPushed_)
        return false;

    MOZ_ASSERT(inputs_.length() == other.inputs_.length());

    for (size_t i = 0; i < inputs_.length(); i++) {
        if (inputs_[i] != other.inputs_[i])
            return false;
    }
    return true;
}

bool
CacheIRCompiler::addFailurePath(FailurePath** failure)
{
    FailurePath newFailure;
    for (size_t i = 0; i < writer_.numInputOperands(); i++) {
        if (!newFailure.appendInput(allocator.operandLocation(i)))
            return false;
    }
    newFailure.setStackPushed(allocator.stackPushed());

    // Reuse the previous failure path if the current one is the same, to
    // avoid emitting duplicate code.
    if (failurePaths.length() > 0 && failurePaths.back().canShareFailurePath(newFailure)) {
        *failure = &failurePaths.back();
        return true;
    }

    if (!failurePaths.append(Move(newFailure)))
        return false;

    *failure = &failurePaths.back();
    return true;
}

void
CacheIRCompiler::emitFailurePath(size_t i)
{
    FailurePath& failure = failurePaths[i];

    masm.bind(failure.label());

    uint32_t stackPushed = failure.stackPushed();
    size_t numInputOperands = writer_.numInputOperands();

    for (size_t j = 0; j < numInputOperands; j++) {
        OperandLocation orig = allocator.origInputLocation(j);
        OperandLocation cur = failure.input(j);

        MOZ_ASSERT(orig.kind() == OperandLocation::ValueReg);

        // We have a cycle if a destination register will be used later
        // as source register. If that happens, just push the current value
        // on the stack and later get it from there.
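        // For example (registers here are hypothetical): restoring input 0
        // into a register that still holds input 1's current value would
        // clobber input 1's source, so input 1 is spilled to the stack first
        // and reloaded when its own turn comes.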
        for (size_t k = j + 1; k < numInputOperands; k++) {
            OperandLocation laterSource = failure.input(k);
            switch (laterSource.kind()) {
              case OperandLocation::ValueReg:
                if (orig.aliasesReg(laterSource.valueReg())) {
                    stackPushed += sizeof(js::Value);
                    masm.pushValue(laterSource.valueReg());
                    laterSource.setValueStack(stackPushed);
                }
                break;
              case OperandLocation::PayloadReg:
                if (orig.aliasesReg(laterSource.payloadReg())) {
                    stackPushed += sizeof(uintptr_t);
                    masm.push(laterSource.payloadReg());
                    laterSource.setPayloadStack(stackPushed, laterSource.payloadType());
                }
                break;
              case OperandLocation::PayloadStack:
              case OperandLocation::ValueStack:
              case OperandLocation::Uninitialized:
                break;
            }
        }

        switch (cur.kind()) {
          case OperandLocation::ValueReg:
            masm.moveValue(cur.valueReg(), orig.valueReg());
            break;
          case OperandLocation::PayloadReg:
            masm.tagValue(cur.payloadType(), cur.payloadReg(), orig.valueReg());
            break;
          case OperandLocation::PayloadStack: {
            MOZ_ASSERT(stackPushed >= sizeof(uintptr_t));
            Register scratch = orig.valueReg().scratchReg();
            if (cur.payloadStack() == stackPushed) {
                masm.pop(scratch);
                stackPushed -= sizeof(uintptr_t);
            } else {
                MOZ_ASSERT(cur.payloadStack() < stackPushed);
                masm.loadPtr(Address(masm.getStackPointer(), stackPushed - cur.payloadStack()),
                             scratch);
            }
            masm.tagValue(cur.payloadType(), scratch, orig.valueReg());
            break;
          }
          case OperandLocation::ValueStack:
            MOZ_ASSERT(stackPushed >= sizeof(js::Value));
            if (cur.valueStack() == stackPushed) {
                masm.popValue(orig.valueReg());
                stackPushed -= sizeof(js::Value);
            } else {
                MOZ_ASSERT(cur.valueStack() < stackPushed);
                masm.loadValue(Address(masm.getStackPointer(), stackPushed - cur.valueStack()),
                               orig.valueReg());
            }
            break;
          default:
            MOZ_CRASH();
        }
    }

    allocator.discardStack(masm);
}

@ -0,0 +1,385 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_CacheIRCompiler_h
#define jit_CacheIRCompiler_h

#include "jit/CacheIR.h"

namespace js {
namespace jit {

// OperandLocation represents the location of an OperandId. The operand is
// either in a register or on the stack, and is either boxed or unboxed.
class OperandLocation
{
  public:
    enum Kind {
        Uninitialized = 0,
        PayloadReg,
        ValueReg,
        PayloadStack,
        ValueStack,
    };

  private:
    Kind kind_;

    union Data {
        struct {
            Register reg;
            JSValueType type;
        } payloadReg;
        ValueOperand valueReg;
        struct {
            uint32_t stackPushed;
            JSValueType type;
        } payloadStack;
        uint32_t valueStackPushed;

        Data() : valueStackPushed(0) {}
    };
    Data data_;

  public:
    OperandLocation() : kind_(Uninitialized) {}

    Kind kind() const { return kind_; }

    void setUninitialized() {
        kind_ = Uninitialized;
    }

    ValueOperand valueReg() const {
        MOZ_ASSERT(kind_ == ValueReg);
        return data_.valueReg;
    }
    Register payloadReg() const {
        MOZ_ASSERT(kind_ == PayloadReg);
        return data_.payloadReg.reg;
    }
    uint32_t payloadStack() const {
        MOZ_ASSERT(kind_ == PayloadStack);
        return data_.payloadStack.stackPushed;
    }
    uint32_t valueStack() const {
        MOZ_ASSERT(kind_ == ValueStack);
        return data_.valueStackPushed;
    }
    JSValueType payloadType() const {
        if (kind_ == PayloadReg)
            return data_.payloadReg.type;
        MOZ_ASSERT(kind_ == PayloadStack);
        return data_.payloadStack.type;
    }
    void setPayloadReg(Register reg, JSValueType type) {
        kind_ = PayloadReg;
        data_.payloadReg.reg = reg;
        data_.payloadReg.type = type;
    }
    void setValueReg(ValueOperand reg) {
        kind_ = ValueReg;
        data_.valueReg = reg;
    }
    void setPayloadStack(uint32_t stackPushed, JSValueType type) {
        kind_ = PayloadStack;
        data_.payloadStack.stackPushed = stackPushed;
        data_.payloadStack.type = type;
    }
    void setValueStack(uint32_t stackPushed) {
        kind_ = ValueStack;
        data_.valueStackPushed = stackPushed;
    }

    bool aliasesReg(Register reg) {
        if (kind_ == PayloadReg)
            return payloadReg() == reg;
        if (kind_ == ValueReg)
            return valueReg().aliases(reg);
        return false;
    }
    bool aliasesReg(ValueOperand reg) {
#if defined(JS_NUNBOX32)
        return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg());
#else
        return aliasesReg(reg.valueReg());
#endif
    }

    bool operator==(const OperandLocation& other) const;
    bool operator!=(const OperandLocation& other) const { return !operator==(other); }
};

// Class to track and allocate registers while emitting IC code.
class MOZ_RAII CacheRegisterAllocator
{
    // The original location of the inputs to the cache.
    Vector<OperandLocation, 4, SystemAllocPolicy> origInputLocations_;

    // The current location of each operand.
    Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;

    // The registers allocated while emitting the current CacheIR op.
    // This prevents us from allocating a register and then immediately
    // clobbering it for something else, while we're still holding on to it.
    LiveGeneralRegisterSet currentOpRegs_;

    const AllocatableGeneralRegisterSet allocatableRegs_;

    // Registers that are currently unused and available.
    AllocatableGeneralRegisterSet availableRegs_;

    // The number of bytes pushed on the native stack.
    uint32_t stackPushed_;

    // The index of the CacheIR instruction we're currently emitting.
    uint32_t currentInstruction_;

    const CacheIRWriter& writer_;

    CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
    CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;

    void freeDeadOperandRegisters();

  public:
    friend class AutoScratchRegister;
    friend class AutoScratchRegisterExcluding;

    explicit CacheRegisterAllocator(const CacheIRWriter& writer)
      : allocatableRegs_(GeneralRegisterSet::All()),
        stackPushed_(0),
        currentInstruction_(0),
        writer_(writer)
    {}

    MOZ_MUST_USE bool init(const AllocatableGeneralRegisterSet& available);

    OperandLocation operandLocation(size_t i) const {
        return operandLocations_[i];
    }
    OperandLocation origInputLocation(size_t i) const {
        return origInputLocations_[i];
    }
    void initInputLocation(size_t i, ValueOperand reg) {
        origInputLocations_[i].setValueReg(reg);
        operandLocations_[i] = origInputLocations_[i];
    }

    void nextOp() {
        currentOpRegs_.clear();
        currentInstruction_++;
    }

    uint32_t stackPushed() const {
        return stackPushed_;
    }

    bool isAllocatable(Register reg) const {
        return allocatableRegs_.has(reg);
    }

    // Allocates a new register.
    Register allocateRegister(MacroAssembler& masm);
    ValueOperand allocateValueRegister(MacroAssembler& masm);
    void allocateFixedRegister(MacroAssembler& masm, Register reg);

    // Releases a register so it can be reused later.
    void releaseRegister(Register reg) {
        MOZ_ASSERT(currentOpRegs_.has(reg));
        availableRegs_.add(reg);
    }

    // Removes spilled values from the native stack. This should only be
    // called after all registers have been allocated.
    void discardStack(MacroAssembler& masm);

    // Returns the register for the given operand. If the operand is currently
    // not in a register, it will load it into one.
    ValueOperand useValueRegister(MacroAssembler& masm, ValOperandId val);
    Register useRegister(MacroAssembler& masm, TypedOperandId typedId);

    // Allocates an output register for the given operand.
    Register defineRegister(MacroAssembler& masm, TypedOperandId typedId);
    ValueOperand defineValueRegister(MacroAssembler& masm, ValOperandId val);
};
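
// Typical use from a compiler method (illustrative sketch; everything except
// the allocator/reader API shown above is hypothetical):
//
//   Register obj = allocator.useRegister(masm, reader.objOperandId());
//   AutoScratchRegister scratch(allocator, masm);
//   FailurePath* failure;
//   if (!addFailurePath(&failure))
//       return false;
//   // ... emit guards that jump to failure->label() on mismatch ...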

// RAII class to allocate a scratch register and release it when we're done
// with it.
class MOZ_RAII AutoScratchRegister
{
    CacheRegisterAllocator& alloc_;
    Register reg_;

  public:
    AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
                        Register reg = InvalidReg)
      : alloc_(alloc)
    {
        if (reg != InvalidReg) {
            alloc.allocateFixedRegister(masm, reg);
            reg_ = reg;
        } else {
            reg_ = alloc.allocateRegister(masm);
        }
        MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
    }
    ~AutoScratchRegister() {
        alloc_.releaseRegister(reg_);
    }
    operator Register() const { return reg_; }
};

// Like AutoScratchRegister, but lets the caller specify a register that should
// not be allocated here.
class MOZ_RAII AutoScratchRegisterExcluding
{
    CacheRegisterAllocator& alloc_;
    Register reg_;

  public:
    AutoScratchRegisterExcluding(CacheRegisterAllocator& alloc, MacroAssembler& masm,
                                 Register excluding)
      : alloc_(alloc)
    {
        MOZ_ASSERT(excluding != InvalidReg);

        reg_ = alloc.allocateRegister(masm);

        if (reg_ == excluding) {
            // We need a different register, so try again.
            reg_ = alloc.allocateRegister(masm);
            MOZ_ASSERT(reg_ != excluding);
            alloc_.releaseRegister(excluding);
        }

        MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
    }
    ~AutoScratchRegisterExcluding() {
        alloc_.releaseRegister(reg_);
    }
    operator Register() const { return reg_; }
};

// The FailurePath class stores everything we need to generate a failure path
// at the end of the IC code. The failure path restores the input registers, if
// needed, and jumps to the next stub.
class FailurePath
{
    Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
    NonAssertingLabel label_;
    uint32_t stackPushed_;

  public:
    FailurePath() = default;

    FailurePath(FailurePath&& other)
      : inputs_(Move(other.inputs_)),
        label_(other.label_),
        stackPushed_(other.stackPushed_)
    {}

    Label* label() { return &label_; }

    void setStackPushed(uint32_t i) { stackPushed_ = i; }
    uint32_t stackPushed() const { return stackPushed_; }

    bool appendInput(OperandLocation loc) {
        return inputs_.append(loc);
    }
    OperandLocation input(size_t i) const {
        return inputs_[i];
    }

    // If canShareFailurePath(other) returns true, the same machine code will
    // be emitted for two failure paths, so we can share them.
    bool canShareFailurePath(const FailurePath& other) const;
};

// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
class MOZ_RAII CacheIRCompiler
{
  protected:
    JSContext* cx_;
    CacheIRReader reader;
    const CacheIRWriter& writer_;
    MacroAssembler masm;

    CacheRegisterAllocator allocator;
    Vector<FailurePath, 4, SystemAllocPolicy> failurePaths;

    CacheIRCompiler(JSContext* cx, const CacheIRWriter& writer)
      : cx_(cx),
        reader(writer),
        writer_(writer),
        allocator(writer_)
    {}

    MOZ_MUST_USE bool addFailurePath(FailurePath** failure);

    void emitFailurePath(size_t i);
};

// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
// of this class.
class CacheIRStubInfo
{
    // These fields don't require 8 bits, but GCC complains if these fields are
    // smaller than the size of the enums.
    CacheKind kind_ : 8;
    ICStubEngine engine_ : 8;
    bool makesGCCalls_ : 1;
    uint8_t stubDataOffset_;

    const uint8_t* code_;
    uint32_t length_;
    const uint8_t* fieldTypes_;

    CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
                    uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength,
                    const uint8_t* fieldTypes)
      : kind_(kind),
        engine_(engine),
        makesGCCalls_(makesGCCalls),
        stubDataOffset_(stubDataOffset),
        code_(code),
        length_(codeLength),
        fieldTypes_(fieldTypes)
    {
        MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
        MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
        MOZ_ASSERT(stubDataOffset_ == stubDataOffset, "stubDataOffset must fit in uint8_t");
    }

    CacheIRStubInfo(const CacheIRStubInfo&) = delete;
    CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;

  public:
    CacheKind kind() const { return kind_; }
    ICStubEngine engine() const { return engine_; }
    bool makesGCCalls() const { return makesGCCalls_; }

    const uint8_t* code() const { return code_; }
    uint32_t codeLength() const { return length_; }
    uint32_t stubDataOffset() const { return stubDataOffset_; }

    size_t stubDataSize() const;

    StubField::Type fieldType(uint32_t i) const { return (StubField::Type)fieldTypes_[i]; }

    static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine, bool canMakeCalls,
                                uint32_t stubDataOffset, const CacheIRWriter& writer);

    template <class T>
    js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const;

    void copyStubData(ICStub* src, ICStub* dest) const;
};

} // namespace jit
} // namespace js

#endif /* jit_CacheIRCompiler_h */

@ -222,6 +222,7 @@ UNIFIED_SOURCES += [
    'jit/BytecodeAnalysis.cpp',
    'jit/C1Spewer.cpp',
    'jit/CacheIR.cpp',
+   'jit/CacheIRCompiler.cpp',
    'jit/CodeGenerator.cpp',
    'jit/CompileWrappers.cpp',
    'jit/Disassembler.cpp',