Bug 990106 part 2 - LRecoverInfo encodes MIR when flagged as Recovered. r=h4writer

Nicolas B. Pierron 2014-04-29 10:17:51 -07:00
Parent 6535a6a399
Commit a0e1f8e563
7 changed files with 271 additions and 180 deletions

View file

@@ -111,11 +111,16 @@ LBlock::getExitMoveGroup(TempAllocator &alloc)
 }
 
 static size_t
-TotalOperandCount(MResumePoint *mir)
+TotalOperandCount(LRecoverInfo *recoverInfo)
 {
-    size_t accum = mir->numOperands();
-    while ((mir = mir->caller()))
-        accum += mir->numOperands();
+    LRecoverInfo::OperandIter it(recoverInfo->begin());
+    LRecoverInfo::OperandIter end(recoverInfo->end());
+    size_t accum = 0;
+    for (; it != end; ++it) {
+        if (!it->isRecoveredOnBailout())
+            accum++;
+    }
     return accum;
 }
@@ -137,28 +142,71 @@ LRecoverInfo::New(MIRGenerator *gen, MResumePoint *mir)
     return recoverInfo;
 }
 
+bool
+LRecoverInfo::appendOperands(MNode *ins)
+{
+    for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
+        MDefinition *def = ins->getOperand(i);
+
+        // As there is no cycle in the data-flow (without MPhi), checking for
+        // isInWorkList implies that the definition is already in the
+        // instruction vector, and not processed by a caller of the current
+        // function.
+        if (def->isRecoveredOnBailout() && !def->isInWorklist()) {
+            if (!appendDefinition(def))
+                return false;
+        }
+    }
+
+    return true;
+}
+
+bool
+LRecoverInfo::appendDefinition(MDefinition *def)
+{
+    MOZ_ASSERT(def->isRecoveredOnBailout());
+    def->setInWorklist();
+
+    if (!appendOperands(def))
+        return false;
+    return instructions_.append(def);
+}
+
+bool
+LRecoverInfo::appendResumePoint(MResumePoint *rp)
+{
+    if (rp->caller() && !appendResumePoint(rp->caller()))
+        return false;
+
+    if (!appendOperands(rp))
+        return false;
+    return instructions_.append(rp);
+}
+
 bool
 LRecoverInfo::init(MResumePoint *rp)
 {
-    MResumePoint *it = rp;
-
     // Sort operations in the order in which we need to restore the stack. This
     // implies that outer frames, as well as operations needed to recover the
     // current frame, are located before the current frame. The inner-most
     // resume point should be the last element in the list.
-    do {
-        if (!instructions_.append(it))
-            return false;
-        it = it->caller();
-    } while (it);
+    if (!appendResumePoint(rp))
+        return false;
+
+    // Remove temporary flags from all definitions.
+    for (MNode **it = begin(); it != end(); it++) {
+        if (!(*it)->isDefinition())
+            continue;
+        (*it)->toDefinition()->setNotInWorklist();
+    }
 
-    Reverse(instructions_.begin(), instructions_.end());
     MOZ_ASSERT(mir() == rp);
     return true;
 }
 
 LSnapshot::LSnapshot(LRecoverInfo *recoverInfo, BailoutKind kind)
-  : numSlots_(TotalOperandCount(recoverInfo->mir()) * BOX_PIECES),
+  : numSlots_(TotalOperandCount(recoverInfo) * BOX_PIECES),
     slots_(nullptr),
     recoverInfo_(recoverInfo),
     snapshotOffset_(INVALID_SNAPSHOT_OFFSET),
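
The three append functions above perform a depth-first walk over an acyclic dependency graph: appendResumePoint restores outer frames first, and appendDefinition/appendOperands push every recovered dependency before the instruction that consumes it, using the worklist flag to skip definitions that are already in the vector. A minimal standalone sketch of the same ordering technique (hypothetical Node type, not SpiderMonkey's MIR classes):

```cpp
#include <cstdio>
#include <vector>

// Hypothetical stand-in for MDefinition/MResumePoint: a node whose
// operands must appear in the output list before the node itself.
struct Node {
    const char *name;
    std::vector<Node *> operands;
    bool recovered = false;   // analogous to isRecoveredOnBailout()
    bool inWorklist = false;  // analogous to isInWorklist()
};

// Mirrors appendDefinition/appendOperands: append every recovered
// dependency (post-order), using inWorklist to avoid duplicates. The
// graph is acyclic, so no extra cycle detection is needed.
static void appendDefinition(Node *def, std::vector<Node *> &out)
{
    def->inWorklist = true;
    for (Node *op : def->operands) {
        if (op->recovered && !op->inWorklist)
            appendDefinition(op, out);
    }
    out.push_back(def);
}

int main()
{
    Node a{"a", {}, true}, b{"b", {&a}, true};
    Node rp{"resumepoint", {&b, &a}};  // the resume point uses b and a

    std::vector<Node *> order;
    appendDefinition(&b, order);       // emits a before b
    order.push_back(&rp);              // the resume point comes last

    for (Node *n : order)
        std::printf("%s\n", n->name);  // prints: a b resumepoint
}
```

Because the worklist bit doubles as an "already emitted" marker, each definition is appended exactly once even when several consumers share it; init then clears the bits so later passes can reuse them.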

View file

@@ -879,7 +879,7 @@ class LCallInstructionHelper : public LInstructionHelper<Defs, Operands, Temps>
 class LRecoverInfo : public TempObject
 {
   public:
-    typedef Vector<MResumePoint *, 2, IonAllocPolicy> Instructions;
+    typedef Vector<MNode *, 2, IonAllocPolicy> Instructions;
 
   private:
     // List of instructions needed to recover the stack frames.
@@ -892,12 +892,17 @@ class LRecoverInfo : public TempObject
     LRecoverInfo(TempAllocator &alloc);
     bool init(MResumePoint *mir);
 
+    // Fill the instruction vector such that all instructions needed for the
+    // recovery are pushed before the current instruction.
+    bool appendOperands(MNode *ins);
+    bool appendDefinition(MDefinition *def);
+    bool appendResumePoint(MResumePoint *rp);
+
   public:
     static LRecoverInfo *New(MIRGenerator *gen, MResumePoint *mir);
 
     // Resume point of the innermost function.
     MResumePoint *mir() const {
-        return instructions_.back();
+        return instructions_.back()->toResumePoint();
     }
     RecoverOffset recoverOffset() const {
         return recoverOffset_;
@@ -907,12 +912,47 @@ class LRecoverInfo : public TempObject
         recoverOffset_ = offset;
     }
 
-    MResumePoint **begin() {
+    MNode **begin() {
         return instructions_.begin();
     }
-    MResumePoint **end() {
+    MNode **end() {
         return instructions_.end();
     }
+    size_t numInstructions() const {
+        return instructions_.length();
+    }
+
+    class OperandIter
+    {
+      private:
+        MNode **it_;
+        size_t op_;
+
+      public:
+        OperandIter(MNode **it)
+          : it_(it), op_(0)
+        { }
+
+        MDefinition *operator *() {
+            return (*it_)->getOperand(op_);
+        }
+        MDefinition *operator ->() {
+            return (*it_)->getOperand(op_);
+        }
+
+        OperandIter &operator ++() {
+            ++op_;
+            if (op_ == (*it_)->numOperands()) {
+                op_ = 0;
+                ++it_;
+            }
+            return *this;
+        }
+
+        bool operator !=(const OperandIter &where) const {
+            return it_ != where.it_ || op_ != where.op_;
+        }
+    };
 };
 
 // An LSnapshot is the reflection of an MResumePoint in LIR. Unlike MResumePoints,
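
OperandIter flattens a two-level structure: the outer level walks the instruction vector, the inner level walks each instruction's operands. The same pattern in a self-contained form (generic containers standing in for MNode operand lists; illustrative only, not the SpiderMonkey class):

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Two-level "flattening" iterator: visits every element of every inner
// container as one flat sequence, the way OperandIter visits every
// operand of every recover instruction.
struct FlatIter {
    std::vector<int> *outer_;  // current inner container
    std::size_t op_ = 0;       // index within the current container

    explicit FlatIter(std::vector<int> *outer) : outer_(outer) {}

    int operator*() const { return (*outer_)[op_]; }

    FlatIter &operator++() {
        // Advance within the inner container; when it is exhausted,
        // reset the index and move on to the next container.
        if (++op_ == outer_->size()) {
            op_ = 0;
            ++outer_;
        }
        return *this;
    }

    bool operator!=(const FlatIter &other) const {
        return outer_ != other.outer_ || op_ != other.op_;
    }
};

int main()
{
    std::vector<int> nodes[] = {{1, 2}, {3}, {4, 5}};
    for (FlatIter it(nodes), end(nodes + 3); it != end; ++it)
        std::printf("%d ", *it);  // prints: 1 2 3 4 5
}
```

Note that, as with any iterator of this shape, the increment step assumes every outer element has at least one inner element; an element with zero operands would never be stepped over.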

View file

@@ -3598,6 +3598,9 @@ SpewResumePoint(MBasicBlock *block, MInstruction *ins, MResumePoint *resumePoint
 bool
 LIRGenerator::visitInstruction(MInstruction *ins)
 {
+    if (ins->isRecoveredOnBailout())
+        return true;
+
     if (!gen->ensureBallast())
         return false;
     if (!ins->accept(this))

View file

@@ -89,7 +89,14 @@ MIRType MIRTypeFromValue(const js::Value &vp)
      * Truncate Doubles. So every time removeUse is called, UseRemoved needs
      * to get set.
      */ \
-    _(UseRemoved)
+    _(UseRemoved) \
+    \
+    /* Marks if the current instruction should go to the bailout paths instead
+     * of producing code as part of the control flow. This flag can only be set
+     * on instructions which are only used by ResumePoint or by other flagged
+     * instructions.
+     */ \
+    _(RecoveredOnBailout)
 
 class MDefinition;
 class MInstruction;
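
The constraint stated in the new flag's comment — a flagged instruction may only feed resume points or other flagged instructions — is what lets lowering skip code generation for it entirely. A hypothetical sketch of that use-check (an illustrative model, not SpiderMonkey's actual use representation):

```cpp
#include <cstdio>
#include <vector>

// Hypothetical model of the RecoveredOnBailout invariant: a definition
// may be flagged only if every use is a resume point or an
// already-flagged instruction.
struct UseSite {
    bool isResumePoint;   // used only to rebuild the frame on bailout
    bool userIsRecovered; // the consuming instruction is itself flagged
};

static bool canFlagRecoveredOnBailout(const std::vector<UseSite> &uses)
{
    for (const UseSite &use : uses) {
        // A use that generates real code still needs the value in a
        // register, so the definition cannot move to the bailout path.
        if (!use.isResumePoint && !use.userIsRecovered)
            return false;
    }
    return true;
}

int main()
{
    std::vector<UseSite> onlySnapshots = {{true, false}, {false, true}};
    std::vector<UseSite> hasRealUse = {{true, false}, {false, false}};
    std::printf("%d %d\n", canFlagRecoveredOnBailout(onlySnapshots),
                canFlagRecoveredOnBailout(hasRealUse));  // prints: 1 0
}
```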

View file

@@ -137,106 +137,98 @@ ToStackIndex(LAllocation *a)
 }
 
 bool
-CodeGeneratorShared::encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint,
-                                       uint32_t *startIndex)
+CodeGeneratorShared::encodeAllocation(LSnapshot *snapshot, MDefinition *mir,
+                                      uint32_t *allocIndex)
 {
-    IonSpew(IonSpew_Codegen, "Encoding %u of resume point %p's operands starting from %u",
-            resumePoint->numOperands(), (void *) resumePoint, *startIndex);
-    for (uint32_t allocno = 0, e = resumePoint->numOperands(); allocno < e; allocno++) {
-        uint32_t i = allocno + *startIndex;
-        MDefinition *mir = resumePoint->getOperand(allocno);
-
-        if (mir->isBox())
-            mir = mir->toBox()->getOperand(0);
-
-        MIRType type = mir->isUnused()
-                       ? MIRType_MagicOptimizedOut
-                       : mir->type();
-
-        RValueAllocation alloc;
-
-        switch (type) {
-          case MIRType_Undefined:
-            alloc = RValueAllocation::Undefined();
-            break;
-          case MIRType_Null:
-            alloc = RValueAllocation::Null();
-            break;
-          case MIRType_Int32:
-          case MIRType_String:
-          case MIRType_Object:
-          case MIRType_Boolean:
-          case MIRType_Double:
-          case MIRType_Float32:
-          {
-            LAllocation *payload = snapshot->payloadOfSlot(i);
-            JSValueType valueType = ValueTypeFromMIRType(type);
-            if (payload->isMemory()) {
-                if (type == MIRType_Float32)
-                    alloc = RValueAllocation::Float32(ToStackIndex(payload));
-                else
-                    alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
-            } else if (payload->isGeneralReg()) {
-                alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
-            } else if (payload->isFloatReg()) {
-                FloatRegister reg = ToFloatRegister(payload);
-                if (type == MIRType_Float32)
-                    alloc = RValueAllocation::Float32(reg);
-                else
-                    alloc = RValueAllocation::Double(reg);
-            } else {
-                MConstant *constant = mir->toConstant();
-                uint32_t index;
-                if (!graph.addConstantToPool(constant->value(), &index))
-                    return false;
-                alloc = RValueAllocation::ConstantPool(index);
-            }
-            break;
-          }
-          case MIRType_MagicOptimizedArguments:
-          case MIRType_MagicOptimizedOut:
-          {
-            uint32_t index;
-            JSWhyMagic why = (type == MIRType_MagicOptimizedArguments
-                              ? JS_OPTIMIZED_ARGUMENTS
-                              : JS_OPTIMIZED_OUT);
-            Value v = MagicValue(why);
-            if (!graph.addConstantToPool(v, &index))
-                return false;
-            alloc = RValueAllocation::ConstantPool(index);
-            break;
-          }
-          default:
-          {
-            JS_ASSERT(mir->type() == MIRType_Value);
-            LAllocation *payload = snapshot->payloadOfSlot(i);
-#ifdef JS_NUNBOX32
-            LAllocation *type = snapshot->typeOfSlot(i);
-            if (type->isRegister()) {
-                if (payload->isRegister())
-                    alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
-                else
-                    alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
-            } else {
-                if (payload->isRegister())
-                    alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
-                else
-                    alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
-            }
-#elif JS_PUNBOX64
-            if (payload->isRegister())
-                alloc = RValueAllocation::Untyped(ToRegister(payload));
-            else
-                alloc = RValueAllocation::Untyped(ToStackIndex(payload));
-#endif
-            break;
-          }
-        }
-        snapshots_.add(alloc);
-    }
-    *startIndex += resumePoint->numOperands();
+    if (mir->isBox())
+        mir = mir->toBox()->getOperand(0);
+
+    MIRType type = mir->isUnused()
+                   ? MIRType_MagicOptimizedOut
+                   : mir->type();
+
+    RValueAllocation alloc;
+
+    switch (type) {
+      case MIRType_Undefined:
+        alloc = RValueAllocation::Undefined();
+        break;
+      case MIRType_Null:
+        alloc = RValueAllocation::Null();
+        break;
+      case MIRType_Int32:
+      case MIRType_String:
+      case MIRType_Object:
+      case MIRType_Boolean:
+      case MIRType_Double:
+      case MIRType_Float32:
+      {
+        LAllocation *payload = snapshot->payloadOfSlot(*allocIndex);
+        JSValueType valueType = ValueTypeFromMIRType(type);
+        if (payload->isMemory()) {
+            if (type == MIRType_Float32)
+                alloc = RValueAllocation::Float32(ToStackIndex(payload));
+            else
+                alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
+        } else if (payload->isGeneralReg()) {
+            alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
+        } else if (payload->isFloatReg()) {
+            FloatRegister reg = ToFloatRegister(payload);
+            if (type == MIRType_Float32)
+                alloc = RValueAllocation::Float32(reg);
+            else
+                alloc = RValueAllocation::Double(reg);
+        } else {
+            MConstant *constant = mir->toConstant();
+            uint32_t index;
+            if (!graph.addConstantToPool(constant->value(), &index))
+                return false;
+            alloc = RValueAllocation::ConstantPool(index);
+        }
+        break;
+      }
+      case MIRType_MagicOptimizedArguments:
+      case MIRType_MagicOptimizedOut:
+      {
+        uint32_t index;
+        JSWhyMagic why = (type == MIRType_MagicOptimizedArguments
+                          ? JS_OPTIMIZED_ARGUMENTS
+                          : JS_OPTIMIZED_OUT);
+        Value v = MagicValue(why);
+        if (!graph.addConstantToPool(v, &index))
+            return false;
+        alloc = RValueAllocation::ConstantPool(index);
+        break;
+      }
+      default:
+      {
+        JS_ASSERT(mir->type() == MIRType_Value);
+        LAllocation *payload = snapshot->payloadOfSlot(*allocIndex);
+#ifdef JS_NUNBOX32
+        LAllocation *type = snapshot->typeOfSlot(*allocIndex);
+        if (type->isRegister()) {
+            if (payload->isRegister())
+                alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
+            else
+                alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
+        } else {
+            if (payload->isRegister())
+                alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
+            else
+                alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
+        }
+#elif JS_PUNBOX64
+        if (payload->isRegister())
+            alloc = RValueAllocation::Untyped(ToRegister(payload));
+        else
+            alloc = RValueAllocation::Untyped(ToStackIndex(payload));
+#endif
+        break;
+      }
+    }
+
+    snapshots_.add(alloc);
+    *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
     return true;
 }
@@ -246,20 +238,17 @@ CodeGeneratorShared::encode(LRecoverInfo *recover)
     if (recover->recoverOffset() != INVALID_RECOVER_OFFSET)
         return true;
 
-    uint32_t frameCount = recover->mir()->frameCount();
-    IonSpew(IonSpew_Snapshots, "Encoding LRecoverInfo %p (frameCount %u)",
-            (void *)recover, frameCount);
+    uint32_t numInstructions = recover->numInstructions();
+    IonSpew(IonSpew_Snapshots, "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
+            (void *)recover, recover->mir()->frameCount(), numInstructions);
 
     MResumePoint::Mode mode = recover->mir()->mode();
     JS_ASSERT(mode != MResumePoint::Outer);
     bool resumeAfter = (mode == MResumePoint::ResumeAfter);
 
-    RecoverOffset offset = recovers_.startRecover(frameCount, resumeAfter);
+    RecoverOffset offset = recovers_.startRecover(numInstructions, resumeAfter);
 
-    for (MResumePoint **it = recover->begin(), **end = recover->end();
-         it != end;
-         ++it)
-    {
+    for (MNode **it = recover->begin(), **end = recover->end(); it != end; ++it) {
         if (!recovers_.writeInstruction(*it))
             return false;
     }
@@ -307,17 +296,17 @@ CodeGeneratorShared::encode(LSnapshot *snapshot)
     snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
 #endif
 
-    uint32_t startIndex = 0;
-    for (MResumePoint **it = recoverInfo->begin(), **end = recoverInfo->end();
-         it != end;
-         ++it)
-    {
-        MResumePoint *mir = *it;
-        if (!encodeAllocations(snapshot, mir, &startIndex))
+    uint32_t allocIndex = 0;
+    LRecoverInfo::OperandIter it(recoverInfo->begin());
+    LRecoverInfo::OperandIter end(recoverInfo->end());
+    for (; it != end; ++it) {
+        DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
+        if (!encodeAllocation(snapshot, *it, &allocIndex))
             return false;
+        MOZ_ASSERT(allocWritten + 1 == snapshots_.allocWritten());
     }
 
-    MOZ_ASSERT(snapshots_.allocWritten() == snapshot->numSlots());
+    MOZ_ASSERT(allocIndex == snapshot->numSlots());
     snapshots_.endSnapshot();
     snapshot->setSnapshotOffset(offset);
     return !snapshots_.oom();
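
The two assertions capture the slot bookkeeping: one RValueAllocation entry is written per operand, while allocIndex advances only for operands that still own an allocation slot — exactly the operands TotalOperandCount counted when sizing the snapshot. A small simulation of that invariant (hypothetical operand flags, not the real encoder):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // true = operand is recovered on bailout (no LAllocation assigned)
    std::vector<bool> recovered = {false, true, false, false, true};

    std::size_t numSlots = 0;  // what TotalOperandCount computes up front
    for (bool r : recovered)
        numSlots += r ? 0 : 1;

    std::size_t allocIndex = 0, entriesWritten = 0;
    for (bool r : recovered) {
        entriesWritten++;         // snapshots_.add(alloc) runs for every operand
        allocIndex += r ? 0 : 1;  // mirrors encodeAllocation's final line
    }

    assert(entriesWritten == recovered.size()); // one entry per operand
    assert(allocIndex == numSlots);             // recovered operands used no slot
    std::printf("entries=%zu, slots=%zu\n", entriesWritten, numSlots);
}
```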

View file

@@ -272,7 +272,7 @@ class CodeGeneratorShared : public LInstructionVisitor
     // false on failure.
     bool encode(LRecoverInfo *recover);
     bool encode(LSnapshot *snapshot);
-    bool encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint, uint32_t *startIndex);
+    bool encodeAllocation(LSnapshot *snapshot, MDefinition *def, uint32_t *startIndex);
 
     // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
     // If the bailout table is full, this returns false, which is not a fatal

View file

@@ -74,49 +74,51 @@ LIRGeneratorShared::getRecoverInfo(MResumePoint *rp)
 LSnapshot *
 LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
 {
-    LRecoverInfo *recover = getRecoverInfo(rp);
-    if (!recover)
+    LRecoverInfo *recoverInfo = getRecoverInfo(rp);
+    if (!recoverInfo)
         return nullptr;
 
-    LSnapshot *snapshot = LSnapshot::New(gen, recover, kind);
+    LSnapshot *snapshot = LSnapshot::New(gen, recoverInfo, kind);
     if (!snapshot)
         return nullptr;
 
-    size_t i = 0;
-    for (MResumePoint **it = recover->begin(), **end = recover->end(); it != end; ++it) {
-        MResumePoint *mir = *it;
-        for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
-            MDefinition *ins = mir->getOperand(j);
-
-            LAllocation *type = snapshot->typeOfSlot(i);
-            LAllocation *payload = snapshot->payloadOfSlot(i);
-
-            if (ins->isBox())
-                ins = ins->toBox()->getOperand(0);
-
-            // Guards should never be eliminated.
-            JS_ASSERT_IF(ins->isUnused(), !ins->isGuard());
-
-            // Snapshot operands other than constants should never be
-            // emitted-at-uses. Try-catch support depends on there being no
-            // code between an instruction and the LOsiPoint that follows it.
-            JS_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());
-
-            // The register allocation will fill these fields in with actual
-            // register/stack assignments. During code generation, we can restore
-            // interpreter state with the given information. Note that for
-            // constants, including known types, we record a dummy placeholder,
-            // since we can recover the same information, much cleaner, from MIR.
-            if (ins->isConstant() || ins->isUnused()) {
-                *type = LConstantIndex::Bogus();
-                *payload = LConstantIndex::Bogus();
-            } else if (ins->type() != MIRType_Value) {
-                *type = LConstantIndex::Bogus();
-                *payload = use(ins, LUse::KEEPALIVE);
-            } else {
-                *type = useType(ins, LUse::KEEPALIVE);
-                *payload = usePayload(ins, LUse::KEEPALIVE);
-            }
-        }
+    size_t index = 0;
+    LRecoverInfo::OperandIter it(recoverInfo->begin());
+    LRecoverInfo::OperandIter end(recoverInfo->end());
+    for (; it != end; ++it) {
+        MDefinition *ins = *it;
+
+        if (ins->isRecoveredOnBailout())
+            continue;
+
+        LAllocation *type = snapshot->typeOfSlot(index);
+        LAllocation *payload = snapshot->payloadOfSlot(index);
+        ++index;
+
+        if (ins->isBox())
+            ins = ins->toBox()->getOperand(0);
+
+        // Guards should never be eliminated.
+        JS_ASSERT_IF(ins->isUnused(), !ins->isGuard());
+
+        // Snapshot operands other than constants should never be
+        // emitted-at-uses. Try-catch support depends on there being no
+        // code between an instruction and the LOsiPoint that follows it.
+        JS_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());
+
+        // The register allocation will fill these fields in with actual
+        // register/stack assignments. During code generation, we can restore
+        // interpreter state with the given information. Note that for
+        // constants, including known types, we record a dummy placeholder,
+        // since we can recover the same information, much cleaner, from MIR.
+        if (ins->isConstant() || ins->isUnused()) {
+            *type = LConstantIndex::Bogus();
+            *payload = LConstantIndex::Bogus();
+        } else if (ins->type() != MIRType_Value) {
+            *type = LConstantIndex::Bogus();
+            *payload = use(ins, LUse::KEEPALIVE);
+        } else {
+            *type = useType(ins, LUse::KEEPALIVE);
+            *payload = usePayload(ins, LUse::KEEPALIVE);
+        }
     }
@@ -128,40 +130,42 @@ LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind
 LSnapshot *
 LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
 {
-    LRecoverInfo *recover = getRecoverInfo(rp);
-    if (!recover)
+    LRecoverInfo *recoverInfo = getRecoverInfo(rp);
+    if (!recoverInfo)
         return nullptr;
 
-    LSnapshot *snapshot = LSnapshot::New(gen, recover, kind);
+    LSnapshot *snapshot = LSnapshot::New(gen, recoverInfo, kind);
     if (!snapshot)
         return nullptr;
 
-    size_t i = 0;
-    for (MResumePoint **it = recover->begin(), **end = recover->end(); it != end; ++it) {
-        MResumePoint *mir = *it;
-        for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
-            MDefinition *def = mir->getOperand(j);
-
-            if (def->isBox())
-                def = def->toBox()->getOperand(0);
-
-            // Guards should never be eliminated.
-            JS_ASSERT_IF(def->isUnused(), !def->isGuard());
-
-            // Snapshot operands other than constants should never be
-            // emitted-at-uses. Try-catch support depends on there being no
-            // code between an instruction and the LOsiPoint that follows it.
-            JS_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());
-
-            LAllocation *a = snapshot->getEntry(i);
-
-            if (def->isUnused()) {
-                *a = LConstantIndex::Bogus();
-                continue;
-            }
-
-            *a = useKeepaliveOrConstant(def);
-        }
+    size_t index = 0;
+    LRecoverInfo::OperandIter it(recoverInfo->begin());
+    LRecoverInfo::OperandIter end(recoverInfo->end());
+    for (; it != end; ++it) {
+        MDefinition *def = *it;
+
+        if (def->isBox())
+            def = def->toBox()->getOperand(0);
+
+        if (def->isRecoveredOnBailout())
+            continue;
+
+        // Guards should never be eliminated.
+        JS_ASSERT_IF(def->isUnused(), !def->isGuard());
+
+        // Snapshot operands other than constants should never be
+        // emitted-at-uses. Try-catch support depends on there being no
+        // code between an instruction and the LOsiPoint that follows it.
+        JS_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());
+
+        LAllocation *a = snapshot->getEntry(index++);
+
+        if (def->isUnused()) {
+            *a = LConstantIndex::Bogus();
+            continue;
+        }
+
+        *a = useKeepaliveOrConstant(def);
     }
 
     return snapshot;