Bug 844779: IonMonkey: Improve the order of blocks, r=jandem

Hannes Verschore 2013-04-05 17:23:52 +02:00
Parent 9c74d3f6cd
Commit 2093345fc9
6 changed files with 247 additions and 30 deletions

View file

@@ -1915,7 +1915,6 @@ class FunctionCompiler
joinBlock->addPredecessor(curBlock_);
}
curBlock_ = joinBlock;
mirGraph().moveBlockToEnd(curBlock_);
}
MBasicBlock *switchToElse(MBasicBlock *elseBlock)
@@ -1924,7 +1923,6 @@ class FunctionCompiler
return NULL;
MBasicBlock *thenEnd = curBlock_;
curBlock_ = elseBlock;
mirGraph().moveBlockToEnd(curBlock_);
return thenEnd;
}
@@ -2031,8 +2029,6 @@ class FunctionCompiler
loopEntry->setBackedge(curBlock_);
}
curBlock_ = afterLoop;
if (curBlock_)
mirGraph().moveBlockToEnd(curBlock_);
return bindUnlabeledBreaks(pn);
}
@@ -2145,7 +2141,6 @@ class FunctionCompiler
(*cases)[i] = bb;
}
}
mirGraph().moveBlockToEnd(*defaultBlock);
return true;
}

View file

@@ -893,7 +893,7 @@ bool
OptimizeMIR(MIRGenerator *mir)
{
IonSpewPass("BuildSSA");
// Note: don't call AssertGraphCoherency before SplitCriticalEdges,
// Note: don't call AssertGraphCoherency before ReorderBlocks,
// the graph is not in RPO at this point.
MIRGraph &graph = mir->graph();
@@ -904,11 +904,24 @@ OptimizeMIR(MIRGenerator *mir)
if (!SplitCriticalEdges(graph))
return false;
IonSpewPass("Split Critical Edges");
AssertGraphCoherency(graph);
if (mir->shouldCancel("Split Critical Edges"))
return false;
if (!RecalculateLoopDepth(graph))
return false;
// No spew: graph not changed.
if (mir->shouldCancel("Recalculate Loop Depth"))
return false;
if (!ReorderBlocks(graph))
return false;
// No spew: wait for the right block numbers
if (mir->shouldCancel("ReorderBlocks"))
return false;
if (!RenumberBlocks(graph))
return false;
IonSpewPass("Renumber Blocks");

View file

@@ -623,6 +623,210 @@ ion::ApplyTypeInformation(MIRGenerator *mir, MIRGraph &graph)
return true;
}
static bool
SetLoopDepth(MBasicBlock *header)
{
size_t depth = header->loopDepth() + 1;
Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;
worklist.append(header->backedge());
// Iterate over all blocks that are on a path from the backedge to the loop header.
while (!worklist.empty()) {
MBasicBlock *block = worklist.popCopy();
block->setLoopDepth(depth);
if (block == header)
continue;
for (size_t i = 0; i < block->numPredecessors(); i++) {
MBasicBlock *pred = block->getPredecessor(i);
if (pred->loopDepth() == depth)
continue;
if (!worklist.append(pred))
return false;
}
}
return true;
}
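For illustration, the same backward walk can be sketched with plain std:: containers and a toy Block type; the names below are illustrative only, not the MIR classes. The graph models a loop with an early return: the early-exit block is reachable from inside the loop but lies on no backedge-to-header path, so it keeps depth 0.

#include <cstdio>
#include <vector>

struct Block {
    std::vector<Block *> preds;  // predecessor blocks
    int loopDepth = 0;
};

// Mark every block on a path from the backedge back to the header with
// the header's depth + 1, mirroring the walk in SetLoopDepth above.
static void setLoopDepth(Block *header, Block *backedge)
{
    int depth = header->loopDepth + 1;
    std::vector<Block *> worklist{backedge};
    while (!worklist.empty()) {
        Block *block = worklist.back();
        worklist.pop_back();
        block->loopDepth = depth;
        if (block == header)
            continue;
        for (Block *pred : block->preds) {
            if (pred->loopDepth != depth)
                worklist.push_back(pred);
        }
    }
}

int main()
{
    // entry -> header -> cond -> {exitBlk, latch}, latch -> header (backedge)
    Block entry, header, cond, exitBlk, latch;
    header.preds = {&entry, &latch};
    cond.preds = {&header};
    exitBlk.preds = {&cond};
    latch.preds = {&cond};

    setLoopDepth(&header, &latch);
    std::printf("header=%d cond=%d latch=%d exit=%d\n",
                header.loopDepth, cond.loopDepth, latch.loopDepth, exitBlk.loopDepth);
    // Prints: header=1 cond=1 latch=1 exit=0
}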
bool
ion::RecalculateLoopDepth(MIRGraph &graph)
{
// Recalculate the loop depth information.
//
// The definition of a loop (important to correctly reorder blocks and for LICM) is:
// all blocks reached when walking a path from the loop backedge to the loop header.
// This differs from the loop depth information gathered in IonBuilder, where every
// block placed inside the loop is considered to be part of the loop.
// E.g.:
// for (...) {
//     if (...) {
//         return;
//     }
// }
// IonBuilder considers the body of the conditional to be part of the loop. After this
// pass it no longer is, because it lies on no path from the backedge to the loop header.
// Reset all loop depth information.
for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++)
(*block)->setLoopDepth(0);
// Iterate through the graph looking for loop headers.
// The graph isn't in RPO yet, so we have to iterate differently.
// To visit headers in the right order we only need to make sure we see the loop header
// of an outer loop before the loop header of any of its inner loops.
// This is guaranteed if we walk by just following successors,
// since we can't enter a loop body before seeing its loop header.
// Here we do a DFS.
Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;
if (!worklist.append(graph.entryBlock()))
return false;
JS_ASSERT(!graph.entryBlock()->isLoopHeader());
while (!worklist.empty()) {
MBasicBlock *block = worklist.back();
// Add not yet visited successors to the worklist.
for (size_t i = 0; i < block->numSuccessors(); i++) {
MBasicBlock *succ = block->getSuccessor(i);
if (!succ->isMarked()) {
if (!worklist.append(succ))
return false;
succ->mark();
// Set the loopdepth correctly.
if (succ->isLoopHeader()) {
if (!SetLoopDepth(succ))
return false;
}
}
}
// If any blocks were added, process them first.
if (block != worklist.back())
continue;
// All successors of this block are visited.
worklist.popBack();
}
graph.unmarkBlocks();
return true;
}
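The claim above — following successors always reaches an outer loop's header before any of its inner loops' headers — can be checked on a small nested-loop graph. Below is a minimal sketch with toy types (not MIRGraph); it only records the discovery order of headers, whereas the real pass calls SetLoopDepth at that point.

#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct Block {
    std::string name;
    bool isLoopHeader = false;
    std::vector<Block *> succs;
};

int main()
{
    // Two nested loops:
    //   outer: H1 -> A -> H2 ... C -> H1 (backedge), C -> exit
    //   inner: H2 -> B -> H2 (backedge), B -> C
    Block entry{"entry"}, H1{"H1", true}, A{"A"}, H2{"H2", true},
          B{"B"}, C{"C"}, exitBlk{"exit"};
    entry.succs = {&H1};
    H1.succs = {&A};
    A.succs = {&H2};
    H2.succs = {&B};
    B.succs = {&H2, &C};        // inner backedge and loop exit
    C.succs = {&H1, &exitBlk};  // outer backedge and loop exit

    // DFS over successors, marking blocks the first time they are seen;
    // record the order in which loop headers are discovered.
    std::set<Block *> marked{&entry};
    std::vector<Block *> worklist{&entry};
    std::vector<std::string> headerOrder;
    while (!worklist.empty()) {
        Block *block = worklist.back();
        worklist.pop_back();
        if (block->isLoopHeader)
            headerOrder.push_back(block->name);
        for (Block *succ : block->succs) {
            if (marked.insert(succ).second)
                worklist.push_back(succ);
        }
    }
    for (const std::string &n : headerOrder)
        std::printf("%s ", n.c_str());  // prints: H1 H2
    std::printf("\n");
}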
static void
SortByLoopDepth(InlineList<MBasicBlock> &pending, MBasicBlockIterator from)
{
// Sort the blocks between "from" and the end of the pending list.
// In almost all cases this range contains at most 2 items (tableswitch being the
// exception), so there is no need for a fancy algorithm.
MBasicBlockIterator end = pending.end();
while (from != end) {
MBasicBlockIterator current(from);
MBasicBlock *max = *current;
while (current != end) {
if (current->loopDepth() > max->loopDepth())
max = *current;
current++;
}
if (max != *from) {
// Move the block with the highest loop depth right before "from" and re-examine
// the (now smaller) unsorted range that still starts at "from".
pending.remove(max);
pending.insertBefore(*from, max);
continue;
}
from++;
}
}
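SortByLoopDepth is a small selection sort on the tail of the intrusive pending list: it repeatedly picks the block with the highest remaining loop depth and moves it to the front of the unsorted range, so the lowest-depth successor ends up at the back of the stack. The same strategy on a standard container, purely as an illustration (the real code relinks InlineList nodes rather than swapping values):

#include <algorithm>
#include <cstdio>
#include <list>

int main()
{
    // Loop depths of the successors that were just pushed onto the pending list.
    std::list<int> depths = {0, 2, 1, 2, 0};

    // Selection sort, descending: put the largest remaining element at the
    // front of the unsorted range on every pass.
    for (auto from = depths.begin(); from != depths.end(); ++from)
        std::iter_swap(from, std::max_element(from, depths.end()));

    for (int d : depths)
        std::printf("%d ", d);  // prints: 2 2 1 0 0
    std::printf("\n");
}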
bool
ion::ReorderBlocks(MIRGraph &graph)
{
// The blocks get reordered to be in RPO (they already are).
// The order is improved by adding an extra constraint:
// when deciding which successor to visit, always start with the one with the lowest loop depth.
// Consequences:
// => blocks with the same loop depth end up closer together (=> tighter loops)
// => loop backedges and loop headers aren't interleaved when visiting
// the graph in RPO (important for Alias Analysis)
// The RPO algorithm works as follows.
// There is a stack (pending) onto which successors get pushed.
// The last item on the stack is the one we are looking at.
// If it still has unmarked successors, those get added and there is a new last item.
// If there are no unmarked successors left, all successors have been processed
// and this block can be popped from the stack and added to the graph.
mozilla::DebugOnly<size_t> numBlocks = graph.numBlocks();
InlineList<MBasicBlock> pending;
MBasicBlock *entry = graph.entryBlock();
MBasicBlock *osr = graph.osrBlock();
graph.clearBlockList();
// Start with entry block
pending.pushBack(entry);
entry->mark();
// Visit all blocks, adding them to the graph in post order;
// the reversal happens by using moveBlockToBegin.
while (!pending.empty()) {
MBasicBlock *block = pending.peekBack();
bool sortSuccessors = false;
// Add not yet visited successors to the pending list.
for (size_t i = 0; i < block->numSuccessors(); i++) {
MBasicBlock *succ = block->getSuccessor(i);
if (!succ->isMarked()) {
pending.pushBack(succ);
succ->mark();
// Only request sorting when the loop depths differ.
if (succ->loopDepth() != block->loopDepth())
sortSuccessors = true;
}
}
// Test if there was a new block added
if (block != pending.peekBack()) {
// Sort the blocks if needed
if (sortSuccessors) {
MBasicBlockIterator start = pending.begin(block);
start++;
SortByLoopDepth(pending, start);
}
continue;
}
// At this point all successors of this block have been added to the graph,
// so the block itself can be added as well.
pending.popBack();
graph.addBlock(block);
graph.moveBlockToBegin(block);
}
// Put the OSR block in second place, right after the entry block.
if (osr) {
graph.addBlock(osr);
graph.moveBlockToBegin(osr);
graph.moveBlockToBegin(entry);
}
graph.unmarkBlocks();
JS_ASSERT(graph.numBlocks() == numBlocks);
#ifdef DEBUG
graph.setInRPO();
#endif
return true;
}
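A minimal standalone sketch of the same construction, assuming plain std:: containers and a toy Block type rather than MIRGraph: blocks stay on an explicit stack until all of their successors have been visited, newly pushed successors are ordered so that the lowest loop depth is visited first, and the resulting post order is reversed at the end instead of being built with moveBlockToBegin.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Block {
    const char *name;
    int loopDepth;
    std::vector<Block *> succs;
    bool marked = false;
};

// Compute a reverse post order that prefers the successor with the lowest
// loop depth, so blocks of the same loop stay close together.
static std::vector<Block *> reorder(Block *entry)
{
    std::vector<Block *> pending{entry};
    entry->marked = true;
    std::vector<Block *> postOrder;

    while (!pending.empty()) {
        Block *block = pending.back();
        size_t before = pending.size();

        // Push unvisited successors, then order the newly pushed ones so the
        // lowest loop depth ends up on top of the stack.
        for (Block *succ : block->succs) {
            if (!succ->marked) {
                succ->marked = true;
                pending.push_back(succ);
            }
        }
        if (pending.size() != before) {
            std::stable_sort(pending.begin() + before, pending.end(),
                             [](Block *a, Block *b) { return a->loopDepth > b->loopDepth; });
            continue;  // process the newly pushed blocks first
        }

        // All successors are visited: this block's post-order position is final.
        pending.pop_back();
        postOrder.push_back(block);
    }

    std::reverse(postOrder.begin(), postOrder.end());  // post order -> RPO
    return postOrder;
}

int main()
{
    // A diamond followed by a loop:
    //   entry -> {thenB, elseB} -> join -> header, header -> {body, exitB},
    //   body -> header (backedge)
    Block exitB{"exit", 0}, body{"body", 1}, header{"header", 1};
    Block join{"join", 0}, thenB{"then", 0}, elseB{"else", 0}, entry{"entry", 0};
    entry.succs = {&thenB, &elseB};
    thenB.succs = {&join};
    elseB.succs = {&join};
    join.succs = {&header};
    header.succs = {&body, &exitB};
    body.succs = {&header};

    for (Block *b : reorder(&entry))
        std::printf("%s ", b->name);  // prints: entry then else join header body exit
    std::printf("\n");
}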
bool
ion::RenumberBlocks(MIRGraph &graph)
{

View file

@@ -39,6 +39,12 @@ EliminateDeadCode(MIRGenerator *mir, MIRGraph &graph);
bool
ApplyTypeInformation(MIRGenerator *mir, MIRGraph &graph);
bool
RecalculateLoopDepth(MIRGraph &graph);
bool
ReorderBlocks(MIRGraph &graph);
bool
RenumberBlocks(MIRGraph &graph);

View file

@@ -1265,7 +1265,6 @@ IonBuilder::processIfEnd(CFGState &state)
}
current = state.branch.ifFalse;
graph().moveBlockToEnd(current);
pc = current->pc();
return ControlStatus_Joined;
}
@@ -1280,7 +1279,6 @@ IonBuilder::processIfElseTrueEnd(CFGState &state)
state.stopAt = state.branch.falseEnd;
pc = state.branch.ifFalse->pc();
current = state.branch.ifFalse;
graph().moveBlockToEnd(current);
return ControlStatus_Jumped;
}
@@ -1339,10 +1337,7 @@ IonBuilder::processBrokenLoop(CFGState &state)
// structure never actually loops, the condition itself can still fail and
// thus we must resume at the successor, if one exists.
current = state.loop.successor;
if (current) {
JS_ASSERT(current->loopDepth() == loopDepth_);
graph().moveBlockToEnd(current);
}
JS_ASSERT_IF(current, current->loopDepth() == loopDepth_);
// Join the breaks together and continue parsing.
if (state.loop.breaks) {
@@ -1384,10 +1379,8 @@ IonBuilder::finishLoop(CFGState &state, MBasicBlock *successor)
// including the successor.
if (!state.loop.entry->setBackedge(current))
return ControlStatus_Error;
if (successor) {
graph().moveBlockToEnd(successor);
if (successor)
successor->inheritPhis(state.loop.entry);
}
if (state.loop.breaks) {
// Propagate phis placed in the header to individual break exit points.
@@ -1642,9 +1635,6 @@ IonBuilder::processNextTableSwitchCase(CFGState &state)
successor->addPredecessor(current);
}
// Insert successor after the current block, to maintain RPO.
graph().moveBlockToEnd(successor);
// If this is the last successor, the block should stop at the end of the tableswitch;
// otherwise it should stop at the start of the next successor.
if (state.tableswitch.currentBlock+1 < state.tableswitch.ins->numBlocks())
@@ -1668,7 +1658,6 @@ IonBuilder::processAndOrEnd(CFGState &state)
return ControlStatus_Error;
current = state.branch.ifFalse;
graph().moveBlockToEnd(current);
pc = current->pc();
return ControlStatus_Joined;
}
@@ -2215,9 +2204,6 @@ IonBuilder::tableSwitch(JSOp op, jssrcnote *sn)
pc2 += JUMP_OFFSET_LEN;
}
// Move defaultcase to the end, to maintain RPO.
graph().moveBlockToEnd(defaultcase);
JS_ASSERT(tableswitch->numCases() == (uint32_t)(high - low + 1));
JS_ASSERT(tableswitch->numSuccessors() > 0);
@@ -2543,9 +2529,6 @@ IonBuilder::processCondSwitchBody(CFGState &state)
MBasicBlock *nextBody = bodies[currentIdx++];
JS_ASSERT_IF(current, pc == nextBody->pc());
// Fix the reverse post-order iteration.
graph().moveBlockToEnd(nextBody);
// The last body continues into the new one.
if (current) {
current->end(MGoto::New(nextBody));
@@ -3869,7 +3852,6 @@ IonBuilder::inlineCalls(CallInfo &callInfo, AutoObjectVector &targets,
// Check the depth change: +1 for retval
JS_ASSERT(returnBlock->stackDepth() == dispatchBlock->stackDepth() - callInfo.numFormals() + 1);
graph().moveBlockToEnd(returnBlock);
current = returnBlock;
return true;
}

View file

@@ -482,6 +482,11 @@ class MIRGraph
size_t numBlocks_;
#ifdef DEBUG
// Is the graph in Reverse Post Order?
bool inRPO_;
#endif
public:
MIRGraph(TempAllocator *alloc)
: alloc_(alloc),
@@ -491,6 +496,9 @@ class MIRGraph
osrBlock_(NULL),
osrStart_(NULL),
numBlocks_(0)
#ifdef DEBUG
, inRPO_(false)
#endif
{ }
template <typename T>
@@ -539,25 +547,28 @@ class MIRGraph
return blocks_.end();
}
PostorderIterator poBegin() {
JS_ASSERT(inRPO_);
return blocks_.rbegin();
}
PostorderIterator poEnd() {
JS_ASSERT(inRPO_);
return blocks_.rend();
}
ReversePostorderIterator rpoBegin() {
JS_ASSERT(inRPO_);
return blocks_.begin();
}
ReversePostorderIterator rpoEnd() {
JS_ASSERT(inRPO_);
return blocks_.end();
}
void removeBlock(MBasicBlock *block) {
blocks_.remove(block);
numBlocks_--;
}
void moveBlockToEnd(MBasicBlock *block) {
JS_ASSERT(block->id());
void moveBlockToBegin(MBasicBlock *block) {
blocks_.remove(block);
blocks_.pushBack(block);
blocks_.pushFront(block);
}
size_t numBlocks() const {
return numBlocks_;
@@ -612,6 +623,12 @@ class MIRGraph
return scripts_.begin();
}
#ifdef DEBUG
void setInRPO() {
inRPO_ = true;
}
#endif
// The ParSlice is an instance of ForkJoinSlice*, it carries
// "per-helper-thread" information. So as not to modify the
// calling convention for parallel code, we obtain the current
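The new inRPO_ member is an instance of a common debug-only invariant flag: it compiles away in release builds, while debug builds can assert that order-dependent iteration is not used before ReorderBlocks and RenumberBlocks have run. A generic sketch of the pattern with illustrative names (using assert instead of JS_ASSERT):

#include <cassert>
#include <cstdio>
#include <vector>

// A container with a debug-only ordering flag, in the spirit of MIRGraph::inRPO_.
class BlockList {
    std::vector<int> blocks_;
#ifdef DEBUG
    bool inRPO_ = false;
#endif

  public:
    void addBlock(int id) { blocks_.push_back(id); }

    void setInRPO() {
#ifdef DEBUG
        inRPO_ = true;
#endif
    }

    // Order-sensitive traversal: only legal once the list is known to be in RPO.
    std::vector<int>::const_iterator rpoBegin() const {
#ifdef DEBUG
        assert(inRPO_ && "list must be in RPO before RPO iteration");
#endif
        return blocks_.begin();
    }
    std::vector<int>::const_iterator rpoEnd() const {
#ifdef DEBUG
        assert(inRPO_);
#endif
        return blocks_.end();
    }
};

int main()
{
    BlockList list;
    list.addBlock(0);
    list.addBlock(1);
    list.setInRPO();  // without this call, a DEBUG build would assert below
    for (auto it = list.rpoBegin(); it != list.rpoEnd(); ++it)
        std::printf("%d ", *it);
    std::printf("\n");
}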