Backed out changeset b8a962a66a80 (bug 1044578) for ARM simulator orange.

This commit is contained in:
Ryan VanderMeulen 2014-08-26 09:56:43 -04:00
Родитель 3f788f9a43
Коммит fceed2512c
5 изменённых файлов: 15 добавлений и 104 удалений

Просмотреть файл

@ -542,20 +542,19 @@ BacktrackingAllocator::processInterval(LiveInterval *interval)
bool canAllocate = setIntervalRequirement(interval);
bool fixed;
LiveInterval *conflict[MaxAliasedRegisters] = {nullptr};
LiveInterval *conflict = nullptr;
for (size_t attempt = 0;; attempt++) {
if (canAllocate) {
bool success = false;
fixed = false;
for (uint32_t i = 0; i < MaxAliasedRegisters; i++)
conflict[i] = nullptr;
conflict = nullptr;
// Ok, let's try allocating for this interval.
if (interval->requirement()->kind() == Requirement::FIXED) {
if (!tryAllocateFixed(interval, &success, &fixed, conflict))
if (!tryAllocateFixed(interval, &success, &fixed, &conflict))
return false;
} else {
if (!tryAllocateNonFixed(interval, &success, &fixed, conflict))
if (!tryAllocateNonFixed(interval, &success, &fixed, &conflict))
return false;
}
@ -565,15 +564,12 @@ BacktrackingAllocator::processInterval(LiveInterval *interval)
// If that didn't work, but we have a non-fixed LiveInterval known
// to be conflicting, maybe we can evict it and try again.
// The conflict array is filled up starting from 0, and they were
// initialized to nullptr, so just checking the 0th element is
// sufficient.
if (attempt < MAX_ATTEMPTS &&
!fixed &&
conflict[0] &&
computeSpillsWeight(conflict) < computeSpillWeight(interval))
conflict &&
computeSpillWeight(conflict) < computeSpillWeight(interval))
{
if (!evictIntervals(conflict))
if (!evictInterval(conflict))
return false;
continue;
}
@ -775,11 +771,9 @@ BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *in
JS_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
interval->requirement()->allocation() == LAllocation(r.reg));
size_t aliasCount = 0;
LiveInterval *localConflicts[MaxAliasedRegisters] = {nullptr};
for (size_t i = 0; i < interval->numRanges(); i++) {
AllocatedRange range(interval, interval->getRange(i)), existing;
bool shortCircuit = false;
for (size_t a = 0; a < r.reg.numAliased(); a++) {
PhysicalRegister &rAlias = registers[r.reg.aliased(a).code()];
if (!rAlias.allocations.contains(range, &existing))
@ -792,19 +786,8 @@ BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *in
existing.range->toString(),
computeSpillWeight(existing.interval));
}
JS_ASSERT(aliasCount < MaxAliasedRegisters);
localConflicts[aliasCount++] = existing.interval;
// Abstraction violation, and not-perfect optimization!
// if the registers are an exact fit, then no other intervals
// need to be checked. They are an exact fit if a = 0 (by
// convention). This is non-perfect because this optimization
// also applies if the register-to-be-evicted is strictly
// larger than r. There is currently no api to give this
// information, so check a directly for now.
if (a == 0) {
shortCircuit = true;
break;
}
if (!*pconflicting || computeSpillWeight(existing.interval) < computeSpillWeight(*pconflicting))
*pconflicting = existing.interval;
} else {
if (IonSpewEnabled(IonSpew_RegAlloc)) {
IonSpew(IonSpew_RegAlloc, " %s collides with fixed use %s",
@ -814,16 +797,8 @@ BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *in
}
return true;
}
if (shortCircuit)
break;
}
if (aliasCount != 0) {
if (!pconflicting[0] || computeSpillsWeight(localConflicts) < computeSpillsWeight(pconflicting)) {
for (size_t i = 0; i < MaxAliasedRegisters; i++)
pconflicting[i] = localConflicts[i];
}
return true;
}
IonSpew(IonSpew_RegAlloc, " allocated to %s", r.reg.name());
for (size_t i = 0; i < interval->numRanges(); i++) {
@ -1913,33 +1888,8 @@ BacktrackingAllocator::splitAcrossCalls(LiveInterval *interval)
return splitAt(interval, callPositions);
}
// Scan the fixed-size conflict array (entries may be null) and return the
// interval whose live range ends latest, or nullptr if the array is empty.
static LiveInterval *
maxEnd(LiveInterval **conflicts)
{
    LiveInterval *best = nullptr;
    for (uint32_t idx = 0; idx < MaxAliasedRegisters; idx++) {
        LiveInterval *cur = conflicts[idx];
        if (!cur)
            continue;
        if (!best || cur->end() > best->end())
            best = cur;
    }
    return best;
}
// Scan the fixed-size conflict array (entries may be null) and return the
// interval whose live range starts earliest, or nullptr if the array is empty.
static LiveInterval *
minStart(LiveInterval **conflicts)
{
    LiveInterval *best = nullptr;
    for (uint32_t idx = 0; idx < MaxAliasedRegisters; idx++) {
        LiveInterval *cur = conflicts[idx];
        if (!cur)
            continue;
        if (!best || cur->start() < best->start())
            best = cur;
    }
    return best;
}
bool
BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval **conflict)
BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict)
{
bool success = false;
@ -1948,12 +1898,12 @@ BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval
if (success)
return true;
if (!trySplitBeforeFirstRegisterUse(interval, maxEnd(conflict), &success))
if (!trySplitBeforeFirstRegisterUse(interval, conflict, &success))
return false;
if (success)
return true;
if (!trySplitAfterLastRegisterUse(interval, minStart(conflict), &success))
if (!trySplitAfterLastRegisterUse(interval, conflict, &success))
return false;
if (success)
return true;

Просмотреть файл

@ -225,15 +225,6 @@ class BacktrackingAllocator
bool tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
bool *psuccess, bool *pfixed, LiveInterval **pconflicting);
bool evictInterval(LiveInterval *interval);
// Evict every non-null interval in the fixed-size conflict array.
// Stops and reports failure as soon as any single eviction fails.
bool evictIntervals(LiveInterval **intervals) {
    for (size_t idx = 0; idx < MaxAliasedRegisters; idx++) {
        LiveInterval *ival = intervals[idx];
        if (!ival)
            continue;
        if (!evictInterval(ival))
            return false;
    }
    return true;
}
void distributeUses(LiveInterval *interval, const LiveIntervalVector &newIntervals);
bool split(LiveInterval *interval, const LiveIntervalVector &newIntervals);
bool requeueIntervals(const LiveIntervalVector &newIntervals);
@ -264,20 +255,11 @@ class BacktrackingAllocator
size_t computePriority(const LiveInterval *interval);
size_t computeSpillWeight(const LiveInterval *interval);
// Sum the spill weights of all non-null intervals in the fixed-size
// conflict array; null slots contribute nothing.
size_t computeSpillsWeight(LiveInterval **intervals) {
    size_t total = 0;
    for (size_t idx = 0; idx < MaxAliasedRegisters; idx++) {
        if (intervals[idx] != nullptr)
            total += computeSpillWeight(intervals[idx]);
    }
    return total;
}
size_t computePriority(const VirtualRegisterGroup *group);
size_t computeSpillWeight(const VirtualRegisterGroup *group);
bool chooseIntervalSplit(LiveInterval *interval, LiveInterval **conflict);
bool chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict);
bool splitAt(LiveInterval *interval, const SplitPositions &splitPositions);
bool trySplitAcrossHotcode(LiveInterval *interval, bool *success);

Просмотреть файл

@ -35,13 +35,6 @@ static const int32_t NUNBOX32_TYPE_OFFSET = 4;
static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
static const uint32_t ShadowStackSpace = 0;
// MaxAliasedRegisters is the maximum number of names that can be allocated to
// any register. For example since S0, S1 and D0 cannot be in use
// at the same time, the largest number of aliased registers is 2 (S0, S1).
// This is for the register allocators. It is so the allocator can keep track
// of all intervals that alias a given register.
static const uint32_t MaxAliasedRegisters = 2;
////
// These offsets are related to bailouts.
////

Просмотреть файл

@ -24,13 +24,6 @@ static const uint32_t ShadowStackSpace = 32;
static const uint32_t ShadowStackSpace = 0;
#endif
// MaxAliasedRegisters is the largest number of registers that can be simultaneously
// allocated, and alias a single register. If al and ah could be allocated
// independently, this would be two, but since there is no aliasing on x86/x64
// this is 1.
// This is so the register allocator knows the largest number of intervals
// it may have to evict at once
static const uint32_t MaxAliasedRegisters = 1;
class Registers {
public:
typedef X86Registers::RegisterID Code;

Просмотреть файл

@ -26,13 +26,6 @@ static const uint32_t ShadowStackSpace = 0;
static const int32_t NUNBOX32_TYPE_OFFSET = 4;
static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
// MaxAliasedRegisters is the largest number of registers that can be simultaneously
// allocated, and alias a single register. If al and ah could be allocated
// independently, this would be two, but since there is no aliasing on x86/x64
// this is 1.
// This is so the register allocator knows the largest number of intervals
// it may have to evict at once
static const uint32_t MaxAliasedRegisters = 1;
////
// These offsets are related to bailouts.
////