This commit is contained in:
Ms2ger 2012-12-02 11:32:57 +01:00
Родитель 79846abbee 62a965cb12
Коммит 910e6a92cd
23 изменённых файлов: 1452 добавлений и 1335 удалений

Просмотреть файл

@ -212,10 +212,6 @@ maybe_clobber_profiledbuild:
find $(DIST)/$(MOZ_APP_NAME) -name "*.pgc" -exec mv {} $(DIST)/bin \;
endif
# put in our default gdbinit so that the gdb debugging experience is happier.
libs:: .gdbinit
$(INSTALL) $< $(DIST)/bin
.PHONY: maybe_clobber_profiledbuild
# Look for R_386_PC32 relocations in shared libs, these

Просмотреть файл

@ -23,7 +23,6 @@ XPCOMUtils.defineLazyGetter(this, "domWindowUtils", function () {
.getInterface(Ci.nsIDOMWindowUtils);
});
const FOCUS_CHANGE_DELAY = 20;
const RESIZE_SCROLL_DELAY = 20;
let HTMLInputElement = Ci.nsIDOMHTMLInputElement;
@ -51,7 +50,6 @@ let FormAssistant = {
isKeyboardOpened: false,
selectionStart: 0,
selectionEnd: 0,
blurTimeout: null,
scrollIntoViewTimeout: null,
_focusedElement: null,
@ -95,22 +93,13 @@ let FormAssistant = {
if (this.isTextInputElement(target) && this.isIMEDisabled())
return;
if (target && this.isFocusableElement(target)) {
if (this.blurTimeout) {
this.blurTimeout = content.clearTimeout(this.blurTimeout);
this.handleIMEStateDisabled();
}
if (target && this.isFocusableElement(target))
this.handleIMEStateEnabled(target);
}
break;
case "blur":
if (this.focusedElement) {
this.blurTimeout = content.setTimeout(function () {
this.blurTimeout = null;
this.handleIMEStateDisabled();
}.bind(this), FOCUS_CHANGE_DELAY);
}
if (this.focusedElement)
this.handleIMEStateDisabled();
break;
case 'mousedown':

Просмотреть файл

@ -377,10 +377,6 @@
@BINPATH@/components/nsHelperAppDlg.js
@BINPATH@/components/nsDownloadManagerUI.manifest
@BINPATH@/components/nsDownloadManagerUI.js
@BINPATH@/components/NetworkGeolocationProvider.manifest
@BINPATH@/components/NetworkGeolocationProvider.js
@BINPATH@/components/GPSDGeolocationProvider.manifest
@BINPATH@/components/GPSDGeolocationProvider.js
@BINPATH@/components/nsSidebar.manifest
@BINPATH@/components/nsSidebar.js
#ifndef MOZ_WIDGET_GONK

Просмотреть файл

@ -1,5 +1,11 @@
# .gdbinit file for debugging Mozilla
# You may need to put an 'add-auto-load-safe-path' command in your
# $HOME/.gdbinit file to get GDB to trust this file. If your builds are
# generally in $HOME/moz, then you can say:
#
# add-auto-load-safe-path ~/moz
# Don't stop for the SIG32/33/etc signals that Flash produces
handle SIG32 noprint nostop pass
handle SIG33 noprint nostop pass

Просмотреть файл

@ -97,6 +97,12 @@ endif
endif
# Put a useful .gdbinit in the bin directory, to be picked up automatically
# by GDB when we debug executables there.
GDBINIT_FILES := .gdbinit
GDBINIT_DEST = $(FINAL_TARGET)
INSTALL_TARGETS += GDBINIT
include $(topsrcdir)/config/rules.mk
# we install to _leaktest/

Просмотреть файл

@ -4450,15 +4450,19 @@ def getEnumValueName(value):
# characters in them. Deal with the former by returning "_empty",
# deal with possible name collisions from that by throwing if the
# enum value is actually "_empty", and throw on any value
# containing chars other than [a-z] or '-' for now. Replace '-' with '_'.
value = value.replace('-', '_')
# containing non-ASCII chars for now. Replace all chars other than
# [0-9A-Za-z_] with '_'.
if re.match("[^\x20-\x7E]", value):
raise SyntaxError('Enum value "' + value + '" contains non-ASCII characters')
if re.match("^[0-9]", value):
raise SyntaxError('Enum value "' + value + '" starts with a digit')
value = re.sub(r'[^0-9A-Za-z_]', '_', value)
if re.match("^_[A-Z]|__", value):
raise SyntaxError('Enum value "' + value + '" is reserved by the C++ spec')
if value == "_empty":
raise SyntaxError('"_empty" is not an IDL enum value we support yet')
if value == "":
return "_empty"
if not re.match("^[a-z_]+$", value):
raise SyntaxError('Enum value "' + value + '" contains characters '
'outside [a-z_]')
return MakeNativeName(value)
class CGEnum(CGThing):

Просмотреть файл

@ -1833,6 +1833,9 @@ ContentParent::RecvAsyncMessage(const nsString& aMsg,
bool
ContentParent::RecvAddGeolocationListener()
{
if (!AssertAppProcessPermission(this, "geolocation")) {
return false;
}
if (mGeolocationWatchID == -1) {
nsCOMPtr<nsIDOMGeoGeolocation> geo = do_GetService("@mozilla.org/geolocation;1");
if (!geo) {
@ -1847,20 +1850,28 @@ ContentParent::RecvAddGeolocationListener()
bool
ContentParent::RecvRemoveGeolocationListener()
{
if (mGeolocationWatchID != -1) {
nsCOMPtr<nsIDOMGeoGeolocation> geo = do_GetService("@mozilla.org/geolocation;1");
if (!geo) {
return true;
}
geo->ClearWatch(mGeolocationWatchID);
mGeolocationWatchID = -1;
if (mGeolocationWatchID == -1) {
return true;
}
if (!AssertAppProcessPermission(this, "geolocation")) {
return false;
}
nsCOMPtr<nsIDOMGeoGeolocation> geo = do_GetService("@mozilla.org/geolocation;1");
if (!geo) {
return true;
}
geo->ClearWatch(mGeolocationWatchID);
mGeolocationWatchID = -1;
return true;
}
// Geolocation observer callback: relay a position update to the child
// process, but only after verifying the content process actually holds
// the "geolocation" permission.
NS_IMETHODIMP
ContentParent::HandleEvent(nsIDOMGeoPosition* position)
{
    if (!AssertAppProcessPermission(this, "geolocation"))
        return NS_ERROR_FAILURE;

    unused << SendGeolocationUpdate(GeoPosition(position));
    return NS_OK;
}

Просмотреть файл

@ -41,6 +41,11 @@
#include "OSFileConstants.h"
#include "nsIOSFileConstantsService.h"
#if defined(__DragonFly__) || defined(__FreeBSD__) \
|| defined(__NetBSD__) || defined(__OpenBSD__)
#define __dd_fd dd_fd
#endif
/**
* This module defines the basic libc constants (error numbers, open modes,
* etc.) used by OS.File and possibly other OS-bound JavaScript libraries.
@ -64,6 +69,7 @@ typedef struct {
nsString libDir;
nsString tmpDir;
nsString profileDir;
nsString localProfileDir;
} Paths;
/**
@ -133,6 +139,7 @@ nsresult InitOSFileConstants()
GetPathToSpecialDir(NS_OS_TEMP_DIR, paths->tmpDir);
GetPathToSpecialDir(NS_APP_USER_PROFILE_50_DIR, paths->profileDir);
GetPathToSpecialDir(NS_APP_USER_PROFILE_LOCAL_50_DIR, paths->localProfileDir);
gPaths = paths.forget();
return NS_OK;
@ -381,8 +388,8 @@ static dom::ConstantSpec gLibcProperties[] =
{ "OSFILE_OFFSETOF_DIRENT_D_TYPE", INT_TO_JSVAL(offsetof (struct dirent, d_type)) },
#endif // defined(DT_UNKNOWN)
// Under MacOS X, |dirfd| is a macro rather than a function, so we
// need a little help to get it to work
// Under MacOS X and BSDs, |dirfd| is a macro rather than a
// function, so we need a little help to get it to work
#if defined(dirfd)
{ "OSFILE_SIZEOF_DIR", INT_TO_JSVAL(sizeof (DIR)) },
@ -676,6 +683,10 @@ bool DefineOSFileConstants(JSContext *cx, JSObject *global)
return false;
}
if (!SetStringProperty(cx, objPath, "localProfileDir", gPaths->localProfileDir)) {
return false;
}
return true;
}

Просмотреть файл

@ -292,6 +292,7 @@ CPPSRCS += MIR.cpp \
LICM.cpp \
LinearScan.cpp \
LIR.cpp \
LiveRangeAllocator.cpp \
Lowering.cpp \
Lowering-shared.cpp \
MCallOptimize.cpp \

Просмотреть файл

@ -117,9 +117,9 @@ C1Spewer::spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LInstruction *i
for (size_t i = 0; i < vreg->numIntervals(); i++) {
LiveInterval *live = vreg->getInterval(i);
if (live->numRanges()) {
fprintf(fp, "%d object \"", (i == 0) ? vreg->reg() : int32(nextId++));
fprintf(fp, "%d object \"", (i == 0) ? vreg->id() : int32(nextId++));
LAllocation::PrintAllocation(fp, live->getAllocation());
fprintf(fp, "\" %d -1", vreg->reg());
fprintf(fp, "\" %d -1", vreg->id());
for (size_t j = 0; j < live->numRanges(); j++) {
fprintf(fp, " [%d, %d[", live->getRange(j)->from.pos(),
live->getRange(j)->to.pos());

Просмотреть файл

@ -396,7 +396,7 @@ JSONSpewer::spewIntervals(LinearScanAllocator *regalloc)
VirtualRegister *vreg = &regalloc->vregs[ins->getDef(k)->virtualRegister()];
beginObject();
integerProperty("vreg", vreg->reg());
integerProperty("vreg", vreg->id());
beginListProperty("intervals");
for (size_t i = 0; i < vreg->numIntervals(); i++) {

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -8,7 +8,7 @@
#ifndef js_ion_linearscan_h__
#define js_ion_linearscan_h__
#include "RegisterAllocator.h"
#include "LiveRangeAllocator.h"
#include "BitSet.h"
#include "StackSlotAllocator.h"
@ -17,245 +17,9 @@
namespace js {
namespace ion {
class VirtualRegister;
class Requirement
class LinearScanVirtualRegister : public VirtualRegister
{
public:
enum Kind {
NONE,
REGISTER,
FIXED,
SAME_AS_OTHER
};
Requirement()
: kind_(NONE)
{ }
Requirement(Kind kind)
: kind_(kind)
{
// These have dedicated constructors;
JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
}
Requirement(Kind kind, CodePosition at)
: kind_(kind),
position_(at)
{ }
Requirement(LAllocation fixed)
: kind_(FIXED),
allocation_(fixed)
{ }
// Only useful as a hint, encodes where the fixed requirement is used to
// avoid allocating a fixed register too early.
Requirement(LAllocation fixed, CodePosition at)
: kind_(FIXED),
allocation_(fixed),
position_(at)
{ }
Requirement(uint32 vreg, CodePosition at)
: kind_(SAME_AS_OTHER),
allocation_(LUse(vreg, LUse::ANY)),
position_(at)
{ }
Kind kind() const {
return kind_;
}
LAllocation allocation() const {
JS_ASSERT(!allocation_.isUse());
return allocation_;
}
uint32 virtualRegister() const {
JS_ASSERT(allocation_.isUse());
return allocation_.toUse()->virtualRegister();
}
CodePosition pos() const {
return position_;
}
int priority() const;
private:
Kind kind_;
LAllocation allocation_;
CodePosition position_;
};
struct UsePosition : public TempObject,
public InlineForwardListNode<UsePosition>
{
LUse *use;
CodePosition pos;
UsePosition(LUse *use, CodePosition pos) :
use(use),
pos(pos)
{ }
};
typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
/*
* A live interval is a set of disjoint ranges of code positions where a
* virtual register is live. Linear scan register allocation operates on
* these intervals, splitting them as necessary and assigning allocations
* to them as it runs.
*/
class LiveInterval
: public InlineListNode<LiveInterval>,
public TempObject
{
public:
/*
* A range is a contiguous sequence of CodePositions where the virtual
* register associated with this interval is live.
*/
struct Range {
Range(CodePosition f, CodePosition t)
: from(f),
to(t)
{
JS_ASSERT(from < to);
}
CodePosition from;
// The end of this range, exclusive.
CodePosition to;
};
private:
Vector<Range, 1, IonAllocPolicy> ranges_;
LAllocation alloc_;
VirtualRegister *reg_;
uint32 index_;
Requirement requirement_;
Requirement hint_;
InlineForwardList<UsePosition> uses_;
size_t lastProcessedRange_;
public:
LiveInterval(VirtualRegister *reg, uint32 index)
: reg_(reg),
index_(index),
lastProcessedRange_(size_t(-1))
{ }
bool addRange(CodePosition from, CodePosition to);
bool addRangeAtHead(CodePosition from, CodePosition to);
void setFrom(CodePosition from);
CodePosition intersect(LiveInterval *other);
bool covers(CodePosition pos);
CodePosition nextCoveredAfter(CodePosition pos);
CodePosition start() const {
JS_ASSERT(!ranges_.empty());
return ranges_.back().from;
}
CodePosition end() const {
JS_ASSERT(!ranges_.empty());
return ranges_.begin()->to;
}
size_t numRanges() const {
return ranges_.length();
}
const Range *getRange(size_t i) const {
return &ranges_[i];
}
void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
// If the range starts after pos, we may not be able to use
// it in the next lastProcessedRangeIfValid call.
JS_ASSERT(ranges_[range].from <= pos);
lastProcessedRange_ = range;
}
size_t lastProcessedRangeIfValid(CodePosition pos) const {
if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
return lastProcessedRange_;
return ranges_.length() - 1;
}
LAllocation *getAllocation() {
return &alloc_;
}
void setAllocation(LAllocation alloc) {
alloc_ = alloc;
}
VirtualRegister *reg() const {
return reg_;
}
uint32 index() const {
return index_;
}
void setIndex(uint32 index) {
index_ = index;
}
Requirement *requirement() {
return &requirement_;
}
void setRequirement(const Requirement &requirement) {
// A SAME_AS_OTHER requirement complicates regalloc too much; it
// should only be used as hint.
JS_ASSERT(requirement.kind() != Requirement::SAME_AS_OTHER);
// Fixed registers are handled with fixed intervals, so fixed requirements
// are only valid for non-register allocations.f
JS_ASSERT_IF(requirement.kind() == Requirement::FIXED,
!requirement.allocation().isRegister());
requirement_ = requirement;
}
Requirement *hint() {
return &hint_;
}
void setHint(const Requirement &hint) {
hint_ = hint;
}
bool isSpill() const {
return alloc_.isStackSlot();
}
bool splitFrom(CodePosition pos, LiveInterval *after);
void addUse(UsePosition *use);
UsePosition *nextUseAfter(CodePosition pos);
CodePosition nextUsePosAfter(CodePosition pos);
CodePosition firstIncompatibleUse(LAllocation alloc);
UsePositionIterator usesBegin() const {
return uses_.begin();
}
UsePositionIterator usesEnd() const {
return uses_.end();
}
#ifdef DEBUG
void validateRanges();
#endif
};
/*
* Represents all of the register allocation state associated with a virtual
* register, including all associated intervals and pointers to relevant LIR
* structures.
*/
class VirtualRegister
{
uint32 reg_;
LBlock *block_;
LInstruction *ins_;
LDefinition *def_;
Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;
LAllocation *canonicalSpill_;
CodePosition spillPosition_ ;
@ -265,65 +29,7 @@ class VirtualRegister
// processed by freeAllocation().
bool finished_ : 1;
// Whether def_ is a temp or an output.
bool isTemp_ : 1;
public:
bool init(uint32 reg, LBlock *block, LInstruction *ins, LDefinition *def, bool isTemp) {
reg_ = reg;
block_ = block;
ins_ = ins;
def_ = def;
isTemp_ = isTemp;
LiveInterval *initial = new LiveInterval(this, 0);
if (!initial)
return false;
return intervals_.append(initial);
}
uint32 reg() {
return reg_;
}
LBlock *block() {
return block_;
}
LInstruction *ins() {
return ins_;
}
LDefinition *def() const {
return def_;
}
LDefinition::Type type() const {
return def()->type();
}
bool isTemp() const {
return isTemp_;
}
size_t numIntervals() const {
return intervals_.length();
}
LiveInterval *getInterval(size_t i) const {
return intervals_[i];
}
LiveInterval *lastInterval() const {
JS_ASSERT(numIntervals() > 0);
return getInterval(numIntervals() - 1);
}
bool addInterval(LiveInterval *interval) {
JS_ASSERT(interval->numRanges());
// Preserve ascending order for faster lookups.
LiveInterval **found = NULL;
LiveInterval **i;
for (i = intervals_.begin(); i != intervals_.end(); i++) {
if (!found && interval->start() < (*i)->start())
found = i;
if (found)
(*i)->setIndex((*i)->index() + 1);
}
if (!found)
found = intervals_.end();
return intervals_.insert(found, interval);
}
void setCanonicalSpill(LAllocation *alloc) {
canonicalSpill_ = alloc;
}
@ -339,9 +45,6 @@ class VirtualRegister
bool finished() const {
return finished_;
}
bool isDouble() const {
return def_->type() == LDefinition::DOUBLE;
}
void setSpillAtDefinition(CodePosition pos) {
spillAtDefinition_ = true;
setSpillPosition(pos);
@ -355,63 +58,9 @@ class VirtualRegister
void setSpillPosition(CodePosition pos) {
spillPosition_ = pos;
}
LiveInterval *intervalFor(CodePosition pos);
LiveInterval *getFirstInterval();
};
class VirtualRegisterMap
{
private:
VirtualRegister *vregs_;
uint32 numVregs_;
public:
VirtualRegisterMap()
: vregs_(NULL),
numVregs_(0)
{ }
bool init(MIRGenerator *gen, uint32 numVregs) {
vregs_ = gen->allocate<VirtualRegister>(numVregs);
numVregs_ = numVregs;
if (!vregs_)
return false;
memset(vregs_, 0, sizeof(VirtualRegister) * numVregs);
return true;
}
VirtualRegister &operator[](unsigned int index) {
JS_ASSERT(index < numVregs_);
return vregs_[index];
}
VirtualRegister &operator[](const LAllocation *alloc) {
JS_ASSERT(alloc->isUse());
JS_ASSERT(alloc->toUse()->virtualRegister() < numVregs_);
return vregs_[alloc->toUse()->virtualRegister()];
}
VirtualRegister &operator[](const LDefinition *def) {
JS_ASSERT(def->virtualRegister() < numVregs_);
return vregs_[def->virtualRegister()];
}
uint32 numVirtualRegisters() const {
return numVregs_;
}
};
typedef HashMap<uint32,
LInstruction *,
DefaultHasher<uint32>,
IonAllocPolicy> InstructionMap;
typedef HashMap<uint32,
LiveInterval *,
DefaultHasher<uint32>,
IonAllocPolicy> LiveMap;
typedef InlineList<LiveInterval>::iterator IntervalIterator;
typedef InlineList<LiveInterval>::reverse_iterator IntervalReverseIterator;
class LinearScanAllocator : public RegisterAllocator
class LinearScanAllocator : public LiveRangeAllocator<LinearScanVirtualRegister>
{
friend class C1Spewer;
friend class JSONSpewer;
@ -430,15 +79,6 @@ class LinearScanAllocator : public RegisterAllocator
LiveInterval *dequeue();
};
// Computed inforamtion
BitSet **liveIn;
VirtualRegisterMap vregs;
FixedArityList<LiveInterval *, AnyRegister::Total> fixedIntervals;
// Union of all ranges in fixedIntervals, used to quickly determine
// whether an interval intersects with a fixed register.
LiveInterval *fixedIntervalsUnion;
// Allocation state
StackSlotAllocator stackSlotAllocator;
@ -454,8 +94,6 @@ class LinearScanAllocator : public RegisterAllocator
InlineList<LiveInterval> handled;
LiveInterval *current;
bool createDataStructures();
bool buildLivenessInfo();
bool allocateRegisters();
bool resolveControlFlow();
bool reifyAllocations();
@ -481,30 +119,23 @@ class LinearScanAllocator : public RegisterAllocator
void setIntervalRequirement(LiveInterval *interval);
size_t findFirstSafepoint(LiveInterval *interval, size_t firstSafepoint);
size_t findFirstNonCallSafepoint(CodePosition from);
bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
return false;
return fixedIntervalsUnion->addRangeAtHead(from, to);
}
bool isSpilledAt(LiveInterval *interval, CodePosition pos);
#ifdef DEBUG
void validateIntervals();
void validateAllocations();
void validateVirtualRegisters();
#else
inline void validateIntervals() { };
inline void validateAllocations() { };
inline void validateVirtualRegisters() { };
#endif
#ifdef JS_NUNBOX32
VirtualRegister *otherHalfOfNunbox(VirtualRegister *vreg);
LinearScanVirtualRegister *otherHalfOfNunbox(VirtualRegister *vreg);
#endif
public:
LinearScanAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
: RegisterAllocator(mir, lir, graph)
: LiveRangeAllocator<LinearScanVirtualRegister>(mir, lir, graph)
{
}

Просмотреть файл

@ -0,0 +1,742 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "LiveRangeAllocator.h"
#include "LinearScan.h"
using namespace js;
using namespace js::ion;
using mozilla::DebugOnly;
// Map a requirement kind to an allocation priority. Lower values are more
// urgent: fixed allocations must be honored first, then plain register
// requirements, then unconstrained intervals. Any other kind is a bug.
int
Requirement::priority() const
{
    if (kind_ == Requirement::FIXED)
        return 0;
    if (kind_ == Requirement::REGISTER)
        return 1;
    if (kind_ == Requirement::NONE)
        return 2;

    JS_NOT_REACHED("Unknown requirement kind.");
    return -1;
}
// Add a range that starts at or before every existing range. ranges_ is kept
// sorted in descending order of position (back() is the earliest range — see
// start()/end()), so the "head" of the interval is ranges_.back(). The new
// range is coalesced with the current first range when they touch or overlap.
bool
LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
{
JS_ASSERT(from < to);

Range newRange(from, to);

// Empty interval: the new range simply becomes the only range.
if (ranges_.empty())
return ranges_.append(newRange);

Range &first = ranges_.back();
// Strictly before the current first range: append as the new back().
if (to < first.from)
return ranges_.append(newRange);

// Abuts the current first range exactly: extend it leftwards.
if (to == first.from) {
first.from = from;
return true;
}

// Otherwise the ranges overlap; widen |first| to the union of the two.
JS_ASSERT(from < first.to);
JS_ASSERT(to > first.from);

if (from < first.from)
first.from = from;
if (to > first.to)
first.to = to;

return true;
}
// Insert [from, to) into this interval, merging it with any ranges it
// touches. ranges_ is sorted in descending order, so the scan walks from
// end()-1 (the latest-starting range) backwards toward the earliest.
bool
LiveInterval::addRange(CodePosition from, CodePosition to)
{
JS_ASSERT(from < to);

Range newRange(from, to);
Range *i;
// Find the location to insert the new range: the first (walking backwards)
// range whose end reaches the new range's start. If it overlaps, extend
// newRange leftwards to absorb it.
for (i = ranges_.end() - 1; i >= ranges_.begin(); i--) {
if (newRange.from <= i->to) {
if (i->from < newRange.from)
newRange.from = i->from;
break;
}
}

// Perform coalescing on overlapping ranges: erase every existing range the
// new (possibly widened) range swallows, extending newRange rightwards as
// needed. |i| ends up just before the insertion point.
for (; i >= ranges_.begin(); i--) {
if (newRange.to < i->from)
break;
if (newRange.to < i->to)
newRange.to = i->to;
ranges_.erase(i);
}

return ranges_.insert(i + 1, newRange);
}
// Trim the start of this interval to |from|: drop every range that ends
// before (or exactly at) |from|, and clip the first surviving range. Since
// ranges_ is sorted descending, back() is always the earliest range.
void
LiveInterval::setFrom(CodePosition from)
{
while (!ranges_.empty()) {
if (ranges_.back().to < from) {
// Entire earliest range lies before the new start; discard it.
ranges_.erase(&ranges_.back());
} else {
if (from == ranges_.back().to)
// Range ends exactly at |from| — it becomes empty, discard it.
ranges_.erase(&ranges_.back());
else
// Range extends past |from|; clip its start and stop.
ranges_.back().from = from;
break;
}
}
}
// Return whether |pos| falls inside one of this interval's ranges. Caches
// the last matching range index to speed up the monotone queries made by
// the allocator.
bool
LiveInterval::covers(CodePosition pos)
{
if (pos < start() || pos >= end())
return false;

// Loop over the ranges in ascending order. |i| is a size_t, so when it is
// decremented past 0 it wraps to a huge value and the i < length() test
// terminates the loop.
size_t i = lastProcessedRangeIfValid(pos);
for (; i < ranges_.length(); i--) {
if (pos < ranges_[i].from)
return false;
setLastProcessedRange(i, pos);
if (pos < ranges_[i].to)
return true;
}
return false;
}
// Return the first covered position at or after |pos|, or CodePosition::MIN
// if no range covers any position >= |pos|. Walks ranges from the
// latest-starting (index 0) toward the earliest, per the descending order
// of ranges_.
CodePosition
LiveInterval::nextCoveredAfter(CodePosition pos)
{
for (size_t i = 0; i < ranges_.length(); i++) {
if (ranges_[i].to <= pos) {
// This range ends before |pos|; the previously visited (later)
// range, if any, is the first one starting after |pos|.
if (i)
return ranges_[i-1].from;
break;
}
if (ranges_[i].from <= pos)
// |pos| lies inside this range.
return pos;
}
return CodePosition::MIN;
}
// Return the first position covered by both this interval and |other|, or
// CodePosition::MIN when they are disjoint. As a side effect, may update
// this interval's last-processed-range cache.
CodePosition
LiveInterval::intersect(LiveInterval *other)
{
// Normalize so that |this| starts no later than |other|.
if (start() > other->start())
return other->intersect(this);

// Loop over the ranges in ascending order. As an optimization,
// try to start at the last processed range. (ranges_ is stored in
// descending order, so the cursors are decremented to move forward.)
size_t i = lastProcessedRangeIfValid(other->start());
size_t j = other->ranges_.length() - 1;
if (i >= ranges_.length() || j >= other->ranges_.length())
return CodePosition::MIN;

while (true) {
const Range &r1 = ranges_[i];
const Range &r2 = other->ranges_[j];

if (r1.from <= r2.from) {
if (r1.from <= other->start())
setLastProcessedRange(i, other->start());
if (r2.from < r1.to)
return r2.from;
if (i == 0 || ranges_[i-1].from > other->end())
break;
i--;
} else {
if (r1.from < r2.to)
return r1.from;
if (j == 0 || other->ranges_[j-1].from > end())
break;
j--;
}
}

return CodePosition::MIN;
}
/*
 * This function takes the callee interval and moves all ranges following or
 * including provided position to the target interval. Additionally, if a
 * range in the callee interval spans the given position, it is split and the
 * latter half is placed in the target interval.
 *
 * This function should only be called if it is known that the interval should
 * actually be split (and, presumably, a move inserted). As such, it is an
 * error for the caller to request a split that moves all intervals into the
 * target. Doing so will trip the assertion at the bottom of the function.
 */
bool
LiveInterval::splitFrom(CodePosition pos, LiveInterval *after)
{
JS_ASSERT(pos >= start() && pos < end());
JS_ASSERT(after->ranges_.empty());

// Move all intervals over to the target by handing it our raw buffer,
// leaving this interval temporarily empty.
size_t bufferLength = ranges_.length();
Range *buffer = ranges_.extractRawBuffer();
if (!buffer)
return false;
after->ranges_.replaceRawBuffer(buffer, bufferLength);

// Move intervals back as required. Ranges are sorted descending, so this
// walks from the earliest range (back()) toward the latest, looking for
// the first range that extends past |pos|.
for (Range *i = &after->ranges_.back(); i >= after->ranges_.begin(); i--) {
if (pos >= i->to)
continue;

if (pos > i->from) {
// Split the range: the part before |pos| stays with us, the part
// from |pos| onward remains in |after|.
Range split(i->from, pos);
i->from = pos;
if (!ranges_.append(split))
return false;
}
// Everything after |i| in the buffer (the earlier ranges) belongs to
// this interval; copy it back and shrink |after| accordingly.
if (!ranges_.append(i + 1, after->ranges_.end()))
return false;
after->ranges_.shrinkBy(after->ranges_.end() - i - 1);
break;
}

// Split the linked list of use positions: uses at or before |pos| stay
// here, later uses move to |after|.
UsePosition *prev = NULL;
for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
if (usePos->pos > pos)
break;
prev = *usePos;
}
uses_.splitAfter(prev, &after->uses_);
return true;
}
// Insert |use| into the use list, keeping the list sorted by ascending
// position. Instructions are visited in reverse order, so in the common
// case the scan stops immediately and the use lands at the list head.
void
LiveInterval::addUse(UsePosition *use)
{
    UsePosition *insertAfterThis = NULL;
    UsePositionIterator iter(usesBegin());
    while (iter != usesEnd() && iter->pos < use->pos) {
        insertAfterThis = *iter;
        iter++;
    }

    if (!insertAfterThis)
        uses_.pushFront(use);
    else
        uses_.insertAfter(insertAfterThis, use);
}
// Find the first "real" use at or after |after|, skipping KEEPALIVE uses,
// which keep a register live without generating code. Returns NULL when no
// such use exists.
UsePosition *
LiveInterval::nextUseAfter(CodePosition after)
{
    for (UsePositionIterator iter(usesBegin()); iter != usesEnd(); iter++) {
        if (iter->pos < after)
            continue;
        LUse::Policy policy = iter->use->policy();
        JS_ASSERT(policy != LUse::RECOVERED_INPUT);
        if (policy == LUse::KEEPALIVE)
            continue;
        return *iter;
    }
    return NULL;
}
/*
 * Locate the position of the first "real" use of this interval at or after
 * the given code position. Non-"real" uses are currently just snapshots,
 * which keep virtual registers alive but do not result in the generation of
 * code that uses them. Returns CodePosition::MAX when no such use exists.
 */
CodePosition
LiveInterval::nextUsePosAfter(CodePosition after)
{
    UsePosition *use = nextUseAfter(after);
    if (!use)
        return CodePosition::MAX;
    return use->pos;
}
/*
 * Find the position of the first use of this interval that is incompatible
 * with the provided allocation. For example, a use with a REGISTER policy is
 * incompatible with a stack slot allocation. Returns CodePosition::MAX when
 * every use is compatible.
 */
CodePosition
LiveInterval::firstIncompatibleUse(LAllocation alloc)
{
    UsePositionIterator iter(usesBegin());
    for (; iter != usesEnd(); iter++) {
        if (UseCompatibleWith(iter->use, alloc))
            continue;
        return iter->pos;
    }
    return CodePosition::MAX;
}
// Return the interval of this register covering |pos|, or NULL if none
// does. Intervals are kept sorted by ascending start, so the search stops
// as soon as |pos| precedes an interval's end without being covered.
LiveInterval *
VirtualRegister::intervalFor(CodePosition pos)
{
    for (LiveInterval **it = intervals_.begin(); it != intervals_.end(); it++) {
        LiveInterval *interval = *it;
        if (interval->covers(pos))
            return interval;
        if (pos < interval->end())
            break;
    }
    return NULL;
}
// Return this register's initial interval. Every initialized register owns
// at least one interval (created by init()), so the list is never empty.
LiveInterval *
VirtualRegister::getFirstInterval()
{
    JS_ASSERT(intervals_.length() > 0);
    return intervals_[0];
}
// Dummy function to instantiate LiveRangeAllocator for each template instance.
// It forces the compiler to emit the template's member definitions in this
// translation unit; the boolean result of buildLivenessInfo() is deliberately
// ignored. NOTE(review): appears never to be called at runtime — confirm.
void
EnsureLiveRangeAllocatorInstantiation(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
{
LiveRangeAllocator<LinearScanVirtualRegister> allocator(mir, lir, graph);
allocator.buildLivenessInfo();
}
#ifdef DEBUG
// DEBUG-only helper: report whether the instruction following |ins| in
// |block| has any fixed-register use operands. Assumes a following
// instruction exists (iter is advanced once and then dereferenced) —
// TODO confirm callers guarantee |ins| is never the block's last instruction.
static inline bool
NextInstructionHasFixedUses(LBlock *block, LInstruction *ins)
{
LInstructionIterator iter(block->begin(ins));
iter++;
for (LInstruction::InputIterator alloc(**iter); alloc.more(); alloc.next()) {
if (alloc->isUse() && alloc->toUse()->isFixedRegister())
return true;
}
return false;
}
// Returns true iff |ins| has a definition or temp whose MUST_REUSE_INPUT
// policy points back at the given |use| operand. (DEBUG-only helper.)
static bool
IsInputReused(LInstruction *ins, LUse *use)
{
    for (size_t d = 0; d < ins->numDefs(); d++) {
        LDefinition *def = ins->getDef(d);
        if (def->policy() != LDefinition::MUST_REUSE_INPUT)
            continue;
        if (ins->getOperand(def->getReusedInput())->toUse() == use)
            return true;
    }

    for (size_t t = 0; t < ins->numTemps(); t++) {
        LDefinition *temp = ins->getTemp(t);
        if (temp->policy() != LDefinition::MUST_REUSE_INPUT)
            continue;
        if (ins->getOperand(temp->getReusedInput())->toUse() == use)
            return true;
    }

    return false;
}
#endif
/*
 * This function pre-allocates and initializes as much global state as possible
 * to avoid littering the algorithms with memory management cruft: the liveIn
 * bitset array, one fixed interval per physical register (plus their union),
 * and one VirtualRegister record per LIR definition, temp, and phi.
 * Returns false on allocation failure or cancellation.
 */
template <typename VREG>
bool
LiveRangeAllocator<VREG>::init()
{
if (!RegisterAllocator::init())
return false;

// One live-in bitset slot per block id; filled in by buildLivenessInfo().
liveIn = lir->mir()->allocate<BitSet*>(graph.numBlockIds());
if (!liveIn)
return false;

// Initialize fixed intervals: one pre-allocated interval per physical
// register, each permanently bound to that register.
// NOTE(review): the results of these |new| calls are not null-checked,
// unlike the other allocations in this function — confirm the allocator
// used here is infallible.
for (size_t i = 0; i < AnyRegister::Total; i++) {
AnyRegister reg = AnyRegister::FromCode(i);
LiveInterval *interval = new LiveInterval(0);
interval->setAllocation(LAllocation(reg));
fixedIntervals[i] = interval;
}

// Union of all fixed ranges, used for fast intersection tests.
fixedIntervalsUnion = new LiveInterval(0);

if (!vregs.init(lir->mir(), graph.numVirtualRegisters()))
return false;

// Build virtual register objects
for (size_t i = 0; i < graph.numBlocks(); i++) {
if (mir->shouldCancel("LSRA create data structures (main loop)"))
return false;

LBlock *block = graph.getBlock(i);
for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
// Register each non-passthrough output definition.
for (size_t j = 0; j < ins->numDefs(); j++) {
LDefinition *def = ins->getDef(j);
if (def->policy() != LDefinition::PASSTHROUGH) {
uint32 reg = def->virtualRegister();
if (!vregs[reg].init(reg, block, *ins, def, /* isTemp */ false))
return false;
}
}

// Register each real (non-bogus) temp.
for (size_t j = 0; j < ins->numTemps(); j++) {
LDefinition *def = ins->getTemp(j);
if (def->isBogusTemp())
continue;
if (!vregs[def].init(def->virtualRegister(), block, *ins, def, /* isTemp */ true))
return false;
}
}
// Register each phi's single output definition under the phi's id.
for (size_t j = 0; j < block->numPhis(); j++) {
LPhi *phi = block->getPhi(j);
LDefinition *def = phi->getDef(0);
if (!vregs[def].init(phi->id(), block, phi, def, /* isTemp */ false))
return false;
}
}

return true;
}
/*
* This function builds up liveness intervals for all virtual registers
* defined in the function. Additionally, it populates the liveIn array with
* information about which registers are live at the beginning of a block, to
* aid resolution and reification in a later phase.
*
* The algorithm is based on the one published in:
*
* Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
* SSA Form." Proceedings of the International Symposium on Code Generation
* and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
*
* The algorithm operates on blocks ordered such that dominators of a block
* are before the block itself, and such that all blocks of a loop are
* contiguous. It proceeds backwards over the instructions in this order,
* marking registers live at their uses, ending their live intervals at
* definitions, and recording which registers are live at the top of every
* block. To deal with loop backedges, variables live at the beginning of
* a loop gain an interval covering the entire loop.
*/
template <typename VREG>
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
if (!init())
return false;
Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
BitSet *loopDone = BitSet::New(graph.numBlockIds());
if (!loopDone)
return false;
for (size_t i = graph.numBlocks(); i > 0; i--) {
if (mir->shouldCancel("LSRA Build Liveness Info (main loop)"))
return false;
LBlock *block = graph.getBlock(i - 1);
MBasicBlock *mblock = block->mir();
BitSet *live = BitSet::New(graph.numVirtualRegisters());
if (!live)
return false;
liveIn[mblock->id()] = live;
// Propagate liveIn from our successors to us
for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
// Skip backedges, as we fix them up at the loop header.
if (mblock->id() < successor->id())
live->insertAll(liveIn[successor->id()]);
}
// Add successor phis
if (mblock->successorWithPhis()) {
LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
LPhi *phi = phiSuccessor->getPhi(j);
LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
uint32 reg = use->toUse()->virtualRegister();
live->insert(reg);
}
}
// Variables are assumed alive for the entire block, a define shortens
// the interval to the point of definition.
for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
outputOf(block->lastId()).next()))
{
return false;
}
}
// Shorten the front end of live intervals for live variables to their
// point of definition, if found.
for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
// Calls may clobber registers, so force a spill and reload around the callsite.
if (ins->isCall()) {
for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
return false;
}
}
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
LDefinition *def = ins->getDef(i);
CodePosition from;
if (def->policy() == LDefinition::PRESET && def->output()->isRegister()) {
// The fixed range covers the current instruction so the
// interval for the virtual register starts at the next
// instruction. If the next instruction has a fixed use,
// this can lead to unnecessary register moves. To avoid
// special handling for this, assert the next instruction
// has no fixed uses. defineFixed guarantees this by inserting
// an LNop.
JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
AnyRegister reg = def->output()->toRegister();
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
return false;
from = outputOf(*ins).next();
} else {
from = inputOf(*ins);
}
if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
// MUST_REUSE_INPUT is implemented by allocating an output
// register and moving the input to it. Register hints are
// used to avoid unnecessary moves. We give the input an
// LUse::ANY policy to avoid allocating a register for the
// input.
LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
JS_ASSERT(inputUse->policy() == LUse::REGISTER);
JS_ASSERT(inputUse->usedAtStart());
*inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
}
LiveInterval *interval = vregs[def].getInterval(0);
interval->setFrom(from);
// Ensure that if there aren't any uses, there's at least
// some interval for the output to go into.
if (interval->numRanges() == 0) {
if (!interval->addRangeAtHead(from, from.next()))
return false;
}
live->remove(def->virtualRegister());
}
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition *temp = ins->getTemp(i);
if (temp->isBogusTemp())
continue;
if (ins->isCall()) {
JS_ASSERT(temp->isPreset());
continue;
}
if (temp->policy() == LDefinition::PRESET) {
AnyRegister reg = temp->output()->toRegister();
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
return false;
} else {
if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
return false;
}
}
DebugOnly<bool> hasUseRegister = false;
DebugOnly<bool> hasUseRegisterAtStart = false;
for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
if (alloc->isUse()) {
LUse *use = alloc->toUse();
// The first instruction, LLabel, has no uses.
JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));
// Call uses should always be at-start or fixed, since the fixed intervals
// use all registers.
JS_ASSERT_IF(ins->isCall() && !alloc.isSnapshotInput(),
use->isFixedRegister() || use->usedAtStart());
#ifdef DEBUG
// Don't allow at-start call uses if there are temps of the same kind,
// so that we don't assign the same register.
if (ins->isCall() && use->usedAtStart()) {
for (size_t i = 0; i < ins->numTemps(); i++)
JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
}
// If there are both useRegisterAtStart(x) and useRegister(y)
// uses, we may assign the same register to both operands due to
// interval splitting (bug 772830). Don't allow this for now.
if (use->policy() == LUse::REGISTER) {
if (use->usedAtStart()) {
if (!IsInputReused(*ins, use))
hasUseRegisterAtStart = true;
} else {
hasUseRegister = true;
}
}
JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif
// Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
if (use->policy() == LUse::RECOVERED_INPUT)
continue;
CodePosition to;
if (use->isFixedRegister()) {
JS_ASSERT(!use->usedAtStart());
AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
return false;
to = inputOf(*ins);
} else {
to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
}
LiveInterval *interval = vregs[use].getInterval(0);
if (!interval->addRangeAtHead(inputOf(block->firstId()), to))
return false;
interval->addUse(new UsePosition(use, to));
live->insert(use->virtualRegister());
}
}
}
// Phis have simultaneous assignment semantics at block begin, so at
// the beginning of the block we can be sure that liveIn does not
// contain any phi outputs.
for (unsigned int i = 0; i < block->numPhis(); i++) {
LDefinition *def = block->getPhi(i)->getDef(0);
if (live->contains(def->virtualRegister())) {
live->remove(def->virtualRegister());
} else {
// This is a dead phi, so add a dummy range over all phis. This
// can go away if we have an earlier dead code elimination pass.
if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
outputOf(block->firstId())))
{
return false;
}
}
}
if (mblock->isLoopHeader()) {
// A divergence from the published algorithm is required here, as
// our block order does not guarantee that blocks of a loop are
// contiguous. As a result, a single live interval spanning the
// loop is not possible. Additionally, we require liveIn in a later
// pass for resolution, so that must also be fixed up here.
MBasicBlock *loopBlock = mblock->backedge();
while (true) {
// Blocks must already have been visited to have a liveIn set.
JS_ASSERT(loopBlock->id() >= mblock->id());
// Add an interval for this entire loop block
CodePosition from = inputOf(loopBlock->lir()->firstId());
CodePosition to = outputOf(loopBlock->lir()->lastId()).next();
for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
return false;
}
// Fix up the liveIn set to account for the new interval
liveIn[loopBlock->id()]->insertAll(live);
// Make sure we don't visit this node again
loopDone->insert(loopBlock->id());
// If this is the loop header, any predecessors are either the
// backedge or out of the loop, so skip any predecessors of
// this block
if (loopBlock != mblock) {
for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
MBasicBlock *pred = loopBlock->getPredecessor(i);
if (loopDone->contains(pred->id()))
continue;
if (!loopWorkList.append(pred))
return false;
}
}
// Terminate loop if out of work.
if (loopWorkList.empty())
break;
// Grab the next block off the work list, skipping any OSR block.
do {
loopBlock = loopWorkList.popCopy();
} while (loopBlock->lir() == graph.osrBlock());
}
// Clear the done set for other loops
loopDone->clear();
}
JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
}
validateVirtualRegisters();
// If the script has an infinite loop, there may be no MReturn and therefore
// no fixed intervals. Add a small range to fixedIntervalsUnion so that the
// rest of the allocator can assume it has at least one range.
if (fixedIntervalsUnion->numRanges() == 0) {
if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
CodePosition(0, CodePosition::OUTPUT)))
{
return false;
}
}
return true;
}
#ifdef DEBUG
void
LiveInterval::validateRanges()
{
Range *prev = NULL;
for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
Range *range = &ranges_[i];
JS_ASSERT(range->from < range->to);
JS_ASSERT_IF(prev, prev->to <= range->from);
prev = range;
}
}
#endif // DEBUG

Просмотреть файл

@ -0,0 +1,519 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_ion_liverangeallocator_h__
#define js_ion_liverangeallocator_h__
#include "RegisterAllocator.h"
// Common structures and functions used by register allocators that operate on
// virtual register live ranges.
namespace js {
namespace ion {
// A constraint (or hint) on where a live interval may be allocated: no
// constraint, any register, a specific fixed allocation, or the same
// allocation as another virtual register at a given position.
class Requirement
{
  public:
    enum Kind {
        NONE,
        REGISTER,
        FIXED,
        SAME_AS_OTHER
    };

    Requirement()
      : kind_(NONE)
    { }

    Requirement(Kind kind)
      : kind_(kind)
    {
        // These have dedicated constructors.
        JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
    }

    Requirement(Kind kind, CodePosition at)
      : kind_(kind),
        position_(at)
    { }

    Requirement(LAllocation fixed)
      : kind_(FIXED),
        allocation_(fixed)
    { }

    // Only useful as a hint, encodes where the fixed requirement is used to
    // avoid allocating a fixed register too early.
    Requirement(LAllocation fixed, CodePosition at)
      : kind_(FIXED),
        allocation_(fixed),
        position_(at)
    { }

    // SAME_AS_OTHER: the other vreg is encoded as an LUse in allocation_.
    Requirement(uint32 vreg, CodePosition at)
      : kind_(SAME_AS_OTHER),
        allocation_(LUse(vreg, LUse::ANY)),
        position_(at)
    { }

    Kind kind() const {
        return kind_;
    }

    // Valid only when the stored allocation is not an LUse (e.g. FIXED).
    LAllocation allocation() const {
        JS_ASSERT(!allocation_.isUse());
        return allocation_;
    }

    // Valid only for SAME_AS_OTHER, whose allocation is an LUse wrapping the
    // other virtual register's id.
    uint32 virtualRegister() const {
        JS_ASSERT(allocation_.isUse());
        return allocation_.toUse()->virtualRegister();
    }

    CodePosition pos() const {
        return position_;
    }

    // Relative priority used by the allocator when ordering work; defined in
    // the allocator implementation.
    int priority() const;

  private:
    Kind kind_;
    LAllocation allocation_;
    CodePosition position_;
};
// Pairs a concrete LUse with the code position where that use occurs.
// Instances live on a LiveInterval's singly-linked use list.
struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition>
{
    LUse *use;
    CodePosition pos;

    UsePosition(LUse *use, CodePosition pos) :
        use(use),
        pos(pos)
    { }
};
typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
// Returns whether |alloc| satisfies the policy of |use|. Note that a FIXED
// use accepts any register here: the actual fixed constraint is enforced
// through fixed intervals, and the UsePosition only serves as a hint.
static inline bool
UseCompatibleWith(const LUse *use, LAllocation alloc)
{
    switch (use->policy()) {
      case LUse::ANY:
      case LUse::KEEPALIVE:
        // These policies tolerate either a register or a memory location.
        return alloc.isRegister() || alloc.isMemory();
      case LUse::REGISTER:
        return alloc.isRegister();
      case LUse::FIXED:
        // Fixed uses are handled using fixed intervals. The
        // UsePosition is only used as hint.
        return alloc.isRegister();
      default:
        JS_NOT_REACHED("Unknown use policy");
    }
    return false;
}
#ifdef DEBUG
// Returns whether |alloc| is a legal allocation for definition |def| on
// instruction |ins|. Phis are special-cased: their outputs may live either
// in a register or in a stack slot of matching width.
static inline bool
DefinitionCompatibleWith(LInstruction *ins, const LDefinition *def, LAllocation alloc)
{
    if (ins->isPhi()) {
        if (def->type() == LDefinition::DOUBLE)
            return alloc.isFloatReg() || alloc.kind() == LAllocation::DOUBLE_SLOT;
        return alloc.isGeneralReg() || alloc.kind() == LAllocation::STACK_SLOT;
    }

    switch (def->policy()) {
      case LDefinition::DEFAULT:
        if (!alloc.isRegister())
            return false;
        // The register class must match the definition's type.
        return alloc.isFloatReg() == (def->type() == LDefinition::DOUBLE);
      case LDefinition::PRESET:
        // Must be exactly the preset allocation.
        return alloc == *def->output();
      case LDefinition::MUST_REUSE_INPUT:
        if (!alloc.isRegister() || !ins->numOperands())
            return false;
        return alloc == *ins->getOperand(def->getReusedInput());
      case LDefinition::PASSTHROUGH:
        return true;
      default:
        JS_NOT_REACHED("Unknown definition policy");
    }
    return false;
}
#endif // DEBUG
/*
* A live interval is a set of disjoint ranges of code positions where a
* virtual register is live. Register allocation operates on these intervals,
* splitting them as necessary and assigning allocations to them as it runs.
*/
class LiveInterval
  : public InlineListNode<LiveInterval>,
    public TempObject
{
  public:
    /*
     * A range is a contiguous sequence of CodePositions where the virtual
     * register associated with this interval is live.
     */
    struct Range {
        Range(CodePosition f, CodePosition t)
          : from(f),
            to(t)
        {
            JS_ASSERT(from < to);
        }
        CodePosition from;

        // The end of this range, exclusive.
        CodePosition to;
    };

  private:
    // Disjoint ranges, stored in descending position order: ranges_[0]
    // covers the latest code positions and ranges_.back() the earliest
    // (see start()/end() and validateRanges()).
    Vector<Range, 1, IonAllocPolicy> ranges_;
    LAllocation alloc_;
    uint32 vreg_;
    // Position of this interval within its vreg's interval list.
    uint32 index_;
    Requirement requirement_;
    Requirement hint_;
    InlineForwardList<UsePosition> uses_;
    // Cache for lastProcessedRangeIfValid(); size_t(-1) means "none yet".
    size_t lastProcessedRange_;

  public:
    LiveInterval(uint32 vreg, uint32 index)
      : vreg_(vreg),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

    // Interval not associated with any virtual register (see hasVreg()).
    LiveInterval(uint32 index)
      : vreg_(UINT32_MAX),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

    bool addRange(CodePosition from, CodePosition to);
    bool addRangeAtHead(CodePosition from, CodePosition to);
    void setFrom(CodePosition from);
    CodePosition intersect(LiveInterval *other);
    bool covers(CodePosition pos);
    CodePosition nextCoveredAfter(CodePosition pos);

    // Because ranges_ is in descending order, the interval's start is the
    // 'from' of the last range and its end the 'to' of the first.
    CodePosition start() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.back().from;
    }

    CodePosition end() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.begin()->to;
    }

    size_t numRanges() const {
        return ranges_.length();
    }

    const Range *getRange(size_t i) const {
        return &ranges_[i];
    }

    void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
        // If the range starts after pos, we may not be able to use
        // it in the next lastProcessedRangeIfValid call.
        JS_ASSERT(ranges_[range].from <= pos);
        lastProcessedRange_ = range;
    }

    // Returns the cached range index if it is still a valid starting point
    // for a scan at |pos|; otherwise falls back to the earliest range.
    size_t lastProcessedRangeIfValid(CodePosition pos) const {
        if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
            return lastProcessedRange_;
        return ranges_.length() - 1;
    }

    LAllocation *getAllocation() {
        return &alloc_;
    }

    void setAllocation(LAllocation alloc) {
        alloc_ = alloc;
    }

    bool hasVreg() const {
        return vreg_ != UINT32_MAX;
    }

    uint32 vreg() const {
        JS_ASSERT(hasVreg());
        return vreg_;
    }

    uint32 index() const {
        return index_;
    }

    void setIndex(uint32 index) {
        index_ = index;
    }

    Requirement *requirement() {
        return &requirement_;
    }

    void setRequirement(const Requirement &requirement) {
        // A SAME_AS_OTHER requirement complicates regalloc too much; it
        // should only be used as hint.
        JS_ASSERT(requirement.kind() != Requirement::SAME_AS_OTHER);

        // Fixed registers are handled with fixed intervals, so fixed requirements
        // are only valid for non-register allocations.
        JS_ASSERT_IF(requirement.kind() == Requirement::FIXED,
                     !requirement.allocation().isRegister());

        requirement_ = requirement;
    }

    Requirement *hint() {
        return &hint_;
    }

    void setHint(const Requirement &hint) {
        hint_ = hint;
    }

    bool isSpill() const {
        return alloc_.isStackSlot();
    }

    // Split this interval at |pos|, moving the tail into |after|.
    bool splitFrom(CodePosition pos, LiveInterval *after);

    void addUse(UsePosition *use);
    UsePosition *nextUseAfter(CodePosition pos);
    CodePosition nextUsePosAfter(CodePosition pos);
    CodePosition firstIncompatibleUse(LAllocation alloc);

    UsePositionIterator usesBegin() const {
        return uses_.begin();
    }

    UsePositionIterator usesEnd() const {
        return uses_.end();
    }

#ifdef DEBUG
    void validateRanges();
#endif
};
/*
* Represents all of the register allocation state associated with a virtual
* register, including all associated intervals and pointers to relevant LIR
* structures.
*/
class VirtualRegister
{
    uint32 id_;
    LBlock *block_;
    LInstruction *ins_;
    LDefinition *def_;
    // Intervals sorted by ascending start position (see addInterval()).
    Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;

    // Whether def_ is a temp or an output.
    bool isTemp_ : 1;

  public:
    // Initializes the register's identity and creates its initial (index 0)
    // interval. Returns false on OOM.
    bool init(uint32 id, LBlock *block, LInstruction *ins, LDefinition *def, bool isTemp) {
        id_ = id;
        block_ = block;
        ins_ = ins;
        def_ = def;
        isTemp_ = isTemp;
        LiveInterval *initial = new LiveInterval(def->virtualRegister(), 0);
        if (!initial)
            return false;
        return intervals_.append(initial);
    }
    uint32 id() {
        return id_;
    }
    LBlock *block() {
        return block_;
    }
    LInstruction *ins() {
        return ins_;
    }
    LDefinition *def() const {
        return def_;
    }
    LDefinition::Type type() const {
        return def()->type();
    }
    bool isTemp() const {
        return isTemp_;
    }
    size_t numIntervals() const {
        return intervals_.length();
    }
    LiveInterval *getInterval(size_t i) const {
        return intervals_[i];
    }
    LiveInterval *lastInterval() const {
        JS_ASSERT(numIntervals() > 0);
        return getInterval(numIntervals() - 1);
    }
    // Inserts |interval| keeping intervals_ sorted by ascending start
    // position; every interval after the insertion point has its cached
    // index bumped so it stays consistent with its vector position.
    bool addInterval(LiveInterval *interval) {
        JS_ASSERT(interval->numRanges());

        // Preserve ascending order for faster lookups.
        LiveInterval **found = NULL;
        LiveInterval **i;
        for (i = intervals_.begin(); i != intervals_.end(); i++) {
            if (!found && interval->start() < (*i)->start())
                found = i;
            if (found)
                (*i)->setIndex((*i)->index() + 1);
        }
        if (!found)
            found = intervals_.end();
        return intervals_.insert(found, interval);
    }
    bool isDouble() const {
        return def_->type() == LDefinition::DOUBLE;
    }

    LiveInterval *intervalFor(CodePosition pos);
    LiveInterval *getFirstInterval();
};
// Index of the virtual registers in a graph. VREG is a subclass of
// VirtualRegister extended with any allocator specific state for the vreg.
template <typename VREG>
class VirtualRegisterMap
{
  private:
    VREG *vregs_;
    uint32 numVregs_;

  public:
    VirtualRegisterMap()
      : vregs_(NULL),
        numVregs_(0)
    { }

    // Allocates and zero-fills storage for |numVregs| entries; each VREG must
    // therefore tolerate being zero-initialized via memset. Returns false on
    // OOM.
    bool init(MIRGenerator *gen, uint32 numVregs) {
        vregs_ = gen->allocate<VREG>(numVregs);
        numVregs_ = numVregs;
        if (!vregs_)
            return false;
        memset(vregs_, 0, sizeof(VREG) * numVregs);
        return true;
    }

    VREG &operator[](unsigned int index) {
        JS_ASSERT(index < numVregs_);
        return vregs_[index];
    }
    // Look up the entry for the virtual register named by a use.
    VREG &operator[](const LAllocation *alloc) {
        JS_ASSERT(alloc->isUse());
        JS_ASSERT(alloc->toUse()->virtualRegister() < numVregs_);
        return vregs_[alloc->toUse()->virtualRegister()];
    }
    // Look up the entry for the virtual register named by a definition.
    VREG &operator[](const LDefinition *def) {
        JS_ASSERT(def->virtualRegister() < numVregs_);
        return vregs_[def->virtualRegister()];
    }
    uint32 numVirtualRegisters() const {
        return numVregs_;
    }
};
// Decodes the concrete register named by a fixed use, choosing the float or
// general register file based on the definition's type.
static inline AnyRegister
GetFixedRegister(LDefinition *def, LUse *use)
{
    if (def->type() == LDefinition::DOUBLE)
        return AnyRegister(FloatRegister::FromCode(use->registerCode()));
    return AnyRegister(Register::FromCode(use->registerCode()));
}
// On nunbox (32-bit) platforms, a Value occupies two vregs: a type half and
// a payload half. Returns whether |vreg| is one of those halves; always
// false elsewhere.
static inline bool
IsNunbox(VirtualRegister *vreg)
{
#ifdef JS_NUNBOX32
    LDefinition::Type type = vreg->type();
    return type == LDefinition::TYPE || type == LDefinition::PAYLOAD;
#else
    return false;
#endif
}
// Returns whether |reg| holds a GC thing: an object pointer, or on punbox
// (64-bit) platforms a boxed Value.
static inline bool
IsTraceable(VirtualRegister *reg)
{
    LDefinition::Type type = reg->type();
#ifdef JS_PUNBOX64
    return type == LDefinition::OBJECT || type == LDefinition::BOX;
#else
    return type == LDefinition::OBJECT;
#endif
}
typedef InlineList<LiveInterval>::iterator IntervalIterator;
typedef InlineList<LiveInterval>::reverse_iterator IntervalReverseIterator;
// Base class for allocators that operate on live ranges. VREG is the
// allocator-specific VirtualRegister subclass.
template <typename VREG>
class LiveRangeAllocator : public RegisterAllocator
{
  protected:
    // Computed information
    BitSet **liveIn;
    VirtualRegisterMap<VREG> vregs;
    // One interval per physical register, holding the positions where that
    // register is pinned by fixed uses/defs.
    FixedArityList<LiveInterval *, AnyRegister::Total> fixedIntervals;

    // Union of all ranges in fixedIntervals, used to quickly determine
    // whether an interval intersects with a fixed register.
    LiveInterval *fixedIntervalsUnion;

  public:
    LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
      : RegisterAllocator(mir, lir, graph),
        liveIn(NULL),
        fixedIntervalsUnion(NULL)
    {
    }

    bool buildLivenessInfo();

  protected:
    bool init();

    // Record [from, to) as a fixed range for |reg|, both on the per-register
    // interval and on the union interval.
    bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
        if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
            return false;
        return fixedIntervalsUnion->addRangeAtHead(from, to);
    }

    // Debug-only sanity check: each vreg's intervals are indexed correctly,
    // are non-overlapping and in ascending order, and their internal ranges
    // are well-formed.
    void validateVirtualRegisters()
    {
#ifdef DEBUG
        for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
            VirtualRegister *reg = &vregs[i];

            LiveInterval *prev = NULL;
            for (size_t j = 0; j < reg->numIntervals(); j++) {
                LiveInterval *interval = reg->getInterval(j);
                JS_ASSERT(interval->vreg() == i);
                JS_ASSERT(interval->index() == j);

                if (interval->numRanges() == 0)
                    continue;

                JS_ASSERT_IF(prev, prev->end() <= interval->start());
                interval->validateRanges();

                prev = interval;
            }
        }
#endif
    }
};
} // namespace ion
} // namespace js
#endif

Просмотреть файл

@ -8,4 +8,4 @@ files after the copy step.
The upstream repository is https://git.xiph.org/opus.git
The git tag/revision used was 1.0.0.
The git tag/revision used was v1.0.0.

Просмотреть файл

@ -0,0 +1,40 @@
From 9345aaa5ca1c2fb7d62981b2a538e0ce20612c38 Mon Sep 17 00:00:00 2001
From: Jean-Marc Valin <jmvalin@jmvalin.ca>
Date: Fri, 30 Nov 2012 17:36:36 -0500
Subject: [PATCH] Fixes an out-of-bounds read issue with the padding handling
code
This was reported by Juri Aedla and is limited to reading memory up
to about 60 kB beyond the compressed buffer. This can only be triggered
by a compressed packet more than about 16 MB long, so it's not a problem
for RTP. In theory, it *could* crash an Ogg decoder if the memory just after
the incoming packet is out-of-range.
---
src/opus_decoder.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/src/opus_decoder.c b/src/opus_decoder.c
index 167e4e4..0be6730 100644
--- a/src/opus_decoder.c
+++ b/src/opus_decoder.c
@@ -641,16 +641,14 @@ static int opus_packet_parse_impl(const unsigned char *data, opus_int32 len,
/* Padding flag is bit 6 */
if (ch&0x40)
{
- int padding=0;
int p;
do {
if (len<=0)
return OPUS_INVALID_PACKET;
p = *data++;
len--;
- padding += p==255 ? 254: p;
+ len -= p==255 ? 254: p;
} while (p==255);
- len -= padding;
}
if (len<0)
return OPUS_INVALID_PACKET;
--
1.7.11.7

Просмотреть файл

@ -595,16 +595,14 @@ static int opus_packet_parse_impl(const unsigned char *data, int len,
/* Padding flag is bit 6 */
if (ch&0x40)
{
int padding=0;
int p;
do {
if (len<=0)
return OPUS_INVALID_PACKET;
p = *data++;
len--;
padding += p==255 ? 254: p;
len -= p==255 ? 254: p;
} while (p==255);
len -= padding;
}
if (len<0)
return OPUS_INVALID_PACKET;

Просмотреть файл

@ -63,4 +63,5 @@ sed -e "s/^The git tag\/revision used was .*/The git tag\/revision used was ${ve
mv ${TARGET}/README_MOZILLA+ ${TARGET}/README_MOZILLA
# apply outstanding local patches
patch -p3 < ./bug776661.patch
patch -p3 < bug776661.patch
patch -p1 < padding.patch

Просмотреть файл

@ -56,7 +56,6 @@
#include "nsTArray.h"
#include "nsXPIDLString.h"
#include "prlog.h"
#include "prmem.h"
#include "rdf.h"
#include "rdfutil.h"
#include "nsReadableUtils.h"
@ -356,7 +355,7 @@ RDFContentSinkImpl::~RDFContentSinkImpl()
delete mContextStack;
}
PR_FREEIF(mText);
moz_free(mText);
if (--gRefCnt == 0) {
@ -763,7 +762,7 @@ RDFContentSinkImpl::AddText(const PRUnichar* aText, int32_t aLength)
{
// Create buffer when we first need it
if (0 == mTextSize) {
mText = (PRUnichar *) PR_MALLOC(sizeof(PRUnichar) * 4096);
mText = (PRUnichar *) moz_malloc(sizeof(PRUnichar) * 4096);
if (!mText) {
return NS_ERROR_OUT_OF_MEMORY;
}
@ -781,7 +780,7 @@ RDFContentSinkImpl::AddText(const PRUnichar* aText, int32_t aLength)
int32_t newSize = (2 * mTextSize > (mTextSize + aLength)) ?
(2 * mTextSize) : (mTextSize + aLength);
PRUnichar* newText =
(PRUnichar *) PR_REALLOC(mText, sizeof(PRUnichar) * newSize);
(PRUnichar *) moz_realloc(mText, sizeof(PRUnichar) * newSize);
if (!newText)
return NS_ERROR_OUT_OF_MEMORY;
mTextSize = newSize;

Просмотреть файл

@ -18,13 +18,6 @@
#include "nsXPIDLString.h"
#include "nsRDFCID.h"
#include "rdfutil.h"
#include "plhash.h"
#include "plstr.h"
#include "prlong.h"
#include "prlog.h"
#include "prmem.h"
#include "prprf.h"
#include "prio.h"
#include "rdf.h"
#include "nsEnumeratorUtils.h"
#include "nsIURL.h"
@ -871,7 +864,6 @@ FileSystemDataSource::GetVolumeList(nsISimpleEnumerator** aResult)
int32_t driveType;
PRUnichar drive[32];
int32_t volNum;
char *url;
for (volNum = 0; volNum < 26; volNum++)
{
@ -880,15 +872,13 @@ FileSystemDataSource::GetVolumeList(nsISimpleEnumerator** aResult)
driveType = GetDriveTypeW(drive);
if (driveType != DRIVE_UNKNOWN && driveType != DRIVE_NO_ROOT_DIR)
{
if (nullptr != (url = PR_smprintf("file:///%c|/", volNum + 'A')))
{
rv = mRDFService->GetResource(nsDependentCString(url),
getter_AddRefs(vol));
PR_Free(url);
nsAutoCString url;
url.AppendPrintf("file:///%c|/", volNum + 'A');
rv = mRDFService->GetResource(url, getter_AddRefs(vol));
if (NS_FAILED(rv))
return rv;
if (NS_FAILED(rv)) return rv;
volumes->AppendElement(vol);
}
volumes->AppendElement(vol);
}
}
#endif
@ -901,7 +891,6 @@ FileSystemDataSource::GetVolumeList(nsISimpleEnumerator** aResult)
#ifdef XP_OS2
ULONG ulDriveNo = 0;
ULONG ulDriveMap = 0;
char *url;
rv = DosQueryCurrentDisk(&ulDriveNo, &ulDriveMap);
if (NS_FAILED(rv))
@ -911,14 +900,12 @@ FileSystemDataSource::GetVolumeList(nsISimpleEnumerator** aResult)
{
if (((ulDriveMap << (31 - volNum)) >> 31))
{
if (nullptr != (url = PR_smprintf("file:///%c|/", volNum + 'A')))
{
rv = mRDFService->GetResource(nsDependentCString(url), getter_AddRefs(vol));
PR_Free(url);
nsAutoCString url;
url.AppendPrintf("file:///%c|/", volNum + 'A');
rv = mRDFService->GetResource(nsDependentCString(url), getter_AddRefs(vol));
if (NS_FAILED(rv)) return rv;
volumes->AppendElement(vol);
}
if (NS_FAILED(rv)) return rv;
volumes->AppendElement(vol);
}
}

Просмотреть файл

@ -170,6 +170,7 @@ let test_path = maketest("path", function path(test) {
test.ok(OS.Constants.Path, "OS.Constants.Path exists");
test.is(OS.Constants.Path.tmpDir, Services.dirsvc.get("TmpD", Components.interfaces.nsIFile).path, "OS.Constants.Path.tmpDir is correct");
test.is(OS.Constants.Path.profileDir, Services.dirsvc.get("ProfD", Components.interfaces.nsIFile).path, "OS.Constants.Path.profileDir is correct");
test.is(OS.Constants.Path.localProfileDir, Services.dirsvc.get("ProfLD", Components.interfaces.nsIFile).path, "OS.Constants.Path.localProfileDir is correct");
});
});

Просмотреть файл

@ -11,6 +11,7 @@
#include "base/basictypes.h"
/* This must occur *after* base/basictypes.h to avoid typedefs conflicts. */
#include "mozilla/Base64.h"
#include "mozilla/Util.h"
#include "mozilla/dom/ContentChild.h"
@ -26,6 +27,7 @@
#include "nsIDirectoryService.h"
#include "nsAppDirectoryServiceDefs.h"
#include "nsICategoryManager.h"
#include "nsDependentSubstring.h"
#include "nsXPIDLString.h"
#include "nsUnicharUtils.h"
#include "nsIStringEnumerator.h"
@ -97,8 +99,6 @@
#include "nsLocalHandlerApp.h"
#include "nsIRandomGenerator.h"
#include "plbase64.h"
#include "prmem.h"
#include "ContentChild.h"
#include "nsXULAppAPI.h"
@ -1305,20 +1305,14 @@ nsresult nsExternalAppHandler::SetUpTempFile(nsIChannel * aChannel)
rv = rg->GenerateRandomBytes(requiredBytesLength, &buffer);
NS_ENSURE_SUCCESS(rv, rv);
char *b64 = PL_Base64Encode(reinterpret_cast<const char *>(buffer),
requiredBytesLength, nullptr);
nsAutoCString tempLeafName;
nsDependentCSubstring randomData(reinterpret_cast<const char*>(buffer), requiredBytesLength);
rv = Base64Encode(randomData, tempLeafName);
NS_Free(buffer);
buffer = nullptr;
NS_ENSURE_SUCCESS(rv, rv);
if (!b64)
return NS_ERROR_OUT_OF_MEMORY;
NS_ASSERTION(strlen(b64) >= wantedFileNameLength,
"not enough bytes produced for conversion!");
nsAutoCString tempLeafName(b64, wantedFileNameLength);
PR_Free(b64);
b64 = nullptr;
tempLeafName.Truncate(wantedFileNameLength);
// Base64 characters are alphanumeric (a-zA-Z0-9) and '+' and '/', so we need
// to replace illegal characters -- notably '/'