merge mozilla-inbound to mozilla-central a=merge

This commit is contained in:
Carsten "Tomcat" Book 2015-06-01 15:00:24 +02:00
Родитель 8e6b2e5b18 0d2e591a3d
Коммит cfa2965ba6
152 изменённых файлов: 3746 добавлений и 2256 удалений

Просмотреть файл

@ -800,7 +800,11 @@ getParentCB(AtkObject *aAtkObj)
atkParent = parent ? AccessibleWrap::GetAtkObject(parent) : nullptr;
} else if (ProxyAccessible* proxy = GetProxy(aAtkObj)) {
ProxyAccessible* parent = proxy->Parent();
atkParent = parent ? GetWrapperFor(parent) : nullptr;
if (parent)
atkParent = GetWrapperFor(parent);
// Otherwise this should be the proxy for the tab's top level document.
atkParent = AccessibleWrap::GetAtkObject(proxy->OuterDocOfRemoteBrowser());
}
if (atkParent)
@ -859,6 +863,9 @@ getIndexInParentCB(AtkObject* aAtkObj)
if (ProxyAccessible* parent = proxy->Parent())
return parent->IndexOfEmbeddedChild(proxy);
if (proxy->OuterDocOfRemoteBrowser())
return 0;
return -1;
}

Просмотреть файл

@ -6,6 +6,10 @@
#include "ProxyAccessible.h"
#include "DocAccessibleParent.h"
#include "DocAccessible.h"
#include "mozilla/a11y/DocManager.h"
#include "mozilla/dom/Element.h"
#include "mozilla/dom/TabParent.h"
#include "mozilla/unused.h"
#include "mozilla/a11y/Platform.h"
#include "RelationType.h"
@ -963,5 +967,19 @@ ProxyAccessible::URLDocTypeMimeType(nsString& aURL, nsString& aDocType,
unused << mDoc->SendURLDocTypeMimeType(mID, &aURL, &aDocType, &aMimeType);
}
// Return the chrome-process Accessible for the <browser>/<iframe> element
// that embeds the remote document this proxy belongs to, or nullptr if the
// embedding frame or its document accessible can't be found.
Accessible*
ProxyAccessible::OuterDocOfRemoteBrowser() const
{
  auto tab = static_cast<dom::TabParent*>(mDoc->Manager());
  dom::Element* frame = tab->GetOwnerElement();
  NS_ASSERTION(frame, "why isn't the tab in a frame!");
  if (!frame)
    return nullptr;

  DocAccessible* chromeDoc = GetExistingDocAccessible(frame->OwnerDoc());
  // Fixed garbled assertion text ("tab in not accessible chromeDocument").
  NS_ASSERTION(chromeDoc, "accessible tab is not in an accessible chromeDocument");

  return chromeDoc ? chromeDoc->GetAccessible(frame) : nullptr;
}
}
}

Просмотреть файл

@ -18,6 +18,7 @@
namespace mozilla {
namespace a11y {
class Accessible;
class Attribute;
class DocAccessibleParent;
enum class RelationType;
@ -65,6 +66,8 @@ public:
*/
ProxyAccessible* Parent() const { return mParent; }
Accessible* OuterDocOfRemoteBrowser() const;
/**
* Get the role of the accessible we're proxying.
*/

Просмотреть файл

@ -59,10 +59,7 @@ public: // construction, destruction
* for it.
*/
bool IsIgnored();
inline bool HasPopup ()
{ return (NativeState() & mozilla::a11y::states::HASPOPUP); }
/**
* Returns this accessible's all children, adhering to "flat" accessibles by
* not returning their children.
@ -108,6 +105,8 @@ private:
bool mNativeInited;
};
Class GetTypeFromRole(roles::Role aRole);
} // namespace a11y
} // namespace mozilla

Просмотреть файл

@ -15,6 +15,7 @@
#import "mozHTMLAccessible.h"
#import "mozTextAccessible.h"
using namespace mozilla;
using namespace mozilla::a11y;
AccessibleWrap::
@ -33,8 +34,10 @@ AccessibleWrap::GetNativeObject()
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if (!mNativeInited && !mNativeObject && !IsDefunct() && !AncestorIsFlat())
mNativeObject = [[GetNativeType() alloc] initWithAccessible:this];
if (!mNativeInited && !mNativeObject && !IsDefunct() && !AncestorIsFlat()) {
uintptr_t accWrap = reinterpret_cast<uintptr_t>(this);
mNativeObject = [[GetNativeType() alloc] initWithAccessible:accWrap];
}
mNativeInited = true;
@ -59,51 +62,7 @@ AccessibleWrap::GetNativeType ()
if (IsXULTabpanels())
return [mozPaneAccessible class];
roles::Role role = Role();
switch (role) {
case roles::PUSHBUTTON:
case roles::SPLITBUTTON:
case roles::TOGGLE_BUTTON:
{
// if this button may show a popup, let's make it of the popupbutton type.
return HasPopup() ? [mozPopupButtonAccessible class] :
[mozButtonAccessible class];
}
case roles::PAGETAB:
return [mozButtonAccessible class];
case roles::CHECKBUTTON:
return [mozCheckboxAccessible class];
case roles::HEADING:
return [mozHeadingAccessible class];
case roles::PAGETABLIST:
return [mozTabsAccessible class];
case roles::ENTRY:
case roles::STATICTEXT:
case roles::CAPTION:
case roles::ACCEL_LABEL:
case roles::PASSWORD_TEXT:
// normal textfield (static or editable)
return [mozTextAccessible class];
case roles::TEXT_LEAF:
return [mozTextLeafAccessible class];
case roles::LINK:
return [mozLinkAccessible class];
case roles::COMBOBOX:
return [mozPopupButtonAccessible class];
default:
return [mozAccessible class];
}
return nil;
return GetTypeFromRole(Role());
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}
@ -274,3 +233,52 @@ AccessibleWrap::AncestorIsFlat()
// no parent was flat
return false;
}
// Map a Gecko accessibility role to the mozAccessible subclass used to
// represent it natively on OS X. Used both for local AccessibleWrap objects
// and for remote ProxyAccessibles, so it must depend only on the role.
Class
a11y::GetTypeFromRole(roles::Role aRole)
{
  NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;

  switch (aRole) {
    case roles::COMBOBOX:
    case roles::PUSHBUTTON:
    case roles::SPLITBUTTON:
    case roles::TOGGLE_BUTTON:
    case roles::PAGETAB: // merged: previously a duplicate branch returning the same class
      return [mozButtonAccessible class];

    case roles::CHECKBUTTON:
      return [mozCheckboxAccessible class];

    case roles::HEADING:
      return [mozHeadingAccessible class];

    case roles::PAGETABLIST:
      return [mozTabsAccessible class];

    case roles::ENTRY:
    case roles::STATICTEXT:
    case roles::CAPTION:
    case roles::ACCEL_LABEL:
    case roles::PASSWORD_TEXT:
      // normal textfield (static or editable)
      return [mozTextAccessible class];

    case roles::TEXT_LEAF:
      return [mozTextLeafAccessible class];

    case roles::LINK:
      return [mozLinkAccessible class];

    default:
      return [mozAccessible class];
  }

  return nil; // not reached: the switch's default case returns

  NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}

Просмотреть файл

@ -7,6 +7,7 @@
#import <Cocoa/Cocoa.h>
#include "Platform.h"
#include "ProxyAccessible.h"
#include "nsAppShell.h"
@ -34,13 +35,23 @@ PlatformShutdown()
}
void
ProxyCreated(ProxyAccessible*, uint32_t)
ProxyCreated(ProxyAccessible* aProxy, uint32_t)
{
// Pass in dummy state for now as retrieving proxy state requires IPC.
Class type = GetTypeFromRole(aProxy->Role());
uintptr_t accWrap = reinterpret_cast<uintptr_t>(aProxy) | IS_PROXY;
mozAccessible* mozWrapper = [[type alloc] initWithAccessible:accWrap];
aProxy->SetWrapper(reinterpret_cast<uintptr_t>(mozWrapper));
}
void
ProxyDestroyed(ProxyAccessible*)
ProxyDestroyed(ProxyAccessible* aProxy)
{
mozAccessible* wrapper =
reinterpret_cast<mozAccessible*>(aProxy->GetWrapper());
[wrapper expire];
[wrapper release];
aProxy->SetWrapper(0);
}
void

Просмотреть файл

@ -30,6 +30,7 @@ LOCAL_INCLUDES += [
'/accessible/base',
'/accessible/generic',
'/accessible/html',
'/accessible/ipc',
'/accessible/xul',
'/layout/generic',
'/layout/xul',

Просмотреть файл

@ -57,11 +57,14 @@ static const uintptr_t IS_PROXY = 1;
mozilla::a11y::role mRole;
}
// return the Accessible for this mozAccessible.
- (mozilla::a11y::AccessibleWrap*) getGeckoAccessible;
// return the Accessible for this mozAccessible if it exists.
- (mozilla::a11y::AccessibleWrap*)getGeckoAccessible;
// return the ProxyAccessible for this mozAccessible if it exists.
- (mozilla::a11y::ProxyAccessible*)getProxyAccessible;
// inits with the gecko owner.
- (id)initWithAccessible:(mozilla::a11y::AccessibleWrap*)geckoParent;
- (id)initWithAccessible:(uintptr_t)aGeckoObj;
// our accessible parent (AXParent)
- (id <mozAccessible>)parent;

Просмотреть файл

@ -60,15 +60,18 @@ GetClosestInterestingAccessible(id anObject)
@implementation mozAccessible
- (id)initWithAccessible:(AccessibleWrap*)geckoAccessible
- (id)initWithAccessible:(uintptr_t)aGeckoAccessible
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if ((self = [super init])) {
mGeckoAccessible = reinterpret_cast<uintptr_t>(geckoAccessible);
mRole = geckoAccessible->Role();
mGeckoAccessible = aGeckoAccessible;
if (aGeckoAccessible & IS_PROXY)
mRole = [self getProxyAccessible]->Role();
else
mRole = [self getGeckoAccessible]->Role();
}
return self;
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
@ -92,6 +95,15 @@ GetClosestInterestingAccessible(id anObject)
return reinterpret_cast<AccessibleWrap*>(mGeckoAccessible);
}
// Return the ProxyAccessible behind this wrapper, or nil when the tagged
// pointer in mGeckoAccessible refers to a local accessible instead.
- (mozilla::a11y::ProxyAccessible*)getProxyAccessible
{
  if (mGeckoAccessible & IS_PROXY) {
    // Strip the tag bit to recover the real pointer.
    return reinterpret_cast<ProxyAccessible*>(mGeckoAccessible & ~IS_PROXY);
  }

  return nil;
}
#pragma mark -

Просмотреть файл

@ -9,6 +9,9 @@
/* Simple subclasses for things like checkboxes, buttons, etc. */
@interface mozButtonAccessible : mozAccessible
{
}
- (BOOL)hasPopup;
- (void)click;
- (BOOL)isTab;
@end
@ -18,10 +21,6 @@
- (int)isChecked;
@end
/* Used for buttons that may pop up a menu. */
@interface mozPopupButtonAccessible : mozButtonAccessible
@end
/* Class for tabs - not individual tabs */
@interface mozTabsAccessible : mozAccessible
{

Просмотреть файл

@ -42,6 +42,7 @@ enum CheckboxValue {
NSAccessibilityEnabledAttribute, // required
NSAccessibilityFocusedAttribute, // required
NSAccessibilityTitleAttribute, // required
NSAccessibilityChildrenAttribute,
NSAccessibilityDescriptionAttribute,
#if DEBUG
@"AXMozDescription",
@ -57,15 +58,19 @@ enum CheckboxValue {
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if ([attribute isEqualToString:NSAccessibilityChildrenAttribute])
if ([attribute isEqualToString:NSAccessibilityChildrenAttribute]) {
if ([self hasPopup])
return [self children];
return nil;
}
if ([attribute isEqualToString:NSAccessibilityRoleDescriptionAttribute]) {
if ([self isTab])
return utils::LocalizedString(NS_LITERAL_STRING("tab"));
return NSAccessibilityRoleDescription([self role], nil);
}
return [super accessibilityAttributeValue:attribute];
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
@ -80,36 +85,49 @@ enum CheckboxValue {
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if ([self isEnabled])
if ([self isEnabled]) {
if ([self hasPopup])
return [NSArray arrayWithObjects:NSAccessibilityPressAction,
NSAccessibilityShowMenuAction,
nil];
return [NSArray arrayWithObject:NSAccessibilityPressAction];
}
return nil;
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}
- (NSString*)accessibilityActionDescription:(NSString*)action
- (NSString*)accessibilityActionDescription:(NSString*)action
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if ([action isEqualToString:NSAccessibilityPressAction]) {
if ([self isTab])
return utils::LocalizedString(NS_LITERAL_STRING("switch"));
return @"press button"; // XXX: localize this later?
}
if ([self hasPopup]) {
if ([action isEqualToString:NSAccessibilityShowMenuAction])
return @"show menu";
}
return nil;
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}
- (void)accessibilityPerformAction:(NSString*)action
- (void)accessibilityPerformAction:(NSString*)action
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK;
if ([action isEqualToString:NSAccessibilityPressAction])
if ([self isEnabled] && [action isEqualToString:NSAccessibilityPressAction]) {
// TODO: this should bring up the menu, but currently doesn't.
// once msaa and atk have merged better, they will implement
// the action needed to show the menu.
[self click];
}
NS_OBJC_END_TRY_ABORT_BLOCK;
}
@ -127,6 +145,12 @@ enum CheckboxValue {
return (accWrap && (accWrap->Role() == roles::PAGETAB));
}
// Whether the wrapped accessible advertises a popup (HASPOPUP native state).
// Only answers for a local accessible; returns NO when there is none.
- (BOOL)hasPopup
{
  AccessibleWrap* wrap = [self getGeckoAccessible];
  if (!wrap)
    return NO;

  return (wrap->NativeState() & mozilla::a11y::states::HASPOPUP) != 0;
}
@end
@implementation mozCheckboxAccessible
@ -170,91 +194,6 @@ enum CheckboxValue {
@end
@implementation mozPopupButtonAccessible
- (NSArray *)accessibilityAttributeNames
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
static NSArray *attributes = nil;
if (!attributes) {
attributes = [[NSArray alloc] initWithObjects:NSAccessibilityParentAttribute, // required
NSAccessibilityPositionAttribute, // required
NSAccessibilityRoleAttribute, // required
NSAccessibilitySizeAttribute, // required
NSAccessibilityWindowAttribute, // required
NSAccessibilityTopLevelUIElementAttribute, // required
NSAccessibilityHelpAttribute,
NSAccessibilityEnabledAttribute, // required
NSAccessibilityFocusedAttribute, // required
NSAccessibilityTitleAttribute, // required for popupmenus, and for menubuttons with a title
NSAccessibilityChildrenAttribute, // required
NSAccessibilityDescriptionAttribute, // required if it has no title attr
#if DEBUG
@"AXMozDescription",
#endif
nil];
}
return attributes;
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}
- (id)accessibilityAttributeValue:(NSString *)attribute
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if ([attribute isEqualToString:NSAccessibilityChildrenAttribute]) {
return [super children];
}
return [super accessibilityAttributeValue:attribute];
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}
- (NSArray *)accessibilityActionNames
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if ([self isEnabled]) {
return [NSArray arrayWithObjects:NSAccessibilityPressAction,
NSAccessibilityShowMenuAction,
nil];
}
return nil;
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}
- (NSString *)accessibilityActionDescription:(NSString *)action
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
if ([action isEqualToString:NSAccessibilityShowMenuAction])
return @"show menu";
return [super accessibilityActionDescription:action];
NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
}
- (void)accessibilityPerformAction:(NSString *)action
{
NS_OBJC_BEGIN_TRY_ABORT_BLOCK;
// both the ShowMenu and Click action do the same thing.
if ([self isEnabled]) {
// TODO: this should bring up the menu, but currently doesn't.
// once msaa and atk have merged better, they will implement
// the action needed to show the menu.
[super click];
}
NS_OBJC_END_TRY_ABORT_BLOCK;
}
@end
@implementation mozTabsAccessible
- (void)dealloc

Просмотреть файл

@ -37,6 +37,7 @@ SEARCH_PATHS = [
'python/which',
'python/pystache',
'python/pyyaml/lib',
'python/requests',
'build',
'build/pymake',
'config',

Просмотреть файл

@ -65,7 +65,6 @@ public class FileCursor extends AbstractWindowedCursor {
nCount = 1;
}
mRowIdColumnIndex = 0;
}
}
}

Просмотреть файл

@ -66,7 +66,6 @@ public class FileCursor extends AbstractWindowedCursor {
nCount = 1;
}
mRowIdColumnIndex = 0;
}
}
}

Просмотреть файл

@ -2228,7 +2228,7 @@ DOMGCSliceCallback(JSRuntime *aRt, JS::GCProgress aProgress, const JS::GCDescrip
if (sPostGCEventsToConsole) {
NS_NAMED_LITERAL_STRING(kFmt, "GC(T+%.1f) ");
nsString prefix, gcstats;
gcstats.Adopt(aDesc.formatMessage(aRt));
gcstats.Adopt(aDesc.formatSummaryMessage(aRt));
prefix.Adopt(nsTextFormatter::smprintf(kFmt.get(),
double(delta) / PR_USEC_PER_SEC));
nsString msg = prefix + gcstats;
@ -2304,6 +2304,15 @@ DOMGCSliceCallback(JSRuntime *aRt, JS::GCProgress aProgress, const JS::GCDescrip
nsCycleCollector_dispatchDeferredDeletion();
}
if (sPostGCEventsToConsole) {
nsString gcstats;
gcstats.Adopt(aDesc.formatSliceMessage(aRt));
nsCOMPtr<nsIConsoleService> cs = do_GetService(NS_CONSOLESERVICE_CONTRACTID);
if (cs) {
cs->LogStringMessage(gcstats.get());
}
}
break;
default:

Просмотреть файл

@ -10,6 +10,7 @@
#include <unistd.h>
#endif
#include "mozilla/ArrayUtils.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/dom/BlobSet.h"
#include "mozilla/dom/File.h"
#include "mozilla/dom/XMLHttpRequestUploadBinding.h"
@ -3993,26 +3994,30 @@ ArrayBufferBuilder::append(const uint8_t *aNewData, uint32_t aDataLen,
{
MOZ_ASSERT(!mMapPtr);
CheckedUint32 neededCapacity = mLength;
neededCapacity += aDataLen;
if (!neededCapacity.isValid()) {
return false;
}
if (mLength + aDataLen > mCapacity) {
uint32_t newcap;
CheckedUint32 newcap = mCapacity;
// Double while under aMaxGrowth or if not specified.
if (!aMaxGrowth || mCapacity < aMaxGrowth) {
newcap = mCapacity * 2;
newcap *= 2;
} else {
newcap = mCapacity + aMaxGrowth;
newcap += aMaxGrowth;
}
// But make sure there's always enough to satisfy our request.
if (newcap < mLength + aDataLen) {
newcap = mLength + aDataLen;
}
// Did we overflow?
if (newcap < mCapacity) {
if (!newcap.isValid()) {
return false;
}
if (!setCapacity(newcap)) {
// But make sure there's always enough to satisfy our request.
if (newcap.value() < neededCapacity.value()) {
newcap = neededCapacity;
}
if (!setCapacity(newcap.value())) {
return false;
}
}

Просмотреть файл

@ -3454,17 +3454,21 @@ struct MOZ_STACK_CLASS CanvasBidiProcessor : public nsBidiPresUtils::BidiProcess
const gfxTextRun::DetailedGlyph *d = mTextRun->GetDetailedGlyphs(i);
if (glyphs[i].IsMissing() && d->mAdvance > 0) {
newGlyph.mIndex = 0;
if (rtl) {
inlinePos = baselineOriginInline - advanceSum -
d->mAdvance * devUnitsPerAppUnit;
} else {
inlinePos = baselineOriginInline + advanceSum;
if (glyphs[i].IsMissing()) {
if (d->mAdvance > 0) {
// Perhaps we should render a hexbox here, but for now
// we just draw the font's .notdef glyph. (See bug 808288.)
newGlyph.mIndex = 0;
if (rtl) {
inlinePos = baselineOriginInline - advanceSum -
d->mAdvance * devUnitsPerAppUnit;
} else {
inlinePos = baselineOriginInline + advanceSum;
}
blockPos = baselineOriginBlock;
advanceSum += d->mAdvance * devUnitsPerAppUnit;
glyphBuf.push_back(newGlyph);
}
blockPos = baselineOriginBlock;
advanceSum += d->mAdvance * devUnitsPerAppUnit;
glyphBuf.push_back(newGlyph);
continue;
}

Просмотреть файл

@ -4,8 +4,10 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/dom/SVGAnimationElement.h"
#include "nsSMILAnimationFunction.h"
#include "mozilla/dom/SVGAnimationElement.h"
#include "mozilla/Move.h"
#include "nsISMILAttr.h"
#include "nsSMILParserUtils.h"
#include "nsSMILNullType.h"
@ -267,9 +269,7 @@ nsSMILAnimationFunction::ComposeResult(const nsISMILAttr& aSMILAttr,
// If additive animation isn't required or isn't supported, set the value.
if (!isAdditive || NS_FAILED(aResult.SandwichAdd(result))) {
aResult.Swap(result);
// Note: The old value of aResult is now in |result|, and it will get
// cleaned up when |result| goes out of scope, when this function returns.
aResult = Move(result);
}
}

Просмотреть файл

@ -7,11 +7,13 @@
/* representation of a SMIL-animatable CSS property on an element */
#include "nsSMILCSSProperty.h"
#include "mozilla/dom/Element.h"
#include "mozilla/Move.h"
#include "nsSMILCSSValueType.h"
#include "nsSMILValue.h"
#include "nsComputedDOMStyle.h"
#include "nsCSSProps.h"
#include "mozilla/dom/Element.h"
#include "nsIDOMElement.h"
#include "nsIDocument.h"
@ -81,7 +83,7 @@ nsSMILCSSProperty::GetBaseValue() const
// In either case, just return a dummy value (initialized with the right
// type, so as not to indicate failure).
nsSMILValue tmpVal(&nsSMILCSSValueType::sSingleton);
baseValue.Swap(tmpVal);
Swap(baseValue, tmpVal);
return baseValue;
}

Просмотреть файл

@ -44,6 +44,35 @@ nsSMILValue::operator=(const nsSMILValue& aVal)
return *this;
}
// Move constructor / reassignment operator:
// Move constructor: take over aVal's payload without a deep copy.
nsSMILValue::nsSMILValue(nsSMILValue&& aVal)
  : mU(aVal.mU), // Copying union is only OK because we clear aVal.mType below.
    mType(aVal.mType)
{
  // Leave aVal with a null type, so that it's safely destructible (and won't
  // mess with anything referenced by its union, which we've copied).
  aVal.mType = nsSMILNullType::Singleton();
}
// Move assignment: destroy our current payload (if any), then take over
// aVal's payload without a deep copy.
nsSMILValue&
nsSMILValue::operator=(nsSMILValue&& aVal)
{
  // Guard against self-move: without this, DestroyAndCheckPostcondition()
  // below would free our data and we'd then copy the dead union back into
  // ourselves.
  if (this == &aVal) {
    return *this;
  }

  if (!IsNull()) {
    // Clean up any data we're currently tracking.
    DestroyAndCheckPostcondition();
  }

  // Copy the union (which could include a pointer to external memory) & mType:
  mU = aVal.mU;
  mType = aVal.mType;

  // Leave aVal with a null type, so that it's safely destructible (and won't
  // mess with anything referenced by its union, which we've now copied).
  aVal.mType = nsSMILNullType::Singleton();

  return *this;
}
bool
nsSMILValue::operator==(const nsSMILValue& aVal) const
{
@ -53,19 +82,6 @@ nsSMILValue::operator==(const nsSMILValue& aVal) const
return mType == aVal.mType && mType->IsEqual(*this, aVal);
}
// Swap the raw bytes (union + type pointer) of |this| and |aOther|.
// memcpy is used deliberately — presumably to avoid the deep-copying
// assignment operator; safe only because mType is patched up afterwards.
void
nsSMILValue::Swap(nsSMILValue& aOther)
{
  nsSMILValue tmp;
  memcpy(&tmp, &aOther, sizeof(nsSMILValue)); // tmp = aOther
  memcpy(&aOther, this, sizeof(nsSMILValue)); // aOther = this
  memcpy(this, &tmp, sizeof(nsSMILValue)); // this = tmp

  // |tmp| is about to die -- we need to clear its mType, so that its
  // destructor doesn't muck with the data we just transferred out of it.
  tmp.mType = nsSMILNullType::Singleton();
}
nsresult
nsSMILValue::Add(const nsSMILValue& aValueToAdd, uint32_t aCount)
{

Просмотреть файл

@ -33,6 +33,10 @@ public:
const nsSMILValue& operator=(const nsSMILValue& aVal);
// Move constructor / reassignment operator:
nsSMILValue(nsSMILValue&& aVal);
nsSMILValue& operator=(nsSMILValue&& aVal);
// Equality operators. These are allowed to be conservative (return false
// more than you'd expect) - see comment above nsISMILType::IsEqual.
bool operator==(const nsSMILValue& aVal) const;
@ -45,9 +49,6 @@ public:
return (mType == nsSMILNullType::Singleton());
}
// Swaps the member data (mU & mPtr) of |this| with |aOther|
void Swap(nsSMILValue& aOther);
nsresult Add(const nsSMILValue& aValueToAdd, uint32_t aCount = 1);
nsresult SandwichAdd(const nsSMILValue& aValueToAdd);
nsresult ComputeDistance(const nsSMILValue& aTo, double& aDistance) const;

Просмотреть файл

@ -5,7 +5,9 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SVGAnimatedLengthList.h"
#include "DOMSVGAnimatedLengthList.h"
#include "mozilla/Move.h"
#include "nsSVGElement.h"
#include "nsSVGAttrTearoffTable.h"
#include "nsSMILValue.h"
@ -138,7 +140,7 @@ SVGAnimatedLengthList::
nsresult rv = llai->SetValueFromString(aStr);
if (NS_SUCCEEDED(rv)) {
llai->SetInfo(mElement, mAxis, mCanZeroPadList);
aValue.Swap(val);
aValue = Move(val);
// If any of the lengths in the list depend on their context, then we must
// prevent caching of the entire animation sandwich. This is because the
@ -181,7 +183,7 @@ SVGAnimatedLengthList::SMILAnimatedLengthList::GetBaseValue() const
nsresult rv = llai->CopyFrom(mVal->mBaseVal);
if (NS_SUCCEEDED(rv)) {
llai->SetInfo(mElement, mAxis, mCanZeroPadList);
val.Swap(tmp);
val = Move(tmp);
}
return val;
}

Просмотреть файл

@ -5,7 +5,9 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SVGAnimatedNumberList.h"
#include "DOMSVGAnimatedNumberList.h"
#include "mozilla/Move.h"
#include "nsSVGElement.h"
#include "nsSVGAttrTearoffTable.h"
#include "nsSMILValue.h"
@ -138,7 +140,7 @@ SVGAnimatedNumberList::
nsresult rv = nlai->SetValueFromString(aStr);
if (NS_SUCCEEDED(rv)) {
nlai->SetInfo(mElement);
aValue.Swap(val);
aValue = Move(val);
}
aPreventCachingOfSandwich = false;
return rv;
@ -157,7 +159,7 @@ SVGAnimatedNumberList::SMILAnimatedNumberList::GetBaseValue() const
nsresult rv = nlai->CopyFrom(mVal->mBaseVal);
if (NS_SUCCEEDED(rv)) {
nlai->SetInfo(mElement);
val.Swap(tmp);
Swap(val, tmp);
}
return val;
}

Просмотреть файл

@ -5,7 +5,9 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SVGAnimatedPathSegList.h"
#include "DOMSVGPathSegList.h"
#include "mozilla/Move.h"
#include "nsSVGElement.h"
#include "nsSVGAttrTearoffTable.h"
#include "nsSMILValue.h"
@ -160,7 +162,7 @@ SVGAnimatedPathSegList::
nsresult rv = list->SetValueFromString(aStr);
if (NS_SUCCEEDED(rv)) {
list->SetElement(mElement);
aValue.Swap(val);
aValue = Move(val);
}
aPreventCachingOfSandwich = false;
return rv;
@ -179,7 +181,7 @@ SVGAnimatedPathSegList::SMILAnimatedPathSegList::GetBaseValue() const
nsresult rv = list->CopyFrom(mVal->mBaseVal);
if (NS_SUCCEEDED(rv)) {
list->SetElement(mElement);
val.Swap(tmp);
val = Move(tmp);
}
return val;
}

Просмотреть файл

@ -5,7 +5,9 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SVGAnimatedPointList.h"
#include "DOMSVGPointList.h"
#include "mozilla/Move.h"
#include "nsSVGElement.h"
#include "nsSVGAttrTearoffTable.h"
#include "nsSMILValue.h"
@ -163,7 +165,7 @@ SVGAnimatedPointList::
nsresult rv = list->SetValueFromString(aStr);
if (NS_SUCCEEDED(rv)) {
list->SetInfo(mElement);
aValue.Swap(val);
aValue = Move(val);
}
aPreventCachingOfSandwich = false;
return rv;
@ -182,7 +184,7 @@ SVGAnimatedPointList::SMILAnimatedPointList::GetBaseValue() const
nsresult rv = list->CopyFrom(mVal->mBaseVal);
if (NS_SUCCEEDED(rv)) {
list->SetInfo(mElement);
val.Swap(tmp);
Swap(val, tmp);
}
return val;
}

Просмотреть файл

@ -4,10 +4,11 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/ArrayUtils.h"
#include "nsSVGAngle.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/dom/SVGMarkerElement.h"
#include "mozilla/Move.h"
#include "nsContentUtils.h" // NS_ENSURE_FINITE
#include "nsSMILValue.h"
#include "nsSVGAttrTearoffTable.h"
@ -383,7 +384,7 @@ nsSVGAngle::SMILOrient::ValueFromString(const nsAString& aStr,
val.mU.mOrient.mUnit = unitType;
val.mU.mOrient.mOrientType = SVG_MARKER_ORIENT_ANGLE;
}
aValue.Swap(val);
aValue = Move(val);
aPreventCachingOfSandwich = false;
return NS_OK;

Просмотреть файл

@ -5,8 +5,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsSVGAnimatedTransformList.h"
#include "mozilla/dom/SVGAnimatedTransformList.h"
#include "mozilla/dom/SVGAnimationElement.h"
#include "mozilla/Move.h"
#include "nsCharSeparatedTokenizer.h"
#include "nsSVGTransform.h"
#include "nsSMILValue.h"
@ -248,7 +250,7 @@ nsSVGAnimatedTransformList::SMILAnimatedTransformList::ParseValue(
}
// Success! Populate our outparam with parsed value.
aResult.Swap(val);
aResult = Move(val);
}
int32_t

Просмотреть файл

@ -5,10 +5,12 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsSVGClass.h"
#include "mozilla/dom/SVGAnimatedString.h"
#include "mozilla/Move.h"
#include "nsSVGElement.h"
#include "nsSMILValue.h"
#include "SMILStringType.h"
#include "mozilla/dom/SVGAnimatedString.h"
using namespace mozilla;
using namespace mozilla::dom;
@ -130,7 +132,7 @@ nsSVGClass::SMILString::ValueFromString(const nsAString& aStr,
nsSMILValue val(SMILStringType::Singleton());
*static_cast<nsAString*>(val.mU.mPtr) = aStr;
aValue.Swap(val);
aValue = Move(val);
aPreventCachingOfSandwich = false;
return NS_OK;
}

Просмотреть файл

@ -5,6 +5,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsSVGString.h"
#include "mozilla/Move.h"
#include "nsSVGAttrTearoffTable.h"
#include "nsSMILValue.h"
#include "SMILStringType.h"
@ -110,7 +112,7 @@ nsSVGString::SMILString::ValueFromString(const nsAString& aStr,
nsSMILValue val(SMILStringType::Singleton());
*static_cast<nsAString*>(val.mU.mPtr) = aStr;
aValue.Swap(val);
aValue = Move(val);
aPreventCachingOfSandwich = false;
return NS_OK;
}

Просмотреть файл

@ -5,6 +5,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsSVGViewBox.h"
#include "mozilla/Move.h"
#include "nsCharSeparatedTokenizer.h"
#include "nsSMILValue.h"
#include "nsTextFormatter.h"
@ -300,7 +302,7 @@ nsSVGViewBox::SMILViewBox
}
nsSMILValue val(&SVGViewBoxSMILType::sSingleton);
*static_cast<nsSVGViewBoxRect*>(val.mU.mPtr) = viewBox;
aValue.Swap(val);
aValue = Move(val);
aPreventCachingOfSandwich = false;
return NS_OK;

Просмотреть файл

@ -16,21 +16,12 @@ protected:
nsIContent* mTemplate;
};
PLDHashTable mTable;
void
Init()
{
PL_DHashTableInit(&mTable, PL_DHashGetStubOps(), sizeof(Entry));
}
void
Finish() { PL_DHashTableFinish(&mTable); }
PLDHashTable2 mTable;
public:
nsTemplateMap() { Init(); }
nsTemplateMap() : mTable(PL_DHashGetStubOps(), sizeof(Entry)) { }
~nsTemplateMap() { Finish(); }
~nsTemplateMap() { }
void
Put(nsIContent* aContent, nsIContent* aTemplate) {
@ -70,7 +61,7 @@ public:
}
void
Clear() { Finish(); Init(); }
Clear() { mTable.Clear(); }
};
#endif // nsTemplateMap_h__

Просмотреть файл

@ -26,6 +26,7 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
'../printingui/win',
]
elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'cocoa':
DEFINES['PROXY_PRINTING'] = 1
LOCAL_INCLUDES += [
'../printingui/mac',
]

Просмотреть файл

@ -72,6 +72,7 @@ struct PrintData {
bool isInitializedFromPrinter;
bool isInitializedFromPrefs;
bool persistMarginBoxSettings;
int32_t optionFlags;
/* Windows-specific things */
nsString driverName;
@ -91,9 +92,20 @@ struct PrintData {
CStringKeyValue[] GTKPrintSettings;
/**
* TODO: OS X specific things - specifically, an array of names for the
* document to be supplied by nsIWebBrowserPrint::enumerateDocumentNames
* OS X specific things.
*/
nsString printJobName;
bool printAllPages;
bool mustCollate;
nsString disposition;
/** TODO: Is there an "unsigned short" primitive? **/
short pagesAcross;
short pagesDown;
double printTime;
bool detailedErrorReporting;
nsString faxNumber;
bool addHeaderAndFooter;
bool fileNameExtensionHidden;
};
} // namespace embedding

Просмотреть файл

@ -129,7 +129,23 @@ NS_IMETHODIMP
MockWebBrowserPrint::EnumerateDocumentNames(uint32_t* aCount,
char16_t*** aResult)
{
return NS_ERROR_NOT_IMPLEMENTED;
*aCount = 0;
*aResult = nullptr;
if (mData.printJobName().IsEmpty()) {
return NS_OK;
}
// The only consumer that cares about this is the OS X printing
// dialog, and even then, it only cares about the first document
// name. That's why we only send a single document name through
// PrintData.
char16_t** array = (char16_t**) moz_xmalloc(sizeof(char16_t*));
array[0] = ToNewUnicode(mData.printJobName());
*aCount = 1;
*aResult = array;
return NS_OK;
}
NS_IMETHODIMP

Просмотреть файл

@ -49,5 +49,48 @@ TEST(Cairo, Bug825721) {
TryCircle(0.0, 1.0, 5761126469220696064.0);
}
// Regression test for bug 1063486: cairo_path_extents() returned wrong
// bounds for paths ending in curves. Only path extents are queried, so a
// 1x1 scratch surface suffices — nothing is rasterized.
TEST(Cairo, Bug1063486) {
  double x1, y1, x2, y2;
  const double epsilon = .01; // tolerance for the extent comparisons

  cairo_surface_t *surf = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, 1, 1);
  ASSERT_TRUE(surf != nullptr);
  cairo_t *cairo = cairo_create(surf);
  ASSERT_TRUE(cairo != nullptr);

  // Path 1: rectangle-ish outline closed with two cubic curves.
  printf("Path 1\n");
  cairo_move_to(cairo, -20, -10);
  cairo_line_to(cairo, 20, -10);
  cairo_line_to(cairo, 20, 10);
  cairo_curve_to(cairo, 10,10, -10,10, -20,10);
  cairo_curve_to(cairo, -30,10, -30,-10, -20,-10);

  cairo_path_extents(cairo, &x1, &y1, &x2, &y2);

  ASSERT_LT(std::abs(-27.5 - x1), epsilon); // the failing coordinate
  ASSERT_LT(std::abs(-10 - y1), epsilon);
  ASSERT_LT(std::abs(20 - x2), epsilon);
  ASSERT_LT(std::abs(10 - y2), epsilon);

  // Path 2: fresh path whose leftmost/topmost extremes lie on the curves.
  printf("Path 2\n");
  cairo_new_path(cairo);
  cairo_move_to(cairo, 10, 30);
  cairo_line_to(cairo, 90, 30);
  cairo_curve_to(cairo, 30,30, 30,30, 10,30);
  cairo_curve_to(cairo, 0,30, 0,0, 30,5);

  cairo_path_extents(cairo, &x1, &y1, &x2, &y2);

  ASSERT_LT(std::abs(4.019531 - x1), epsilon); // the failing coordinate
  ASSERT_LT(std::abs(4.437500 - y1), epsilon);
  ASSERT_LT(std::abs(90. - x2), epsilon);
  ASSERT_LT(std::abs(30. - y2), epsilon);

  // cairo_create() took its own reference to surf, so destruction order
  // here is safe either way.
  cairo_surface_destroy(surf);
  cairo_destroy(cairo);
}
}
}

Просмотреть файл

@ -131,6 +131,7 @@ _cairo_path_bounder_curve_to (void *closure,
else
{
/* All control points are within the current extents. */
bounder->current_point = *d;
return CAIRO_STATUS_SUCCESS;
}
}

Просмотреть файл

@ -733,6 +733,7 @@ struct ParamTraits<mozilla::layers::FrameMetrics>
WriteParam(aMsg, aParam.GetPageScrollAmount());
WriteParam(aMsg, aParam.AllowVerticalScrollWithWheel());
WriteParam(aMsg, aParam.mClipRect);
WriteParam(aMsg, aParam.mIsLayersIdRoot);
WriteParam(aMsg, aParam.GetContentDescription());
}
@ -776,6 +777,7 @@ struct ParamTraits<mozilla::layers::FrameMetrics>
ReadParam(aMsg, aIter, &aResult->mPageScrollAmount) &&
ReadParam(aMsg, aIter, &aResult->mAllowVerticalScrollWithWheel) &&
ReadParam(aMsg, aIter, &aResult->mClipRect) &&
ReadParam(aMsg, aIter, &aResult->mIsLayersIdRoot) &&
ReadContentDescription(aMsg, aIter, aResult));
}
};

Просмотреть файл

@ -67,6 +67,7 @@ public:
, mLineScrollAmount(0, 0)
, mPageScrollAmount(0, 0)
, mAllowVerticalScrollWithWheel(false)
, mIsLayersIdRoot(false)
{
}
@ -100,7 +101,8 @@ public:
mLineScrollAmount == aOther.mLineScrollAmount &&
mPageScrollAmount == aOther.mPageScrollAmount &&
mAllowVerticalScrollWithWheel == aOther.mAllowVerticalScrollWithWheel &&
mClipRect == aOther.mClipRect;
mClipRect == aOther.mClipRect &&
mIsLayersIdRoot == aOther.mIsLayersIdRoot;
}
bool operator!=(const FrameMetrics& aOther) const
{
@ -525,6 +527,13 @@ public:
return mClipRect.ref();
}
void SetIsLayersIdRoot(bool aValue) {
mIsLayersIdRoot = aValue;
}
bool IsLayersIdRoot() const {
return mIsLayersIdRoot;
}
private:
// The pres-shell resolution that has been induced on the document containing
@ -699,6 +708,10 @@ private:
// The clip rect to use when compositing a layer with this FrameMetrics.
Maybe<ParentLayerIntRect> mClipRect;
// Whether these framemetrics are for the root scroll frame (root element if
// we don't have a root scroll frame) for its layers id.
bool mIsLayersIdRoot;
// WARNING!!!!
//
// When adding new fields to FrameMetrics, the following places should be

Просмотреть файл

@ -213,7 +213,7 @@ AppendToString(std::stringstream& aStream, const ScrollableLayerGuid& s,
const char* pfx, const char* sfx)
{
aStream << pfx
<< nsPrintfCString("{ l=%llu, p=%u, v=%llu }", s.mLayersId, s.mPresShellId, s.mScrollId).get()
<< nsPrintfCString("{ l=%" PRIu64 ", p=%u, v=%" PRIu64 " }", s.mLayersId, s.mPresShellId, s.mScrollId).get()
<< sfx;
}

Просмотреть файл

@ -280,7 +280,8 @@ GetEventRegions(const LayerMetricsWrapper& aLayer)
already_AddRefed<HitTestingTreeNode>
APZCTreeManager::RecycleOrCreateNode(TreeBuildingState& aState,
AsyncPanZoomController* aApzc)
AsyncPanZoomController* aApzc,
uint64_t aLayersId)
{
// Find a node without an APZC and return it. Note that unless the layer tree
// actually changes, this loop should generally do an early-return on the
@ -289,11 +290,11 @@ APZCTreeManager::RecycleOrCreateNode(TreeBuildingState& aState,
nsRefPtr<HitTestingTreeNode> node = aState.mNodesToDestroy[i];
if (!node->IsPrimaryHolder()) {
aState.mNodesToDestroy.RemoveElement(node);
node->RecycleWith(aApzc);
node->RecycleWith(aApzc, aLayersId);
return node.forget();
}
}
nsRefPtr<HitTestingTreeNode> node = new HitTestingTreeNode(aApzc, false);
nsRefPtr<HitTestingTreeNode> node = new HitTestingTreeNode(aApzc, false, aLayersId);
return node.forget();
}
@ -333,7 +334,7 @@ APZCTreeManager::PrepareNodeForLayer(const LayerMetricsWrapper& aLayer,
nsRefPtr<HitTestingTreeNode> node = nullptr;
if (!needsApzc) {
node = RecycleOrCreateNode(aState, nullptr);
node = RecycleOrCreateNode(aState, nullptr, aLayersId);
AttachNodeToTree(node, aParent, aNextSibling);
node->SetHitTestData(GetEventRegions(aLayer), aLayer.GetTransform(),
aLayer.GetClipRect() ? Some(ParentLayerIntRegion(*aLayer.GetClipRect())) : Nothing(),
@ -413,7 +414,7 @@ APZCTreeManager::PrepareNodeForLayer(const LayerMetricsWrapper& aLayer,
apzc->ShareFrameMetricsAcrossProcesses();
}
MOZ_ASSERT(node == nullptr);
node = new HitTestingTreeNode(apzc, true);
node = new HitTestingTreeNode(apzc, true, aLayersId);
} else {
// If we are re-using a node for this layer clear the tree pointers
// so that it doesn't continue pointing to nodes that might no longer
@ -452,9 +453,9 @@ APZCTreeManager::PrepareNodeForLayer(const LayerMetricsWrapper& aLayer,
// we are logging about APZCs is the scroll id, and otherwise we could
// confuse APZCs from different layer trees with the same scroll id.
if (aLayersId == aState.mOriginatingLayersId) {
if (apzc->IsRootForLayersId()) {
if (apzc->HasNoParentWithSameLayersId()) {
aState.mPaintLogger.LogTestData(aMetrics.GetScrollId(),
"isRootForLayersId", true);
"hasNoParentWithSameLayersId", true);
} else {
MOZ_ASSERT(apzc->GetParent());
aState.mPaintLogger.LogTestData(aMetrics.GetScrollId(),
@ -463,7 +464,7 @@ APZCTreeManager::PrepareNodeForLayer(const LayerMetricsWrapper& aLayer,
}
if (newApzc) {
if (apzc->IsRootForLayersId()) {
if (apzc->HasNoParentWithSameLayersId()) {
// If we just created a new apzc that is the root for its layers ID, then
// we need to update its zoom constraints which might have arrived before this
// was created
@ -487,7 +488,7 @@ APZCTreeManager::PrepareNodeForLayer(const LayerMetricsWrapper& aLayer,
// now that will also be using that APZC. The hit-test region on the APZC needs
// to be updated to deal with the new layer's hit region.
node = RecycleOrCreateNode(aState, apzc);
node = RecycleOrCreateNode(aState, apzc, aLayersId);
AttachNodeToTree(node, aParent, aNextSibling);
// Even though different layers associated with a given APZC may be at
@ -1039,7 +1040,7 @@ APZCTreeManager::UpdateZoomConstraints(const ScrollableLayerGuid& aGuid,
// For a given layers id, non-root APZCs inherit the zoom constraints
// of their root.
if (node && node->GetApzc()->IsRootForLayersId()) {
if (node && node->GetApzc()->HasNoParentWithSameLayersId()) {
UpdateZoomConstraintsRecursively(node.get(), aConstraints);
}
}
@ -1056,7 +1057,7 @@ APZCTreeManager::UpdateZoomConstraintsRecursively(HitTestingTreeNode* aNode,
}
for (HitTestingTreeNode* child = aNode->GetLastChild(); child; child = child->GetPrevSibling()) {
// We can have subtrees with their own layers id - leave those alone.
if (child->GetApzc() && child->GetApzc()->IsRootForLayersId()) {
if (child->GetApzc() && child->GetApzc()->HasNoParentWithSameLayersId()) {
continue;
}
UpdateZoomConstraintsRecursively(child, aConstraints);
@ -1343,7 +1344,7 @@ APZCTreeManager::BuildOverscrollHandoffChain(const nsRefPtr<AsyncPanZoomControll
result->Add(apzc);
if (apzc->GetScrollHandoffParentId() == FrameMetrics::NULL_SCROLL_ID) {
if (!apzc->IsRootForLayersId()) {
if (!apzc->HasNoParentWithSameLayersId()) {
// This probably indicates a bug or missed case in layout code
NS_WARNING("Found a non-root APZ with no handoff parent");
}
@ -1361,7 +1362,7 @@ APZCTreeManager::BuildOverscrollHandoffChain(const nsRefPtr<AsyncPanZoomControll
// scroll id.
AsyncPanZoomController* scrollParent = nullptr;
AsyncPanZoomController* parent = apzc;
while (!parent->IsRootForLayersId()) {
while (!parent->HasNoParentWithSameLayersId()) {
parent = parent->GetParent();
// While walking up to find the root of the subtree, if we encounter the
// handoff parent, we don't actually need to do the search so we can
@ -1461,7 +1462,11 @@ APZCTreeManager::GetAPZCAtPoint(HitTestingTreeNode* aNode,
node);
HitTestResult hitResult = node->HitTest(aHitTestPoint);
if (hitResult != HitTestResult::HitNothing) {
result = node->GetNearestContainingApzc();
result = node->GetNearestContainingApzcWithSameLayersId();
if (!result) {
result = FindRootApzcForLayersId(node->GetLayersId());
MOZ_ASSERT(result);
}
APZCTM_LOG("Successfully matched APZC %p via node %p (hit result %d)\n",
result, node, hitResult);
MOZ_ASSERT(hitResult == HitLayer || hitResult == HitDispatchToContentRegion);
@ -1478,6 +1483,35 @@ APZCTreeManager::GetAPZCAtPoint(HitTestingTreeNode* aNode,
return nullptr;
}
AsyncPanZoomController*
APZCTreeManager::FindRootApzcForLayersId(uint64_t aLayersId) const
{
mTreeLock.AssertCurrentThreadOwns();
if (!mRootNode) {
return nullptr;
}
std::deque<const HitTestingTreeNode*> queue;
queue.push_back(mRootNode);
while (!queue.empty()) {
const HitTestingTreeNode* node = queue.front();
queue.pop_front();
AsyncPanZoomController* apzc = node->GetApzc();
if (apzc && apzc->GetLayersId() == aLayersId && apzc->IsRootForLayersId()) {
return apzc;
}
for (HitTestingTreeNode* child = node->GetLastChild();
child;
child = child->GetPrevSibling()) {
queue.push_back(child);
}
}
return nullptr;
}
/* The methods GetScreenToApzcTransform() and GetApzcToGeckoTransform() return
some useful transformations that input events may need applied. This is best
illustrated with an example. Consider a chain of layers, L, M, N, O, P, Q, R. Layer L
@ -1704,7 +1738,7 @@ APZCTreeManager::RootAPZCForLayersId(AsyncPanZoomController* aApzc) const
{
MonitorAutoLock lock(mTreeLock);
nsRefPtr<AsyncPanZoomController> apzc = aApzc;
while (apzc && !apzc->IsRootForLayersId()) {
while (apzc && !apzc->HasNoParentWithSameLayersId()) {
apzc = apzc->GetParent();
}
return apzc.forget();

Просмотреть файл

@ -412,6 +412,7 @@ private:
AsyncPanZoomController* GetAPZCAtPoint(HitTestingTreeNode* aNode,
const ParentLayerPoint& aHitTestPoint,
HitTestResult* aOutHitResult);
AsyncPanZoomController* FindRootApzcForLayersId(uint64_t aLayersId) const;
already_AddRefed<AsyncPanZoomController> GetMultitouchTarget(AsyncPanZoomController* aApzc1, AsyncPanZoomController* aApzc2) const;
already_AddRefed<AsyncPanZoomController> CommonAncestor(AsyncPanZoomController* aApzc1, AsyncPanZoomController* aApzc2) const;
already_AddRefed<AsyncPanZoomController> RootAPZCForLayersId(AsyncPanZoomController* aApzc) const;
@ -433,7 +434,8 @@ private:
void FlushRepaintsRecursively(HitTestingTreeNode* aNode);
already_AddRefed<HitTestingTreeNode> RecycleOrCreateNode(TreeBuildingState& aState,
AsyncPanZoomController* aApzc);
AsyncPanZoomController* aApzc,
uint64_t aLayersId);
HitTestingTreeNode* PrepareNodeForLayer(const LayerMetricsWrapper& aLayer,
const FrameMetrics& aMetrics,
uint64_t aLayersId,

Просмотреть файл

@ -2900,6 +2900,7 @@ void AsyncPanZoomController::NotifyLayersUpdated(const FrameMetrics& aLayerMetri
mFrameMetrics.SetLineScrollAmount(aLayerMetrics.GetLineScrollAmount());
mFrameMetrics.SetPageScrollAmount(aLayerMetrics.GetPageScrollAmount());
mFrameMetrics.SetClipRect(aLayerMetrics.GetClipRect());
mFrameMetrics.SetIsLayersIdRoot(aLayerMetrics.IsLayersIdRoot());
if (scrollOffsetUpdated) {
APZC_LOG("%p updating scroll offset from %s to %s\n", this,

Просмотреть файл

@ -899,12 +899,18 @@ public:
}
/* Returns true if there is no APZC higher in the tree with the same
* layers id.
* layers id. Deprecated. New code shouldn't use this. Old code should be
* updated to not use this.
*/
bool IsRootForLayersId() const {
bool HasNoParentWithSameLayersId() const {
return !mParent || (mParent->mLayersId != mLayersId);
}
bool IsRootForLayersId() const {
ReentrantMonitorAutoEnter lock(mMonitor);
return mFrameMetrics.IsLayersIdRoot();
}
private:
// This is a raw pointer to avoid introducing a reference cycle between
// AsyncPanZoomController and APZCTreeManager. Since these objects don't
@ -1087,6 +1093,11 @@ public:
return mAsyncTransformAppliedToContent;
}
uint64_t GetLayersId() const
{
return mLayersId;
}
private:
// Extra offset to add in SampleContentTransformForFrame for testing
CSSPoint mTestAsyncScrollOffset;

Просмотреть файл

@ -18,22 +18,28 @@ namespace mozilla {
namespace layers {
HitTestingTreeNode::HitTestingTreeNode(AsyncPanZoomController* aApzc,
bool aIsPrimaryHolder)
bool aIsPrimaryHolder,
uint64_t aLayersId)
: mApzc(aApzc)
, mIsPrimaryApzcHolder(aIsPrimaryHolder)
, mLayersId(aLayersId)
, mOverride(EventRegionsOverride::NoOverride)
{
if (mIsPrimaryApzcHolder) {
MOZ_ASSERT(mApzc);
}
MOZ_ASSERT(!mApzc || mApzc->GetLayersId() == mLayersId);
}
void
HitTestingTreeNode::RecycleWith(AsyncPanZoomController* aApzc)
HitTestingTreeNode::RecycleWith(AsyncPanZoomController* aApzc,
uint64_t aLayersId)
{
MOZ_ASSERT(!mIsPrimaryApzcHolder);
Destroy(); // clear out tree pointers
mApzc = aApzc;
mLayersId = aLayersId;
MOZ_ASSERT(!mApzc || mApzc->GetLayersId() == mLayersId);
// The caller is expected to call SetHitTestData to repopulate the hit-test
// fields.
}
@ -57,6 +63,8 @@ HitTestingTreeNode::Destroy()
}
mApzc = nullptr;
}
mLayersId = 0;
}
void
@ -147,12 +155,31 @@ HitTestingTreeNode::GetNearestContainingApzc() const
return nullptr;
}
AsyncPanZoomController*
HitTestingTreeNode::GetNearestContainingApzcWithSameLayersId() const
{
for (const HitTestingTreeNode* n = this;
n && n->mLayersId == mLayersId;
n = n->GetParent()) {
if (n->GetApzc()) {
return n->GetApzc();
}
}
return nullptr;
}
bool
HitTestingTreeNode::IsPrimaryHolder() const
{
return mIsPrimaryApzcHolder;
}
uint64_t
HitTestingTreeNode::GetLayersId() const
{
return mLayersId;
}
void
HitTestingTreeNode::SetHitTestData(const EventRegions& aRegions,
const gfx::Matrix4x4& aTransform,
@ -229,7 +256,8 @@ HitTestingTreeNode::Dump(const char* aPrefix) const
mPrevSibling->Dump(aPrefix);
}
printf_stderr("%sHitTestingTreeNode (%p) APZC (%p) g=(%s) %s%sr=(%s) t=(%s) c=(%s)\n",
aPrefix, this, mApzc.get(), mApzc ? Stringify(mApzc->GetGuid()).c_str() : "",
aPrefix, this, mApzc.get(),
mApzc ? Stringify(mApzc->GetGuid()).c_str() : nsPrintfCString("l=%" PRIu64, mLayersId).get(),
(mOverride & EventRegionsOverride::ForceDispatchToContent) ? "fdtc " : "",
(mOverride & EventRegionsOverride::ForceEmptyHitRegion) ? "fehr " : "",
Stringify(mEventRegions).c_str(), Stringify(mTransform).c_str(),

Просмотреть файл

@ -53,8 +53,9 @@ class HitTestingTreeNode {
private:
~HitTestingTreeNode();
public:
HitTestingTreeNode(AsyncPanZoomController* aApzc, bool aIsPrimaryHolder);
void RecycleWith(AsyncPanZoomController* aApzc);
HitTestingTreeNode(AsyncPanZoomController* aApzc, bool aIsPrimaryHolder,
uint64_t aLayersId);
void RecycleWith(AsyncPanZoomController* aApzc, uint64_t aLayersId);
void Destroy();
/* Tree construction methods */
@ -75,7 +76,9 @@ public:
AsyncPanZoomController* GetApzc() const;
AsyncPanZoomController* GetNearestContainingApzc() const;
AsyncPanZoomController* GetNearestContainingApzcWithSameLayersId() const;
bool IsPrimaryHolder() const;
uint64_t GetLayersId() const;
/* Hit test related methods */
@ -106,6 +109,8 @@ private:
nsRefPtr<AsyncPanZoomController> mApzc;
bool mIsPrimaryApzcHolder;
uint64_t mLayersId;
/* Let {L,M} be the {layer, scrollable metrics} pair that this node
* corresponds to in the layer tree. mEventRegions contains the event regions
* from L, in the case where event-regions are enabled. If event-regions are

Просмотреть файл

@ -91,7 +91,7 @@ function buildApzcTree(paint) {
// This 'root' does not correspond to an APZC.
var root = makeNode(-1);
for (var scrollId in paint) {
if ("isRootForLayersId" in paint[scrollId]) {
if ("hasNoParentWithSameLayersId" in paint[scrollId]) {
addRoot(root, scrollId);
} else if ("parentScrollId" in paint[scrollId]) {
addLink(root, scrollId, paint[scrollId]["parentScrollId"]);

Просмотреть файл

@ -1894,6 +1894,10 @@ protected:
CSSRect aScrollableRect = CSSRect(-1, -1, -1, -1)) {
FrameMetrics metrics;
metrics.SetScrollId(aScrollId);
// By convention in this test file, START_SCROLL_ID is the root, so mark it as such.
if (aScrollId == FrameMetrics::START_SCROLL_ID) {
metrics.SetIsLayersIdRoot(true);
}
IntRect layerBound = aLayer->GetVisibleRegion().GetBounds();
metrics.SetCompositionBounds(ParentLayerRect(layerBound.x, layerBound.y,
layerBound.width, layerBound.height));
@ -2864,9 +2868,10 @@ protected:
void CreateBug1119497LayerTree() {
const char* layerTreeSyntax = "c(tt)";
// LayerID 0 12
// 0 is the root and doesn't have an APZC
// 1 is behind 2 and does have an APZC
// 2 entirely covers 1 and should take all the input events
// 0 is the root and has an APZC
// 1 is behind 2 and has an APZC
// 2 entirely covers 1 and should take all the input events, but has no APZC
// so hits to 2 should go to to the root APZC
nsIntRegion layerVisibleRegions[] = {
nsIntRegion(IntRect(0, 0, 100, 100)),
nsIntRegion(IntRect(0, 0, 100, 100)),
@ -2874,6 +2879,7 @@ protected:
};
root = CreateLayerTree(layerTreeSyntax, layerVisibleRegions, nullptr, lm, layers);
SetScrollableFrameMetrics(root, FrameMetrics::START_SCROLL_ID);
SetScrollableFrameMetrics(layers[1], FrameMetrics::START_SCROLL_ID + 1);
registration = MakeUnique<ScopedLayerTreeRegistration>(0, root, mcc);
@ -3006,8 +3012,8 @@ TEST_F(APZEventRegionsTester, Bug1119497) {
HitTestResult result;
nsRefPtr<AsyncPanZoomController> hit = manager->GetTargetAPZC(ScreenPoint(50, 50), &result);
// We should hit layers[2], so |result| will be HitLayer but there's no
// actual APZC in that parent chain, so |hit| should be nullptr.
EXPECT_EQ(nullptr, hit.get());
// actual APZC on layers[2], so it will be the APZC of the root layer.
EXPECT_EQ(ApzcOf(layers[0]), hit.get());
EXPECT_EQ(HitTestResult::HitLayer, result);
}

Просмотреть файл

@ -32,6 +32,7 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] != 'windows':
UNIFIED_SOURCES += [ '/gfx/2d/unittest/%s' % p for p in [
'TestBase.cpp',
'TestBugs.cpp',
'TestCairo.cpp',
'TestPoint.cpp',
'TestScaling.cpp',
]]

Просмотреть файл

@ -2249,7 +2249,7 @@ gfxFont::Measure(gfxTextRun *aTextRun,
const gfxTextRun::DetailedGlyph *details =
aTextRun->GetDetailedGlyphs(i);
NS_ASSERTION(details != nullptr,
"detaiedGlyph record should not be missing!");
"detailedGlyph record should not be missing!");
uint32_t j;
for (j = 0; j < glyphCount; ++j, ++details) {
uint32_t glyphIndex = details->mGlyphID;

Просмотреть файл

@ -337,26 +337,27 @@ gfxGDIFont::Initialize()
}
}
// Cache the width of a single space.
SIZE size;
GetTextExtentPoint32W(dc.GetDC(), L" ", 1, &size);
mMetrics->spaceWidth = ROUND(size.cx);
// Cache the width of digit zero.
// XXX MSDN (http://msdn.microsoft.com/en-us/library/ms534223.aspx)
// does not say what the failure modes for GetTextExtentPoint32 are -
// is it safe to assume it will fail iff the font has no '0'?
if (GetTextExtentPoint32W(dc.GetDC(), L"0", 1, &size)) {
mMetrics->zeroOrAveCharWidth = ROUND(size.cx);
} else {
mMetrics->zeroOrAveCharWidth = mMetrics->aveCharWidth;
}
WORD glyph;
SIZE size;
DWORD ret = GetGlyphIndicesW(dc.GetDC(), L" ", 1, &glyph,
GGI_MARK_NONEXISTING_GLYPHS);
if (ret != GDI_ERROR && glyph != 0xFFFF) {
mSpaceGlyph = glyph;
// Cache the width of a single space.
GetTextExtentPoint32W(dc.GetDC(), L" ", 1, &size);
mMetrics->spaceWidth = ROUND(size.cx);
} else {
mMetrics->spaceWidth = mMetrics->aveCharWidth;
}
// Cache the width of digit zero, if available.
ret = GetGlyphIndicesW(dc.GetDC(), L"0", 1, &glyph,
GGI_MARK_NONEXISTING_GLYPHS);
if (ret != GDI_ERROR && glyph != 0xFFFF) {
GetTextExtentPoint32W(dc.GetDC(), L"0", 1, &size);
mMetrics->zeroOrAveCharWidth = ROUND(size.cx);
} else {
mMetrics->zeroOrAveCharWidth = mMetrics->aveCharWidth;
}
SanitizeMetrics(mMetrics, GetFontEntry()->mIsBadUnderlineFont);

Просмотреть файл

@ -1791,10 +1791,18 @@ gfxHarfBuzzShaper::SetGlyphsFromRun(gfxContext *aContext,
charGlyphs[baseCharIndex].SetSimpleGlyph(advance,
ginfo[glyphStart].codepoint);
} else {
// collect all glyphs in a list to be assigned to the first char;
// Collect all glyphs in a list to be assigned to the first char;
// there must be at least one in the clump, and we already measured
// its advance, hence the placement of the loop-exit test and the
// measurement of the next glyph
// measurement of the next glyph.
// For vertical orientation, we add a "base offset" to compensate
// for the positioning within the cluster being based on horizontal
// glyph origin/offset.
hb_position_t baseIOffset, baseBOffset;
if (aVertical) {
baseIOffset = 2 * (i_offset - i_advance);
baseBOffset = GetGlyphHAdvance(ginfo[glyphStart].codepoint);
}
while (1) {
gfxTextRun::DetailedGlyph* details =
detailedGlyphs.AppendElement();
@ -1817,9 +1825,9 @@ gfxHarfBuzzShaper::SetGlyphsFromRun(gfxContext *aContext,
}
if (aVertical) {
i_offset = posInfo[glyphStart].y_offset;
i_offset = baseIOffset - posInfo[glyphStart].y_offset;
i_advance = posInfo[glyphStart].y_advance;
b_offset = posInfo[glyphStart].x_offset;
b_offset = baseBOffset - posInfo[glyphStart].x_offset;
b_advance = posInfo[glyphStart].x_advance;
} else {
i_offset = posInfo[glyphStart].x_offset;

Просмотреть файл

@ -336,7 +336,8 @@ struct JS_PUBLIC_API(GCDescription) {
GCDescription(bool isCompartment, JSGCInvocationKind kind)
: isCompartment_(isCompartment), invocationKind_(kind) {}
char16_t* formatMessage(JSRuntime* rt) const;
char16_t* formatSliceMessage(JSRuntime* rt) const;
char16_t* formatSummaryMessage(JSRuntime* rt) const;
char16_t* formatJSON(JSRuntime* rt, uint64_t timestamp) const;
JS::dbg::GarbageCollectionEvent::Ptr toGCEvent(JSRuntime* rt) const;

Просмотреть файл

@ -76,7 +76,7 @@
* - MutableHandle<T> is a non-const reference to Rooted<T>. It is used in the
* same way as Handle<T> and includes a |set(const T& v)| method to allow
* updating the value of the referenced Rooted<T>. A MutableHandle<T> can be
* created from a Rooted<T> by using |Rooted<T>::operator&()|.
* created with an implicit cast from a Rooted<T>*.
*
* In some cases the small performance overhead of exact rooting (measured to
* be a few nanoseconds on desktop) is too much. In these cases, try the
@ -1185,7 +1185,5 @@ CallTraceCallbackOnNonHeap(T* v, const TraceCallbacks& aCallbacks, const char* a
} /* namespace js */
#undef DELETE_ASSIGNMENT_OPS
#undef DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS
#undef DECLARE_NONPOINTER_ACCESSOR_METHODS
#endif /* js_RootingAPI_h */

Просмотреть файл

@ -33,210 +33,6 @@ using mozilla::PodZero;
/* Except for the first and last, slices of less than 10ms are not reported. */
static const int64_t SLICE_MIN_REPORT_TIME = 10 * PRMJ_USEC_PER_MSEC;
class gcstats::StatisticsSerializer
{
typedef Vector<char, 128, SystemAllocPolicy> CharBuffer;
CharBuffer buf_;
bool asJSON_;
bool needComma_;
bool oom_;
static const int MaxFieldValueLength = 128;
public:
enum Mode {
AsJSON = true,
AsText = false
};
explicit StatisticsSerializer(Mode asJSON)
: buf_(), asJSON_(asJSON), needComma_(false), oom_(false)
{}
bool isJSON() { return asJSON_; }
bool isOOM() { return oom_; }
void endLine() {
if (!asJSON_) {
p("\n");
needComma_ = false;
}
}
void extra(const char* str) {
if (!asJSON_) {
needComma_ = false;
p(str);
}
}
void appendString(const char* name, const char* value) {
put(name, value, "", true);
}
void appendNumber(const char* name, const char* vfmt, const char* units, ...) {
va_list va;
va_start(va, units);
append(name, vfmt, va, units);
va_end(va);
}
void appendDecimal(const char* name, const char* units, double d) {
if (d < 0)
d = 0;
if (asJSON_)
appendNumber(name, "%d.%03d", units, (int)d, (int)(d * 1000.) % 1000);
else
appendNumber(name, "%.1f", units, d);
}
void appendIfNonzeroMS(const char* name, double v) {
if (asJSON_ || v >= 0.1)
appendDecimal(name, "ms", v);
}
void beginObject(const char* name) {
if (needComma_)
pJSON(", ");
if (asJSON_ && name) {
putKey(name);
pJSON(": ");
}
pJSON("{");
needComma_ = false;
}
void endObject() {
needComma_ = false;
pJSON("}");
needComma_ = true;
}
void beginArray(const char* name) {
if (needComma_)
pJSON(", ");
if (asJSON_)
putKey(name);
pJSON(": [");
needComma_ = false;
}
void endArray() {
needComma_ = false;
pJSON("]");
needComma_ = true;
}
char16_t* finishJSString() {
char* buf = finishCString();
if (!buf)
return nullptr;
size_t nchars = strlen(buf);
char16_t* out = js_pod_malloc<char16_t>(nchars + 1);
if (!out) {
oom_ = true;
js_free(buf);
return nullptr;
}
CopyAndInflateChars(out, buf, nchars);
js_free(buf);
out[nchars] = 0;
return out;
}
char* finishCString() {
if (oom_)
return nullptr;
buf_.append('\0');
char* buf = buf_.extractRawBuffer();
if (!buf)
oom_ = true;
return buf;
}
private:
void append(const char* name, const char* vfmt,
va_list va, const char* units)
{
char val[MaxFieldValueLength];
JS_vsnprintf(val, MaxFieldValueLength, vfmt, va);
put(name, val, units, false);
}
void p(const char* cstr) {
if (oom_)
return;
if (!buf_.append(cstr, strlen(cstr)))
oom_ = true;
}
void p(const char c) {
if (oom_)
return;
if (!buf_.append(c))
oom_ = true;
}
void pJSON(const char* str) {
if (asJSON_)
p(str);
}
void put(const char* name, const char* val, const char* units, bool valueIsQuoted) {
if (needComma_)
p(", ");
needComma_ = true;
putKey(name);
p(": ");
if (valueIsQuoted)
putQuoted(val);
else
p(val);
if (!asJSON_)
p(units);
}
void putQuoted(const char* str) {
pJSON("\"");
p(str);
pJSON("\"");
}
void putKey(const char* str) {
if (!asJSON_) {
p(str);
return;
}
p("\"");
const char* c = str;
while (*c) {
if (*c == ' ' || *c == '\t')
p('_');
else if (isupper(*c))
p(tolower(*c));
else if (*c == '+')
p("added_");
else if (*c == '-')
p("removed_");
else if (*c != '(' && *c != ')')
p(*c);
c++;
}
p("\"");
}
};
/*
* If this fails, then you can either delete this assertion and allow all
* larger-numbered reasons to pile up in the last telemetry bucket, or switch
@ -443,25 +239,11 @@ struct AllPhaseIterator {
}
};
static void
FormatPhaseTimes(StatisticsSerializer& ss, const char* name, Statistics::PhaseTimeTable times)
{
ss.beginObject(name);
for (AllPhaseIterator iter(times); !iter.done(); iter.advance()) {
Phase phase;
size_t dagSlot;
iter.get(&phase, &dagSlot);
ss.appendIfNonzeroMS(phases[phase].name, t(times[dagSlot][phase]));
}
ss.endObject();
}
void
Statistics::gcDuration(int64_t* total, int64_t* maxPause)
Statistics::gcDuration(int64_t* total, int64_t* maxPause) const
{
*total = *maxPause = 0;
for (SliceData* slice = slices.begin(); slice != slices.end(); slice++) {
for (const SliceData* slice = slices.begin(); slice != slices.end(); slice++) {
*total += slice->duration();
if (slice->duration() > *maxPause)
*maxPause = slice->duration();
@ -480,90 +262,6 @@ Statistics::sccDurations(int64_t* total, int64_t* maxPause)
}
}
bool
Statistics::formatData(StatisticsSerializer& ss, uint64_t timestamp)
{
MOZ_ASSERT(!aborted);
int64_t total, longest;
gcDuration(&total, &longest);
int64_t sccTotal, sccLongest;
sccDurations(&sccTotal, &sccLongest);
double mmu20 = computeMMU(20 * PRMJ_USEC_PER_MSEC);
double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
ss.beginObject(nullptr);
if (ss.isJSON())
ss.appendNumber("Timestamp", "%llu", "", (unsigned long long)timestamp);
if (slices.length() > 1 || ss.isJSON())
ss.appendDecimal("Max Pause", "ms", t(longest));
else
ss.appendString("Reason", ExplainReason(slices[0].reason));
ss.appendDecimal("Total Time", "ms", t(total));
ss.appendNumber("Zones Collected", "%d", "", zoneStats.collectedZoneCount);
ss.appendNumber("Total Zones", "%d", "", zoneStats.zoneCount);
ss.appendNumber("Total Compartments", "%d", "", zoneStats.compartmentCount);
ss.appendNumber("Minor GCs", "%d", "", counts[STAT_MINOR_GC]);
ss.appendNumber("Store Buffer Overflows", "%d", "", counts[STAT_STOREBUFFER_OVERFLOW]);
ss.appendNumber("MMU (20ms)", "%d", "%", int(mmu20 * 100));
ss.appendNumber("MMU (50ms)", "%d", "%", int(mmu50 * 100));
ss.appendDecimal("SCC Sweep Total", "ms", t(sccTotal));
ss.appendDecimal("SCC Sweep Max Pause", "ms", t(sccLongest));
if (nonincrementalReason_ || ss.isJSON()) {
ss.appendString("Nonincremental Reason",
nonincrementalReason_ ? nonincrementalReason_ : "none");
}
ss.appendNumber("Allocated", "%u", "MB", unsigned(preBytes / 1024 / 1024));
ss.appendNumber("+Chunks", "%d", "", counts[STAT_NEW_CHUNK]);
ss.appendNumber("-Chunks", "%d", "", counts[STAT_DESTROY_CHUNK]);
ss.endLine();
if (slices.length() > 1 || ss.isJSON()) {
ss.beginArray("Slices");
for (size_t i = 0; i < slices.length(); i++) {
int64_t width = slices[i].duration();
if (i != 0 && i != slices.length() - 1 && width < SLICE_MIN_REPORT_TIME &&
!slices[i].resetReason && !ss.isJSON())
{
continue;
}
char budgetDescription[200];
slices[i].budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
ss.beginObject(nullptr);
ss.extra(" ");
ss.appendNumber("Slice", "%d", "", i);
ss.appendDecimal("Pause", "", t(width));
ss.extra(" (");
ss.appendDecimal("When", "ms", t(slices[i].start - slices[0].start));
ss.appendString("Reason", ExplainReason(slices[i].reason));
ss.appendString("Budget", budgetDescription);
if (ss.isJSON()) {
ss.appendDecimal("Page Faults", "",
double(slices[i].endFaults - slices[i].startFaults));
ss.appendNumber("Start Timestamp", "%llu", "", (unsigned long long)slices[i].start);
ss.appendNumber("End Timestamp", "%llu", "", (unsigned long long)slices[i].end);
}
if (slices[i].resetReason)
ss.appendString("Reset", slices[i].resetReason);
ss.extra("): ");
FormatPhaseTimes(ss, "Times", slices[i].phaseTimes);
ss.endLine();
ss.endObject();
}
ss.endArray();
}
ss.extra(" Totals: ");
FormatPhaseTimes(ss, "Totals", phaseTimes);
ss.endObject();
return !ss.isOOM();
}
typedef Vector<UniqueChars, 8, SystemAllocPolicy> FragmentVector;
static UniqueChars
@ -617,6 +315,119 @@ SumChildTimes(size_t phaseSlot, Phase phase, Statistics::PhaseTimeTable phaseTim
return total;
}
UniqueChars
Statistics::formatCompactSliceMessage() const
{
// Skip if we OOM'ed.
if (slices.length() == 0)
return UniqueChars(nullptr);
const size_t index = slices.length() - 1;
const SliceData& slice = slices[index];
char budgetDescription[200];
slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
const char* format =
"GC Slice %u - Pause: %.3fms of %s budget (@ %.3fms); Reason: %s; Reset: %s%s; Times: ";
char buffer[1024];
memset(buffer, 0, sizeof(buffer));
JS_snprintf(buffer, sizeof(buffer), format, index,
t(slice.duration()), budgetDescription, t(slice.start - slices[0].start),
ExplainReason(slice.reason),
slice.resetReason ? "yes - " : "no", slice.resetReason ? slice.resetReason : "");
FragmentVector fragments;
if (!fragments.append(make_string_copy(buffer)) ||
!fragments.append(formatCompactSlicePhaseTimes(slices[index].phaseTimes)))
{
return UniqueChars(nullptr);
}
return Join(fragments);
}
UniqueChars
Statistics::formatCompactSummaryMessage() const
{
const double bytesPerMiB = 1024 * 1024;
FragmentVector fragments;
if (!fragments.append(make_string_copy("Summary - ")))
return UniqueChars(nullptr);
int64_t total, longest;
gcDuration(&total, &longest);
const double mmu20 = computeMMU(20 * PRMJ_USEC_PER_MSEC);
const double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
char buffer[1024];
if (!nonincrementalReason_) {
JS_snprintf(buffer, sizeof(buffer),
"Max Pause: %.3fms; MMU 20ms: %.1f%%; MMU 50ms: %.1f%%; Total: %.3fms; ",
t(longest), mmu20 * 100., mmu50 * 100., t(total));
} else {
JS_snprintf(buffer, sizeof(buffer), "Non-Incremental: %.3fms; ", t(total));
}
if (!fragments.append(make_string_copy(buffer)))
return UniqueChars(nullptr);
JS_snprintf(buffer, sizeof(buffer),
"Zones: %d of %d; Compartments: %d of %d; HeapSize: %.3f MiB; "\
"HeapChange (abs): %+d (%d); ",
zoneStats.collectedZoneCount, zoneStats.zoneCount,
zoneStats.collectedCompartmentCount, zoneStats.compartmentCount,
double(preBytes) / bytesPerMiB,
counts[STAT_NEW_CHUNK] - counts[STAT_DESTROY_CHUNK],
counts[STAT_NEW_CHUNK] + counts[STAT_DESTROY_CHUNK]);
if (!fragments.append(make_string_copy(buffer)))
return UniqueChars(nullptr);
MOZ_ASSERT_IF(counts[STAT_ARENA_RELOCATED], gckind == GC_SHRINK);
if (gckind == GC_SHRINK) {
JS_snprintf(buffer, sizeof(buffer),
"Kind: %s; Relocated: %.3f MiB; ",
ExplainInvocationKind(gckind),
double(ArenaSize * counts[STAT_ARENA_RELOCATED]) / bytesPerMiB);
if (!fragments.append(make_string_copy(buffer)))
return UniqueChars(nullptr);
}
return Join(fragments);
}
UniqueChars
Statistics::formatCompactSlicePhaseTimes(PhaseTimeTable phaseTimes) const
{
static const int64_t MaxUnaccountedTimeUS = 100;
FragmentVector fragments;
char buffer[128];
for (AllPhaseIterator iter(phaseTimes); !iter.done(); iter.advance()) {
Phase phase;
size_t dagSlot;
size_t level;
iter.get(&phase, &dagSlot, &level);
MOZ_ASSERT(level < 4);
int64_t ownTime = phaseTimes[dagSlot][phase];
int64_t childTime = SumChildTimes(dagSlot, phase, phaseTimes);
if (ownTime > MaxUnaccountedTimeUS) {
JS_snprintf(buffer, sizeof(buffer), "%s: %.3fms", phases[phase].name, t(ownTime));
if (!fragments.append(make_string_copy(buffer)))
return UniqueChars(nullptr);
if (childTime && (ownTime - childTime) > MaxUnaccountedTimeUS) {
MOZ_ASSERT(level < 3);
JS_snprintf(buffer, sizeof(buffer), "%s: %.3fms", "Other", t(ownTime - childTime));
if (!fragments.append(make_string_copy(buffer)))
return UniqueChars(nullptr);
}
}
}
return Join(fragments, ", ");
}
UniqueChars
Statistics::formatDetailedMessage()
{
@ -919,14 +730,6 @@ Statistics::formatJsonPhaseTimes(PhaseTimeTable phaseTimes)
return Join(fragments, ",");
}
char16_t*
Statistics::formatMessage()
{
StatisticsSerializer ss(StatisticsSerializer::AsText);
formatData(ss, 0);
return ss.finishJSString();
}
Statistics::Statistics(JSRuntime* rt)
: runtime(rt),
startupTime(PRMJ_Now()),
@ -1012,18 +815,8 @@ Statistics::Statistics(JSRuntime* rt)
Statistics::~Statistics()
{
if (fp) {
StatisticsSerializer ss(StatisticsSerializer::AsText);
FormatPhaseTimes(ss, "", phaseTotals);
char* msg = ss.finishCString();
if (msg) {
fprintf(fp, "TOTALS\n%s\n\n-------\n", msg);
js_free(msg);
}
if (fp != stdout && fp != stderr)
fclose(fp);
}
if (fp && fp != stdout && fp != stderr)
fclose(fp);
}
JS::GCSliceCallback
@ -1314,7 +1107,7 @@ Statistics::endSCC(unsigned scc, int64_t start)
* as long as the total time it spends is at most 10ms.
*/
double
Statistics::computeMMU(int64_t window)
Statistics::computeMMU(int64_t window) const
{
MOZ_ASSERT(!slices.empty());

Просмотреть файл

@ -101,8 +101,6 @@ enum Stat {
STAT_LIMIT
};
class StatisticsSerializer;
struct ZoneGCStats
{
/* Number of zones collected in this GC. */
@ -186,7 +184,8 @@ struct Statistics
int64_t beginSCC();
void endSCC(unsigned scc, int64_t start);
char16_t* formatMessage();
UniqueChars formatCompactSliceMessage() const;
UniqueChars formatCompactSummaryMessage() const;
UniqueChars formatJsonMessage(uint64_t timestamp);
UniqueChars formatDetailedMessage();
@ -233,7 +232,7 @@ struct Statistics
size_t slicesLength() const { return slices.length(); }
/* Create a convenient typedef for referring tables of phase times. */
typedef int64_t (*PhaseTimeTable)[PHASE_LIMIT];
typedef int64_t const (*PhaseTimeTable)[PHASE_LIMIT];
private:
JSRuntime* runtime;
@ -277,7 +276,7 @@ struct Statistics
size_t preBytes;
/* Records the maximum GC pause in an API-controlled interval (in us). */
int64_t maxPauseInInterval;
mutable int64_t maxPauseInInterval;
/* Phases that are currently on stack. */
Phase phaseNesting[MAX_NESTING];
@ -309,10 +308,11 @@ struct Statistics
void recordPhaseEnd(Phase phase);
void gcDuration(int64_t* total, int64_t* maxPause);
void gcDuration(int64_t* total, int64_t* maxPause) const;
void sccDurations(int64_t* total, int64_t* maxPause);
void printStats();
bool formatData(StatisticsSerializer& ss, uint64_t timestamp);
UniqueChars formatCompactSlicePhaseTimes(PhaseTimeTable phaseTimes) const;
UniqueChars formatDetailedDescription();
UniqueChars formatDetailedSliceDescription(unsigned i, const SliceData& slice);
@ -323,7 +323,7 @@ struct Statistics
UniqueChars formatJsonSliceDescription(unsigned i, const SliceData& slice);
UniqueChars formatJsonPhaseTimes(PhaseTimeTable phaseTimes);
double computeMMU(int64_t resolution);
double computeMMU(int64_t resolution) const;
};
struct AutoGCSlice

Просмотреть файл

@ -3808,7 +3808,7 @@ IsOptimizableElementPropertyName(JSContext* cx, HandleValue key, MutableHandleId
static bool
TryAttachNativeGetValueElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
ICGetElem_Fallback* stub, HandleNativeObject obj,
HandleValue key)
HandleValue key, bool* attached)
{
RootedId id(cx);
if (!IsOptimizableElementPropertyName(cx, key, &id))
@ -3858,6 +3858,7 @@ TryAttachNativeGetValueElemStub(JSContext* cx, HandleScript script, jsbytecode*
return false;
stub->addNewStub(newStub);
*attached = true;
}
return true;
}
@ -3865,7 +3866,7 @@ TryAttachNativeGetValueElemStub(JSContext* cx, HandleScript script, jsbytecode*
static bool
TryAttachNativeGetAccessorElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
ICGetElem_Fallback* stub, HandleNativeObject obj,
HandleValue key, bool* attached)
HandleValue key, bool* attached, bool* isTemporarilyUnoptimizable)
{
MOZ_ASSERT(!*attached);
@ -3881,15 +3882,15 @@ TryAttachNativeGetAccessorElemStub(JSContext* cx, HandleScript script, jsbytecod
RootedObject baseHolder(cx);
if (!EffectlesslyLookupProperty(cx, obj, propName, &baseHolder, &shape))
return false;
if(!baseHolder || baseHolder->isNative())
if (!baseHolder || baseHolder->isNative())
return true;
HandleNativeObject holder = baseHolder.as<NativeObject>();
bool getterIsScripted = false;
bool isTemporarilyUnoptimizable = false;
if (IsCacheableGetPropCall(cx, obj, baseHolder, shape, &getterIsScripted,
&isTemporarilyUnoptimizable, /*isDOMProxy=*/false)) {
isTemporarilyUnoptimizable, /*isDOMProxy=*/false))
{
RootedFunction getter(cx, &shape->getterObject()->as<JSFunction>());
#if JS_HAS_NO_SUCH_METHOD
@ -4003,7 +4004,7 @@ IsNativeOrUnboxedDenseElementAccess(HandleObject obj, HandleValue key)
static bool
TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_Fallback* stub,
HandleValue lhs, HandleValue rhs, HandleValue res)
HandleValue lhs, HandleValue rhs, HandleValue res, bool* attached)
{
bool isCallElem = (JSOp(*pc) == JSOP_CALLELEM);
@ -4020,6 +4021,7 @@ TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_
return false;
stub->addNewStub(stringStub);
*attached = true;
return true;
}
@ -4038,6 +4040,7 @@ TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_
return false;
stub->addNewStub(argsStub);
*attached = true;
return true;
}
@ -4060,6 +4063,7 @@ TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_
return false;
stub->addNewStub(argsStub);
*attached = true;
return true;
}
}
@ -4074,6 +4078,7 @@ TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_
return false;
stub->addNewStub(denseStub);
*attached = true;
return true;
}
@ -4081,10 +4086,12 @@ TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_
if (obj->isNative() && rhs.isString()) {
RootedScript rootedScript(cx, script);
if (!TryAttachNativeGetValueElemStub(cx, rootedScript, pc, stub,
obj.as<NativeObject>(), rhs))
obj.as<NativeObject>(), rhs, attached))
{
return false;
}
if (*attached)
return true;
script = rootedScript;
}
@ -4098,6 +4105,7 @@ TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_
return false;
stub->addNewStub(unboxedStub);
*attached = true;
return true;
}
@ -4131,6 +4139,7 @@ TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_
return false;
stub->addNewStub(typedArrayStub);
*attached = true;
return true;
}
@ -4179,15 +4188,20 @@ DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_
if (stub->numOptimizedStubs() >= ICGetElem_Fallback::MAX_OPTIMIZED_STUBS) {
// TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
// But for now we just bail.
stub->noteUnoptimizableAccess();
attached = true;
}
// Try to attach an optimized getter stub.
bool isTemporarilyUnoptimizable = false;
if (!attached && lhs.isObject() && lhs.toObject().isNative() && rhs.isString()){
RootedScript rootedScript(cx, frame->script());
RootedNativeObject obj(cx, &lhs.toObject().as<NativeObject>());
if (!TryAttachNativeGetAccessorElemStub(cx, rootedScript, pc, stub, obj, rhs, &attached))
if (!TryAttachNativeGetAccessorElemStub(cx, rootedScript, pc, stub, obj, rhs, &attached,
&isTemporarilyUnoptimizable))
{
return false;
}
script = rootedScript;
}
@ -4209,11 +4223,11 @@ DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_
return true;
// Try to attach an optimized stub.
if (!TryAttachGetElemStub(cx, frame->script(), pc, stub, lhs, rhs, res))
if (!TryAttachGetElemStub(cx, frame->script(), pc, stub, lhs, rhs, res, &attached))
return false;
// If we ever add a way to note unoptimizable accesses here, propagate the
// isTemporarilyUnoptimizable state from TryAttachNativeGetElemStub to here.
if (!attached && !isTemporarilyUnoptimizable)
stub->noteUnoptimizableAccess();
return true;
}
@ -9948,7 +9962,7 @@ GetTemplateObjectForNative(JSContext* cx, HandleScript script, jsbytecode* pc,
return true;
}
if (native == js::array_concat) {
if (native == js::array_concat || native == js::array_slice) {
if (args.thisv().isObject() && !args.thisv().toObject().isSingleton()) {
res.set(NewFullyAllocatedArrayTryReuseGroup(cx, &args.thisv().toObject(), 0,
TenuredObject, /* forceAnalyze = */ true));
@ -12682,11 +12696,15 @@ ICGetIntrinsic_Constant::~ICGetIntrinsic_Constant()
{ }
ICGetProp_Primitive::ICGetProp_Primitive(JitCode* stubCode, ICStub* firstMonitorStub,
Shape* protoShape, uint32_t offset)
JSValueType primitiveType, Shape* protoShape,
uint32_t offset)
: ICMonitoredStub(GetProp_Primitive, stubCode, firstMonitorStub),
protoShape_(protoShape),
offset_(offset)
{ }
{
extra_ = uint16_t(primitiveType);
MOZ_ASSERT(JSValueType(extra_) == primitiveType);
}
ICGetPropNativeStub::ICGetPropNativeStub(ICStub::Kind kind, JitCode* stubCode,
ICStub* firstMonitorStub,

Просмотреть файл

@ -2638,6 +2638,7 @@ class ICGetElem_Fallback : public ICMonitoredFallbackStub
static const uint16_t EXTRA_NON_NATIVE = 0x1;
static const uint16_t EXTRA_NEGATIVE_INDEX = 0x2;
static const uint16_t EXTRA_UNOPTIMIZABLE_ACCESS = 0x4;
public:
static const uint32_t MAX_OPTIMIZED_STUBS = 16;
@ -2655,6 +2656,12 @@ class ICGetElem_Fallback : public ICMonitoredFallbackStub
bool hasNegativeIndex() const {
return extra_ & EXTRA_NEGATIVE_INDEX;
}
void noteUnoptimizableAccess() {
extra_ |= EXTRA_UNOPTIMIZABLE_ACCESS;
}
bool hadUnoptimizableAccess() const {
return extra_ & EXTRA_UNOPTIMIZABLE_ACCESS;
}
// Compiler for this stub kind.
class Compiler : public ICStubCompiler {
@ -4109,13 +4116,17 @@ class ICGetProp_Primitive : public ICMonitoredStub
// Fixed or dynamic slot offset.
uint32_t offset_;
ICGetProp_Primitive(JitCode* stubCode, ICStub* firstMonitorStub,
ICGetProp_Primitive(JitCode* stubCode, ICStub* firstMonitorStub, JSValueType primitiveType,
Shape* protoShape, uint32_t offset);
public:
HeapPtrShape& protoShape() {
return protoShape_;
}
JSValueType primitiveType() const {
return JSValueType(extra_);
}
static size_t offsetOfProtoShape() {
return offsetof(ICGetProp_Primitive, protoShape_);
}
@ -4155,7 +4166,7 @@ class ICGetProp_Primitive : public ICMonitoredStub
ICStub* getStub(ICStubSpace* space) {
RootedShape protoShape(cx, prototype_->as<NativeObject>().lastProperty());
return newStub<ICGetProp_Primitive>(space, getStubCode(), firstMonitorStub_,
protoShape, offset_);
primitiveType_, protoShape, offset_);
}
};
};

Просмотреть файл

@ -702,6 +702,80 @@ BaselineInspector::commonSetPropFunction(jsbytecode* pc, JSObject** holder, Shap
return true;
}
MIRType
BaselineInspector::expectedPropertyAccessInputType(jsbytecode* pc)
{
if (!hasBaselineScript())
return MIRType_Value;
const ICEntry& entry = icEntryFromPC(pc);
MIRType type = MIRType_None;
for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
MIRType stubType;
switch (stub->kind()) {
case ICStub::GetProp_Fallback:
if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
return MIRType_Value;
continue;
case ICStub::GetElem_Fallback:
if (stub->toGetElem_Fallback()->hadUnoptimizableAccess())
return MIRType_Value;
continue;
case ICStub::GetProp_Generic:
return MIRType_Value;
case ICStub::GetProp_ArgumentsLength:
case ICStub::GetElem_Arguments:
// Either an object or magic arguments.
return MIRType_Value;
case ICStub::GetProp_ArrayLength:
case ICStub::GetProp_Native:
case ICStub::GetProp_NativeDoesNotExist:
case ICStub::GetProp_NativePrototype:
case ICStub::GetProp_Unboxed:
case ICStub::GetProp_TypedObject:
case ICStub::GetProp_CallScripted:
case ICStub::GetProp_CallNative:
case ICStub::GetProp_CallDOMProxyNative:
case ICStub::GetProp_CallDOMProxyWithGenerationNative:
case ICStub::GetProp_DOMProxyShadowed:
case ICStub::GetElem_NativeSlot:
case ICStub::GetElem_NativePrototypeSlot:
case ICStub::GetElem_NativePrototypeCallNative:
case ICStub::GetElem_NativePrototypeCallScripted:
case ICStub::GetElem_String:
case ICStub::GetElem_Dense:
case ICStub::GetElem_TypedArray:
stubType = MIRType_Object;
break;
case ICStub::GetProp_Primitive:
stubType = MIRTypeFromValueType(stub->toGetProp_Primitive()->primitiveType());
break;
case ICStub::GetProp_StringLength:
stubType = MIRType_String;
break;
default:
MOZ_CRASH("Unexpected stub");
}
if (type != MIRType_None) {
if (type != stubType)
return MIRType_Value;
} else {
type = stubType;
}
}
return (type == MIRType_None) ? MIRType_Value : type;
}
bool
BaselineInspector::instanceOfData(jsbytecode* pc, Shape** shape, uint32_t* slot,
JSObject** prototypeObject)

Просмотреть файл

@ -104,6 +104,7 @@ class BaselineInspector
MIRType expectedResultType(jsbytecode* pc);
MCompare::CompareType expectedCompareType(jsbytecode* pc);
MIRType expectedBinaryArithSpecialization(jsbytecode* pc);
MIRType expectedPropertyAccessInputType(jsbytecode* pc);
bool hasSeenNonNativeGetElement(jsbytecode* pc);
bool hasSeenNegativeIndexGetElement(jsbytecode* pc);

Просмотреть файл

@ -7148,6 +7148,41 @@ CodeGenerator::visitArrayConcat(LArrayConcat* lir)
callVM(ArrayConcatDenseInfo, lir);
}
typedef JSObject* (*ArraySliceDenseFn)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
static const VMFunction ArraySliceDenseInfo = FunctionInfo<ArraySliceDenseFn>(array_slice_dense);
void
CodeGenerator::visitArraySlice(LArraySlice* lir)
{
Register object = ToRegister(lir->object());
Register begin = ToRegister(lir->begin());
Register end = ToRegister(lir->end());
Register temp1 = ToRegister(lir->temp1());
Register temp2 = ToRegister(lir->temp2());
Label call, fail;
// Try to allocate an object.
masm.createGCObject(temp1, temp2, lir->mir()->templateObj(), lir->mir()->initialHeap(), &fail);
// Fixup the group of the result in case it doesn't match the template object.
masm.loadPtr(Address(object, JSObject::offsetOfGroup()), temp2);
masm.storePtr(temp2, Address(temp1, JSObject::offsetOfGroup()));
masm.jump(&call);
{
masm.bind(&fail);
masm.movePtr(ImmPtr(nullptr), temp1);
}
masm.bind(&call);
pushArg(temp1);
pushArg(end);
pushArg(begin);
pushArg(object);
callVM(ArraySliceDenseInfo, lir);
}
typedef JSString* (*ArrayJoinFn)(JSContext*, HandleObject, HandleString);
static const VMFunction ArrayJoinInfo = FunctionInfo<ArrayJoinFn>(jit::ArrayJoin);

Просмотреть файл

@ -265,6 +265,7 @@ class CodeGenerator : public CodeGeneratorSpecific
void visitArrayPushV(LArrayPushV* lir);
void visitArrayPushT(LArrayPushT* lir);
void visitArrayConcat(LArrayConcat* lir);
void visitArraySlice(LArraySlice* lir);
void visitArrayJoin(LArrayJoin* lir);
void visitLoadUnboxedScalar(LLoadUnboxedScalar* lir);
void visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir);

Просмотреть файл

@ -7194,27 +7194,26 @@ IonBuilder::testSingletonProperty(JSObject* obj, PropertyName* name)
return nullptr;
}
bool
IonBuilder::testSingletonPropertyTypes(MDefinition* obj, JSObject* singleton, PropertyName* name,
bool* testObject, bool* testString)
JSObject*
IonBuilder::testSingletonPropertyTypes(MDefinition* obj, PropertyName* name)
{
// As for TestSingletonProperty, but the input is any value in a type set
// rather than a specific object. If testObject is set then the constant
// result can only be used after ensuring the input is an object.
*testObject = false;
*testString = false;
// rather than a specific object.
TemporaryTypeSet* types = obj->resultTypeSet();
if (types && types->unknownObject())
return false;
return nullptr;
JSObject* objectSingleton = types ? types->maybeSingleton() : nullptr;
if (objectSingleton)
return testSingletonProperty(objectSingleton, name) == singleton;
return testSingletonProperty(objectSingleton, name);
MIRType objType = obj->type();
if (objType == MIRType_Value && types)
objType = types->getKnownMIRType();
JSProtoKey key;
switch (obj->type()) {
switch (objType) {
case MIRType_String:
key = JSProto_String;
break;
@ -7232,23 +7231,14 @@ IonBuilder::testSingletonPropertyTypes(MDefinition* obj, JSObject* singleton, Pr
key = JSProto_Boolean;
break;
case MIRType_Object:
case MIRType_Value: {
case MIRType_Object: {
if (!types)
return false;
if (types->hasType(TypeSet::StringType())) {
key = JSProto_String;
*testString = true;
break;
}
if (!types->maybeObject())
return false;
return nullptr;
// For property accesses which may be on many objects, we just need to
// find a prototype common to all the objects; if that prototype
// has the singleton property, the access will not be on a missing property.
JSObject* singleton = nullptr;
for (unsigned i = 0; i < types->getObjectCount(); i++) {
TypeSet::ObjectKey* key = types->getObject(i);
if (!key)
@ -7258,35 +7248,40 @@ IonBuilder::testSingletonPropertyTypes(MDefinition* obj, JSObject* singleton, Pr
const Class* clasp = key->clasp();
if (!ClassHasEffectlessLookup(clasp) || ObjectHasExtraOwnProperty(compartment, key, name))
return false;
return nullptr;
if (key->unknownProperties())
return false;
return nullptr;
HeapTypeSetKey property = key->property(NameToId(name));
if (property.isOwnProperty(constraints()))
return false;
return nullptr;
if (JSObject* proto = key->proto().toObjectOrNull()) {
// Test this type.
if (testSingletonProperty(proto, name) != singleton)
return false;
JSObject* thisSingleton = testSingletonProperty(proto, name);
if (!thisSingleton)
return nullptr;
if (singleton) {
if (thisSingleton != singleton)
return nullptr;
} else {
singleton = thisSingleton;
}
} else {
// Can't be on the prototype chain with no prototypes...
return false;
return nullptr;
}
}
// If this is not a known object, a test will be needed.
*testObject = (obj->type() != MIRType_Object);
return true;
return singleton;
}
default:
return false;
return nullptr;
}
JSObject* proto = GetBuiltinPrototypePure(&script()->global(), key);
if (proto)
return testSingletonProperty(proto, name) == singleton;
return testSingletonProperty(proto, name);
return false;
return nullptr;
}
bool
@ -7731,6 +7726,8 @@ IonBuilder::jsop_getelem()
return pushTypeBarrier(ins, types, BarrierKind::TypeSet);
}
obj = maybeUnboxForPropertyAccess(obj);
bool emitted = false;
trackOptimizationAttempt(TrackedStrategy::GetElem_TypedObject);
@ -10009,6 +10006,21 @@ IonBuilder::shouldAbortOnPreliminaryGroups(MDefinition *obj)
return preliminary;
}
MDefinition*
IonBuilder::maybeUnboxForPropertyAccess(MDefinition* def)
{
if (def->type() != MIRType_Value)
return def;
MIRType type = inspector->expectedPropertyAccessInputType(pc);
if (type == MIRType_Value || !def->mightBeType(type))
return def;
MUnbox* unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
current->add(unbox);
return unbox;
}
bool
IonBuilder::jsop_getprop(PropertyName* name)
{
@ -10034,6 +10046,8 @@ IonBuilder::jsop_getprop(PropertyName* name)
return emitted;
}
obj = maybeUnboxForPropertyAccess(obj);
BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(),
obj, name, types);
@ -10301,24 +10315,19 @@ IonBuilder::getPropTryConstant(bool* emitted, MDefinition* obj, PropertyName* na
{
MOZ_ASSERT(*emitted == false);
JSObject* singleton = types ? types->maybeSingleton() : nullptr;
if (!singleton) {
trackOptimizationOutcome(TrackedOutcome::NotSingleton);
if (!types->mightBeMIRType(MIRType_Object)) {
// If we have not observed an object result here, don't look for a
// singleton constant.
trackOptimizationOutcome(TrackedOutcome::NotObject);
return true;
}
bool testObject, testString;
if (!testSingletonPropertyTypes(obj, singleton, name, &testObject, &testString))
JSObject* singleton = testSingletonPropertyTypes(obj, name);
if (!singleton)
return true;
// Property access is a known constant -- safe to emit.
MOZ_ASSERT(!testString || !testObject);
if (testObject)
current->add(MGuardObject::New(alloc(), obj));
else if (testString)
current->add(MGuardString::New(alloc(), obj));
else
obj->setImplicitlyUsedUnchecked();
obj->setImplicitlyUsedUnchecked();
pushConstant(ObjectValue(*singleton));

Просмотреть файл

@ -422,6 +422,7 @@ class IonBuilder
bool shouldAbortOnPreliminaryGroups(MDefinition *obj);
MDefinition* tryInnerizeWindow(MDefinition* obj);
MDefinition* maybeUnboxForPropertyAccess(MDefinition* def);
// jsop_getprop() helpers.
bool checkIsDefinitelyOptimizedArguments(MDefinition* obj, bool* isOptimizedArgs);
@ -745,6 +746,7 @@ class IonBuilder
InliningStatus inlineArrayPopShift(CallInfo& callInfo, MArrayPopShift::Mode mode);
InliningStatus inlineArrayPush(CallInfo& callInfo);
InliningStatus inlineArrayConcat(CallInfo& callInfo);
InliningStatus inlineArraySlice(CallInfo& callInfo);
InliningStatus inlineArrayJoin(CallInfo& callInfo);
InliningStatus inlineArraySplice(CallInfo& callInfo);
@ -940,8 +942,8 @@ class IonBuilder
MGetPropertyCache* getInlineableGetPropertyCache(CallInfo& callInfo);
JSObject* testSingletonProperty(JSObject* obj, PropertyName* name);
bool testSingletonPropertyTypes(MDefinition* obj, JSObject* singleton, PropertyName* name,
bool* testObject, bool* testString);
JSObject* testSingletonPropertyTypes(MDefinition* obj, PropertyName* name);
uint32_t getDefiniteSlot(TemporaryTypeSet* types, PropertyName* name, uint32_t* pnfixed,
BaselineInspector::ObjectGroupVector& convertUnboxedGroups);
MDefinition* convertUnboxedObjects(MDefinition* obj,

Просмотреть файл

@ -4945,6 +4945,39 @@ class LArrayConcat : public LCallInstructionHelper<1, 2, 2>
}
};
class LArraySlice : public LCallInstructionHelper<1, 3, 2>
{
public:
LIR_HEADER(ArraySlice)
LArraySlice(const LAllocation& obj, const LAllocation& begin, const LAllocation& end,
const LDefinition& temp1, const LDefinition& temp2) {
setOperand(0, obj);
setOperand(1, begin);
setOperand(2, end);
setTemp(0, temp1);
setTemp(1, temp2);
}
const MArraySlice* mir() const {
return mir_->toArraySlice();
}
const LAllocation* object() {
return getOperand(0);
}
const LAllocation* begin() {
return getOperand(1);
}
const LAllocation* end() {
return getOperand(2);
}
const LDefinition* temp1() {
return getTemp(0);
}
const LDefinition* temp2() {
return getTemp(1);
}
};
class LArrayJoin : public LCallInstructionHelper<1, 2, 0>
{
public:

Просмотреть файл

@ -238,6 +238,7 @@
_(ArrayPushV) \
_(ArrayPushT) \
_(ArrayConcat) \
_(ArraySlice) \
_(ArrayJoin) \
_(StoreElementHoleV) \
_(StoreElementHoleT) \

Просмотреть файл

@ -2904,6 +2904,23 @@ LIRGenerator::visitArrayConcat(MArrayConcat* ins)
assignSafepoint(lir, ins);
}
void
LIRGenerator::visitArraySlice(MArraySlice* ins)
{
MOZ_ASSERT(ins->type() == MIRType_Object);
MOZ_ASSERT(ins->object()->type() == MIRType_Object);
MOZ_ASSERT(ins->begin()->type() == MIRType_Int32);
MOZ_ASSERT(ins->end()->type() == MIRType_Int32);
LArraySlice* lir = new(alloc()) LArraySlice(useFixed(ins->object(), CallTempReg0),
useFixed(ins->begin(), CallTempReg1),
useFixed(ins->end(), CallTempReg2),
tempFixed(CallTempReg3),
tempFixed(CallTempReg4));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void
LIRGenerator::visitArrayJoin(MArrayJoin* ins)
{

Просмотреть файл

@ -207,6 +207,7 @@ class LIRGenerator : public LIRGeneratorSpecific
void visitArrayPopShift(MArrayPopShift* ins);
void visitArrayPush(MArrayPush* ins);
void visitArrayConcat(MArrayConcat* ins);
void visitArraySlice(MArraySlice* ins);
void visitArrayJoin(MArrayJoin* ins);
void visitLoadUnboxedScalar(MLoadUnboxedScalar* ins);
void visitLoadTypedArrayElementHole(MLoadTypedArrayElementHole* ins);

Просмотреть файл

@ -82,6 +82,8 @@ IonBuilder::inlineNativeCall(CallInfo& callInfo, JSFunction* target)
return inlineArrayPush(callInfo);
if (native == js::array_concat)
return inlineArrayConcat(callInfo);
if (native == js::array_slice)
return inlineArraySlice(callInfo);
if (native == js::array_splice)
return inlineArraySplice(callInfo);
@ -981,6 +983,107 @@ IonBuilder::inlineArrayConcat(CallInfo& callInfo)
return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineArraySlice(CallInfo& callInfo)
{
if (callInfo.constructing()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
return InliningStatus_NotInlined;
}
// Ensure |this| and result are objects.
if (getInlineReturnType() != MIRType_Object)
return InliningStatus_NotInlined;
if (callInfo.thisArg()->type() != MIRType_Object)
return InliningStatus_NotInlined;
// Arguments for the sliced region must be integers.
if (callInfo.argc() > 0) {
if (callInfo.getArg(0)->type() != MIRType_Int32)
return InliningStatus_NotInlined;
if (callInfo.argc() > 1) {
if (callInfo.getArg(1)->type() != MIRType_Int32)
return InliningStatus_NotInlined;
}
}
// |this| must be a dense array.
TemporaryTypeSet* thisTypes = callInfo.thisArg()->resultTypeSet();
if (!thisTypes)
return InliningStatus_NotInlined;
const Class* clasp = thisTypes->getKnownClass(constraints());
if (clasp != &ArrayObject::class_ && clasp != &UnboxedArrayObject::class_)
return InliningStatus_NotInlined;
if (thisTypes->hasObjectFlags(constraints(), OBJECT_FLAG_SPARSE_INDEXES |
OBJECT_FLAG_LENGTH_OVERFLOW))
{
trackOptimizationOutcome(TrackedOutcome::ArrayBadFlags);
return InliningStatus_NotInlined;
}
JSValueType unboxedType = JSVAL_TYPE_MAGIC;
if (clasp == &UnboxedArrayObject::class_) {
unboxedType = UnboxedArrayElementType(constraints(), callInfo.thisArg(), nullptr);
if (unboxedType == JSVAL_TYPE_MAGIC)
return InliningStatus_NotInlined;
}
// Watch out for indexed properties on the prototype.
if (ArrayPrototypeHasIndexedProperty(constraints(), script())) {
trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
return InliningStatus_NotInlined;
}
// The group of the result will be dynamically fixed up to match the input
// object, allowing us to handle 'this' objects that might have more than
// one group. Make sure that no singletons can be sliced here.
for (unsigned i = 0; i < thisTypes->getObjectCount(); i++) {
TypeSet::ObjectKey* key = thisTypes->getObject(i);
if (key && key->isSingleton())
return InliningStatus_NotInlined;
}
// Inline the call.
JSObject* templateObj = inspector->getTemplateObjectForNative(pc, js::array_slice);
if (!templateObj)
return InliningStatus_NotInlined;
callInfo.setImplicitlyUsedUnchecked();
MDefinition* begin;
if (callInfo.argc() > 0)
begin = callInfo.getArg(0);
else
begin = constant(Int32Value(0));
MDefinition* end;
if (callInfo.argc() > 1) {
end = callInfo.getArg(1);
} else if (clasp == &ArrayObject::class_) {
MElements* elements = MElements::New(alloc(), callInfo.thisArg());
current->add(elements);
end = MArrayLength::New(alloc(), elements);
current->add(end->toInstruction());
} else {
end = MUnboxedArrayLength::New(alloc(), callInfo.thisArg());
current->add(end->toInstruction());
}
MArraySlice* ins = MArraySlice::New(alloc(), constraints(),
callInfo.thisArg(), begin, end,
templateObj,
templateObj->group()->initialHeap(constraints()),
unboxedType);
current->add(ins);
current->push(ins);
if (!resumeAfter(ins))
return InliningStatus_Error;
return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineMathAbs(CallInfo& callInfo)
{

Просмотреть файл

@ -9153,6 +9153,70 @@ class MArrayConcat
}
};
// Array.prototype.slice on a dense array.
class MArraySlice
: public MTernaryInstruction,
public Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
{
AlwaysTenuredObject templateObj_;
gc::InitialHeap initialHeap_;
JSValueType unboxedType_;
MArraySlice(CompilerConstraintList* constraints, MDefinition* obj,
MDefinition* begin, MDefinition* end,
JSObject* templateObj, gc::InitialHeap initialHeap, JSValueType unboxedType)
: MTernaryInstruction(obj, begin, end),
templateObj_(templateObj),
initialHeap_(initialHeap),
unboxedType_(unboxedType)
{
setResultType(MIRType_Object);
setResultTypeSet(obj->resultTypeSet());
}
public:
INSTRUCTION_HEADER(ArraySlice)
static MArraySlice* New(TempAllocator& alloc, CompilerConstraintList* constraints,
MDefinition* obj, MDefinition* begin, MDefinition* end,
JSObject* templateObj, gc::InitialHeap initialHeap,
JSValueType unboxedType)
{
return new(alloc) MArraySlice(constraints, obj, begin, end, templateObj,
initialHeap, unboxedType);
}
MDefinition* object() const {
return getOperand(0);
}
MDefinition* begin() const {
return getOperand(1);
}
MDefinition* end() const {
return getOperand(2);
}
JSObject* templateObj() const {
return templateObj_;
}
gc::InitialHeap initialHeap() const {
return initialHeap_;
}
JSValueType unboxedType() const {
return unboxedType_;
}
AliasSet getAliasSet() const override {
return AliasSet::Store(AliasSet::BoxedOrUnboxedElements(unboxedType()) |
AliasSet::ObjectFields);
}
bool possiblyCalls() const override {
return true;
}
};
class MArrayJoin
: public MBinaryInstruction,
public MixPolicy<ObjectPolicy<0>, StringPolicy<1> >::Data

Просмотреть файл

@ -200,6 +200,7 @@ namespace jit {
_(ArrayPopShift) \
_(ArrayPush) \
_(ArrayConcat) \
_(ArraySlice) \
_(ArrayJoin) \
_(LoadTypedArrayElementHole) \
_(LoadTypedArrayElementStatic) \

Просмотреть файл

@ -26,12 +26,12 @@ void dbg_break() {}
// Note this is used for inter-AsmJS calls and may pass arguments and results in
// floating point registers even if the system ABI does not.
ABIArgGenerator::ABIArgGenerator() :
intRegIndex_(0),
ABIArgGenerator::ABIArgGenerator()
: intRegIndex_(0),
floatRegIndex_(0),
stackOffset_(0),
current_()
{}
{ }
ABIArg
ABIArgGenerator::next(MIRType type)
@ -498,15 +498,17 @@ InstMOV::IsTHIS(const Instruction& i)
}
Op2Reg
Operand2::toOp2Reg() {
Operand2::toOp2Reg() const {
return *(Op2Reg*)this;
}
O2RegImmShift
Op2Reg::toO2RegImmShift() {
Op2Reg::toO2RegImmShift() const {
return *(O2RegImmShift*)this;
}
O2RegRegShift
Op2Reg::toO2RegRegShift() {
Op2Reg::toO2RegRegShift() const {
return *(O2RegRegShift*)this;
}
@ -1257,7 +1259,7 @@ BOffImm::BOffImm(Instruction& inst)
}
Instruction*
BOffImm::getDest(Instruction* src)
BOffImm::getDest(Instruction* src) const
{
// TODO: It is probably worthwhile to verify that src is actually a branch.
// NOTE: This does not explicitly shift the offset of the destination left by 2,
@ -1425,120 +1427,119 @@ Assembler::as_nop()
}
static uint32_t
EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op, SetCond_ sc,
Assembler::Condition c)
EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op, SBit s, Assembler::Condition c)
{
return (int)op | (int)sc | (int) c | op2.encode() |
return (int)op | (int)s | (int)c | op2.encode() |
((dest == InvalidReg) ? 0 : RD(dest)) |
((src1 == InvalidReg) ? 0 : RN(src1));
}
BufferOffset
Assembler::as_alu(Register dest, Register src1, Operand2 op2,
ALUOp op, SetCond_ sc, Condition c)
ALUOp op, SBit s, Condition c)
{
return writeInst(EncodeAlu(dest, src1, op2, op, sc, c));
return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
}
BufferOffset
Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_mov(Register dest, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, InvalidReg, op2, OpMov, sc, c);
return as_alu(dest, InvalidReg, op2, OpMov, s, c);
}
/* static */ void
Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op, SetCond_ sc,
Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op, SBit s,
Condition c, uint32_t* pos)
{
WriteInstStatic(EncodeAlu(dest, src1, op2, op, sc, c), pos);
WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
}
/* static */ void
Assembler::as_mov_patch(Register dest, Operand2 op2, SetCond_ sc, Condition c, uint32_t* pos)
Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c, uint32_t* pos)
{
as_alu_patch(dest, InvalidReg, op2, OpMov, sc, c, pos);
as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
}
BufferOffset
Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_mvn(Register dest, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, InvalidReg, op2, OpMvn, sc, c);
return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
}
// Logical operations.
BufferOffset
Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_and(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpAnd, sc, c);
return as_alu(dest, src1, op2, OpAnd, s, c);
}
BufferOffset
Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_bic(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpBic, sc, c);
return as_alu(dest, src1, op2, OpBic, s, c);
}
BufferOffset
Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_eor(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpEor, sc, c);
return as_alu(dest, src1, op2, OpEor, s, c);
}
BufferOffset
Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_orr(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpOrr, sc, c);
return as_alu(dest, src1, op2, OpOrr, s, c);
}
// Mathematical operations.
BufferOffset
Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_adc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpAdc, sc, c);
return as_alu(dest, src1, op2, OpAdc, s, c);
}
BufferOffset
Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_add(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpAdd, sc, c);
return as_alu(dest, src1, op2, OpAdd, s, c);
}
BufferOffset
Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpSbc, sc, c);
return as_alu(dest, src1, op2, OpSbc, s, c);
}
BufferOffset
Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_sub(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpSub, sc, c);
return as_alu(dest, src1, op2, OpSub, s, c);
}
BufferOffset
Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpRsb, sc, c);
return as_alu(dest, src1, op2, OpRsb, s, c);
}
BufferOffset
Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
{
return as_alu(dest, src1, op2, OpRsc, sc, c);
return as_alu(dest, src1, op2, OpRsc, s, c);
}
// Test operations.
BufferOffset
Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, OpCmn, SetCond, c);
return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
}
BufferOffset
Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, OpCmp, SetCond, c);
return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
}
BufferOffset
Assembler::as_teq(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, OpTeq, SetCond, c);
return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
}
BufferOffset
Assembler::as_tst(Register src1, Operand2 op2, Condition c)
{
return as_alu(InvalidReg, src1, op2, OpTst, SetCond, c);
return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
}
static MOZ_CONSTEXPR_VAR Register NoAddend = { Registers::pc };
@ -1619,59 +1620,59 @@ static const int mull_tag = 0x90;
BufferOffset
Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn,
MULOp op, SetCond_ sc, Condition c)
MULOp op, SBit s, Condition c)
{
return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag);
return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c | mull_tag);
}
BufferOffset
Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c)
Assembler::as_mul(Register dest, Register src1, Register src2, SBit s, Condition c)
{
return as_genmul(dest, InvalidReg, src1, src2, OpmMul, sc, c);
return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
}
BufferOffset
Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
SetCond_ sc, Condition c)
SBit s, Condition c)
{
return as_genmul(dest, acc, src1, src2, OpmMla, sc, c);
return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
}
BufferOffset
Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, OpmUmaal, NoSetCond, c);
return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
}
BufferOffset
Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c)
{
return as_genmul(dest, acc, src1, src2, OpmMls, NoSetCond, c);
return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
}
BufferOffset
Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SBit s, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, OpmUmull, sc, c);
return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
}
BufferOffset
Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SBit s, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, OpmUmlal, sc, c);
return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
}
BufferOffset
Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SBit s, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, OpmSmull, sc, c);
return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
}
BufferOffset
Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
SetCond_ sc, Condition c)
SBit s, Condition c)
{
return as_genmul(destHI, destLO, src1, src2, OpmSmlal, sc, c);
return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
}
BufferOffset
@ -1716,7 +1717,8 @@ Assembler::as_dtr_patch(LoadStore ls, int size, Index mode, Register rt, DTRAddr
WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
}
class PoolHintData {
class PoolHintData
{
public:
enum LoadType {
// Set 0 to bogus, since that is the value most likely to be
@ -1759,19 +1761,19 @@ class PoolHintData {
destReg_ = destReg.id();
destType_ = destReg.isDouble();
}
Assembler::Condition getCond() {
Assembler::Condition getCond() const {
return Assembler::Condition(cond_ << 28);
}
Register getReg() {
Register getReg() const {
return Register::FromCode(destReg_);
}
VFPRegister getVFPReg() {
VFPRegister getVFPReg() const {
VFPRegister r = VFPRegister(destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
return r;
}
int32_t getIndex() {
int32_t getIndex() const {
return index_;
}
void setIndex(uint32_t index) {
@ -1780,7 +1782,7 @@ class PoolHintData {
MOZ_ASSERT(index_ == index);
}
LoadType getLoadType() {
LoadType getLoadType() const {
// If this *was* a PoolBranch, but the branch has already been bound
// then this isn't going to look like a real poolhintdata, but we still
// want to lie about it so everyone knows it *used* to be a branch.
@ -1789,7 +1791,7 @@ class PoolHintData {
return loadType_;
}
bool isValidPoolHint() {
bool isValidPoolHint() const {
// Most instructions cannot have a condition that is 0xf. Notable
// exceptions are blx and the entire NEON instruction set. For the
// purposes of pool loads, and possibly patched branches, the possible
@ -1799,7 +1801,8 @@ class PoolHintData {
}
};
union PoolHintPun {
union PoolHintPun
{
PoolHintData phd;
uint32_t raw;
};
@ -1844,8 +1847,7 @@ BufferOffset
Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
DTMMode mode, DTMWriteBack wb, Condition c)
{
return writeInst(0x08000000 | RN(rn) | ls |
mode | mask | c | wb);
return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
}
BufferOffset
@ -2056,12 +2058,14 @@ Assembler::as_bx(Register r, Condition c)
BufferOffset ret = writeInst(((int) c) | OpBx | r.code());
return ret;
}
void
Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest, BufferOffset afterPool)
{
BOffImm off = afterPool.diffB<BOffImm>(branch);
*dest = InstBImm(off, Always);
}
// Branch can branch to an immediate *or* to a register.
// Branches to immediates are pc relative, branches to registers are absolute.
BufferOffset
@ -2106,6 +2110,7 @@ Assembler::as_b(Label* l, Condition c)
MOZ_ASSERT(check == old);
return ret;
}
BufferOffset
Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
{
@ -2168,6 +2173,7 @@ Assembler::as_bl(Label* l, Condition c)
MOZ_ASSERT(check == old);
return ret;
}
BufferOffset
Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
{
@ -2195,6 +2201,7 @@ enum vfp_tags {
VfpTag = 0x0C000A00,
VfpArith = 0x02000000
};
BufferOffset
Assembler::writeVFPInst(vfp_size sz, uint32_t blob)
{
@ -2225,43 +2232,37 @@ Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
}
BufferOffset
Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
Condition c)
Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
{
return as_vfp_float(vd, vn, vm, OpvAdd, c);
}
BufferOffset
Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
Condition c)
Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
{
return as_vfp_float(vd, vn, vm, OpvDiv, c);
}
BufferOffset
Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
Condition c)
Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
{
return as_vfp_float(vd, vn, vm, OpvMul, c);
}
BufferOffset
Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
Condition c)
Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
{
return as_vfp_float(vd, vn, vm, OpvMul, c);
}
BufferOffset
Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
Condition c)
Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
{
MOZ_CRASH("Feature NYI");
}
BufferOffset
Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
Condition c)
Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
{
MOZ_CRASH("Feature NYI");
}
@ -2285,18 +2286,17 @@ Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
}
BufferOffset
Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
Condition c)
Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
{
return as_vfp_float(vd, vn, vm, OpvSub, c);
}
BufferOffset
Assembler::as_vcmp(VFPRegister vd, VFPRegister vm,
Condition c)
Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c)
{
return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
}
BufferOffset
Assembler::as_vcmpz(VFPRegister vd, Condition c)
{
@ -2309,6 +2309,7 @@ Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
{
return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
}
// Transfer between Core and VFP.
// Unlike the next function, moving between the core registers and vfp registers
@ -2339,15 +2340,13 @@ Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c
MOZ_ASSERT(idx == 0);
}
if (vt2 == InvalidReg) {
return writeVFPInst(sz, WordTransfer | f2c | c |
RT(vt1) | maybeRN(vt2) | VN(vm) | idx);
} else {
// We are doing a 64 bit transfer.
return writeVFPInst(sz, DoubleTransfer | f2c | c |
RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
}
if (vt2 == InvalidReg)
return writeVFPInst(sz, WordTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VN(vm) | idx);
// We are doing a 64 bit transfer.
return writeVFPInst(sz, DoubleTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
}
enum vcvt_destFloatness {
VcvtToInteger = 1 << 18,
VcvtToFloat = 0 << 18
@ -2384,9 +2383,9 @@ Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
vcvt_Signedness opSign;
vcvt_toZero doToZero = VcvtToFPSCR;
MOZ_ASSERT(vd.isFloat() || vm.isFloat());
if (vd.isSingle() || vm.isSingle()) {
if (vd.isSingle() || vm.isSingle())
sz = IsSingle;
}
if (vd.isFloat()) {
destFloat = VcvtToFloat;
opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
@ -2459,6 +2458,7 @@ Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
}
BufferOffset
Assembler::as_vmrs(Register r, Condition c)
{
@ -2577,7 +2577,6 @@ Assembler::retarget(Label* label, Label* target)
}
static int stopBKPT = -1;
void
Assembler::as_bkpt()
@ -2632,6 +2631,7 @@ Assembler::GetBranchOffset(const Instruction* i_)
i->extractImm(&dest);
return dest.decode();
}
void
Assembler::RetargetNearBranch(Instruction* i, int offset, bool final)
{
@ -2667,7 +2667,8 @@ Assembler::RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest, Cond
}
struct PoolHeader : Instruction {
struct PoolHeader : Instruction
{
struct Header
{
// The size should take into account the pool header.
@ -2708,6 +2709,7 @@ struct PoolHeader : Instruction {
Header tmp(this);
return tmp.isNatural;
}
static bool IsTHIS(const Instruction& i) {
return (*i.raw() & 0xffff0000) == 0xffff0000;
}
@ -2718,12 +2720,10 @@ struct PoolHeader : Instruction {
}
};
void
Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
{
static_assert(sizeof(PoolHeader) == 4,
"PoolHandler must have the correct size.");
static_assert(sizeof(PoolHeader) == 4, "PoolHandler must have the correct size.");
uint8_t* pool = start + 4;
// Go through the usual rigmarole to get the size of the pool.
pool += p->getPoolSize();
@ -2735,7 +2735,6 @@ Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
*(PoolHeader*)start = header;
}
// The size of an arbitrary 32-bit call in the instruction stream. On ARM this
// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
uint32_t
@ -2743,6 +2742,7 @@ Assembler::PatchWrite_NearCallSize()
{
return sizeof(uint32_t);
}
void
Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
{
@ -2754,8 +2754,8 @@ Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall
new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always);
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), 4);
}
void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
PatchedImmPtr expectedValue)
@ -2796,7 +2796,6 @@ Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
*(raw - 1) = imm.value;
}
uint8_t*
Assembler::NextInstruction(uint8_t* inst_, uint32_t* count)
{
@ -2821,7 +2820,8 @@ InstIsGuard(Instruction* inst, const PoolHeader** ph)
}
static bool
InstIsBNop(Instruction* inst) {
InstIsBNop(Instruction* inst)
{
// In some special situations, it is necessary to insert a NOP into the
// instruction stream that nobody knows about, since nobody should know
// about it, make sure it gets skipped when Instruction::next() is called.
@ -3024,13 +3024,14 @@ void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
Imm8 imm8 = Imm8(heapSize);
MOZ_ASSERT(!imm8.invalid);
*inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCond, Always);
*inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
// NOTE: we don't update the Auto Flush Cache! this function is currently
// only called from within AsmJSModule::patchHeapAccesses, which does that
// for us. Don't call this!
}
InstructionIterator::InstructionIterator(Instruction* i_) : i(i_)
InstructionIterator::InstructionIterator(Instruction* i_)
: i(i_)
{
// Work around pools with an artificial pool guard and around nop-fill.
i = i->skipPool();

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -29,8 +29,8 @@ ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
// Compare payload regs of R0 and R1.
Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
masm.cmp32(R0.payloadReg(), R1.payloadReg());
masm.ma_mov(Imm32(1), R0.payloadReg(), NoSetCond, cond);
masm.ma_mov(Imm32(0), R0.payloadReg(), NoSetCond, Assembler::InvertCondition(cond));
masm.ma_mov(Imm32(1), R0.payloadReg(), LeaveCC, cond);
masm.ma_mov(Imm32(0), R0.payloadReg(), LeaveCC, Assembler::InvertCondition(cond));
// Result is implicitly boxed already.
masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
@ -57,7 +57,7 @@ ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm)
masm.compareDouble(FloatReg0, FloatReg1);
masm.ma_mov(Imm32(0), dest);
masm.ma_mov(Imm32(1), dest, NoSetCond, cond);
masm.ma_mov(Imm32(1), dest, LeaveCC, cond);
masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
EmitReturnFromIC(masm);
@ -93,7 +93,7 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
Label maybeNegZero, revertRegister;
switch(op_) {
case JSOP_ADD:
masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCC);
// Just jump to failure on overflow. R0 and R1 are preserved, so we can
// just jump to the next stub.
@ -104,7 +104,7 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
masm.mov(scratchReg, R0.payloadReg());
break;
case JSOP_SUB:
masm.ma_sub(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
masm.ma_sub(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCC);
masm.j(Assembler::Overflow, &failure);
masm.mov(scratchReg, R0.payloadReg());
break;

Просмотреть файл

@ -135,7 +135,7 @@ CodeGeneratorARM::visitCompare(LCompare* comp)
else
masm.ma_cmp(ToRegister(left), ToOperand(right));
masm.ma_mov(Imm32(0), ToRegister(def));
masm.ma_mov(Imm32(1), ToRegister(def), NoSetCond, cond);
masm.ma_mov(Imm32(1), ToRegister(def), LeaveCC, cond);
}
void
@ -380,9 +380,9 @@ CodeGeneratorARM::visitAddI(LAddI* ins)
const LDefinition* dest = ins->getDef(0);
if (rhs->isConstant())
masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
else
masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCC);
if (ins->snapshot())
bailoutIf(Assembler::Overflow, ins->snapshot());
@ -396,9 +396,9 @@ CodeGeneratorARM::visitSubI(LSubI* ins)
const LDefinition* dest = ins->getDef(0);
if (rhs->isConstant())
masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
else
masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCC);
if (ins->snapshot())
bailoutIf(Assembler::Overflow, ins->snapshot());
@ -426,7 +426,7 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
// TODO: move these to ma_mul.
switch (constant) {
case -1:
masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCC);
break;
case 0:
masm.ma_mov(Imm32(0), ToRegister(dest));
@ -436,7 +436,7 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
masm.ma_mov(ToRegister(lhs), ToRegister(dest));
return; // Escape overflow check;
case 2:
masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCC);
// Overflow is handled later.
break;
default: {
@ -646,7 +646,7 @@ CodeGeneratorARM::visitDivPowTwoI(LDivPowTwoI* ins)
MDiv* mir = ins->mir();
if (!mir->isTruncated()) {
// If the remainder is != 0, bailout since this must be a double.
masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCond);
masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCC);
bailoutIf(Assembler::NonZero, ins->snapshot());
}
@ -811,11 +811,11 @@ CodeGeneratorARM::visitModPowTwoI(LModPowTwoI* ins)
Label fin;
// bug 739870, jbramley has a different sequence that may help with speed
// here.
masm.ma_mov(in, out, SetCond);
masm.ma_mov(in, out, SetCC);
masm.ma_b(&fin, Assembler::Zero);
masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
masm.ma_rsb(Imm32(0), out, LeaveCC, Assembler::Signed);
masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
masm.ma_rsb(Imm32(0), out, SetCC, Assembler::Signed);
if (mir->canBeNegativeDividend()) {
if (!mir->isTruncated()) {
MOZ_ASSERT(mir->fallible());
@ -1100,8 +1100,8 @@ CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Reg
int32_t cases = mir->numCases();
// Lower value with low value.
masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
masm.ma_sub(index, Imm32(mir->low()), index, SetCC);
masm.ma_rsb(index, Imm32(cases - 1), index, SetCC, Assembler::NotSigned);
// Inhibit pools within the following sequence because we are indexing into
// a pc relative table. The region will have one instruction for ma_ldr, one
// for ma_b, and each table case takes one word.
@ -1612,8 +1612,8 @@ CodeGeneratorARM::visitNotD(LNotD* ins)
} else {
masm.as_vmrs(pc);
masm.ma_mov(Imm32(0), dest);
masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Equal);
masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Overflow);
}
}
@ -1640,8 +1640,8 @@ CodeGeneratorARM::visitNotF(LNotF* ins)
} else {
masm.as_vmrs(pc);
masm.ma_mov(Imm32(0), dest);
masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Equal);
masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Overflow);
}
}
@ -1793,9 +1793,9 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
if (isFloat) {
VFPRegister vd(ToFloatRegister(ins->output()));
if (size == 32)
masm.ma_vldr(Operand(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
else
masm.ma_vldr(Operand(HeapReg, ptrImm), vd, Assembler::Always);
masm.ma_vldr(Address(HeapReg, ptrImm), vd, Assembler::Always);
} else {
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
ToRegister(ins->output()), Offset, Assembler::Always);
@ -1826,17 +1826,17 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
FloatRegister dst = ToFloatRegister(ins->output());
VFPRegister vd(dst);
if (size == 32) {
masm.ma_vldr(Operand(GlobalReg, AsmJSNaN32GlobalDataOffset - AsmJSGlobalRegBias),
masm.ma_vldr(Address(GlobalReg, AsmJSNaN32GlobalDataOffset - AsmJSGlobalRegBias),
vd.singleOverlay(), Assembler::AboveOrEqual);
masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
} else {
masm.ma_vldr(Operand(GlobalReg, AsmJSNaN64GlobalDataOffset - AsmJSGlobalRegBias),
masm.ma_vldr(Address(GlobalReg, AsmJSNaN64GlobalDataOffset - AsmJSGlobalRegBias),
vd, Assembler::AboveOrEqual);
masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below);
}
} else {
Register d = ToRegister(ins->output());
masm.ma_mov(Imm32(0), d, NoSetCond, Assembler::AboveOrEqual);
masm.ma_mov(Imm32(0), d, LeaveCC, Assembler::AboveOrEqual);
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
}
memoryBarrier(mir->barrierAfter());
@ -1870,9 +1870,9 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
if (isFloat) {
VFPRegister vd(ToFloatRegister(ins->value()));
if (size == 32)
masm.ma_vstr(vd.singleOverlay(), Operand(HeapReg, ptrImm), Assembler::Always);
masm.ma_vstr(vd.singleOverlay(), Address(HeapReg, ptrImm), Assembler::Always);
else
masm.ma_vstr(vd, Operand(HeapReg, ptrImm), Assembler::Always);
masm.ma_vstr(vd, Address(HeapReg, ptrImm), Assembler::Always);
} else {
masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
ToRegister(ins->value()), Offset, Assembler::Always);
@ -2089,7 +2089,7 @@ void
CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
{
const MAsmJSPassStackArg* mir = ins->mir();
Operand dst(StackPointer, mir->spOffset());
Address dst(StackPointer, mir->spOffset());
if (ins->arg()->isConstant()) {
//masm.as_bkpt();
masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
@ -2231,9 +2231,9 @@ CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
} else if (mir->type() == MIRType_Float32) {
VFPRegister vd(ToFloatRegister(ins->output()));
masm.ma_vldr(Operand(GlobalReg, addr), vd.singleOverlay());
masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay());
} else {
masm.ma_vldr(Operand(GlobalReg, addr), ToFloatRegister(ins->output()));
masm.ma_vldr(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
}
}
@ -2250,9 +2250,9 @@ CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
} else if (type == MIRType_Float32) {
VFPRegister vd(ToFloatRegister(ins->value()));
masm.ma_vstr(vd.singleOverlay(), Operand(GlobalReg, addr));
masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr));
} else {
masm.ma_vstr(ToFloatRegister(ins->value()), Operand(GlobalReg, addr));
masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
}
}
@ -2275,7 +2275,7 @@ CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc* ins)
{
const MAsmJSLoadFFIFunc* mir = ins->mir();
masm.ma_ldr(Operand(GlobalReg, mir->globalDataOffset() - AsmJSGlobalRegBias),
masm.ma_ldr(Address(GlobalReg, mir->globalDataOffset() - AsmJSGlobalRegBias),
ToRegister(ins->output()));
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -40,28 +40,27 @@ class MacroAssemblerARM : public Assembler
public:
// Higher level tag testing code.
Operand ToPayload(Operand base) {
// TODO: Can probably remove the Operand versions.
Operand ToPayload(Operand base) const {
return Operand(Register::FromCode(base.base()), base.disp());
}
Address ToPayload(Address base) {
return ToPayload(Operand(base)).toAddress();
Address ToPayload(const Address& base) const {
return base;
}
protected:
Operand ToType(Operand base) {
Operand ToType(Operand base) const {
return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void*));
}
Address ToType(Address base) {
Address ToType(const Address& base) const {
return ToType(Operand(base)).toAddress();
}
Operand ToPayloadAfterStackPush(Operand base) {
Register baseReg = Register::FromCode(base.base());
Address ToPayloadAfterStackPush(const Address& base) const {
// If we are based on StackPointer, pass over the type tag just pushed.
if (baseReg == StackPointer)
return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void*));
else
return ToPayload(base);
if (base.base == StackPointer)
return Address(base.base, base.offset + sizeof(void *));
return ToPayload(base);
}
public:
@ -106,17 +105,17 @@ class MacroAssemblerARM : public Assembler
// instructions.
private:
bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
SetCond_ sc, Condition c);
SBit s, Condition c);
public:
void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_alu(Register src1, Imm32 imm, Register dest,
ALUOp op,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_nop();
void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
@ -136,12 +135,12 @@ class MacroAssemblerARM : public Assembler
// ALU based ops
// mov
void ma_mov(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_mov(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_mov(ImmWord imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_mov(ImmGCPtr ptr, Register dest);
@ -160,98 +159,98 @@ class MacroAssemblerARM : public Assembler
// Move not (dest <- ~src)
void ma_mvn(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_mvn(Register src1, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
// Negate (dest <- -src) implemented as rsb dest, src, 0
void ma_neg(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
// And
void ma_and(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_and(Register src1, Register src2, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_and(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_and(Imm32 imm, Register src1, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
void ma_bic(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
// Exclusive or
void ma_eor(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_eor(Register src1, Register src2, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_eor(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_eor(Imm32 imm, Register src1, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
// Or
void ma_orr(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_orr(Register src1, Register src2, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_orr(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
void ma_orr(Imm32 imm, Register src1, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
SBit s = LeaveCC, Condition c = Always);
// Arithmetic based ops.
// Add with carry:
void ma_adc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_adc(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_adc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_adc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_adc(Register src, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_adc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
// Add:
void ma_add(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Imm32 op, Register dest, SBit s = LeaveCC, Condition c = Always);
// Subtract with carry:
void ma_sbc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sbc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sbc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sbc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sbc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
// Subtract:
void ma_sub(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Imm32 op, Register dest, SBit s = LeaveCC, Condition c = Always);
// Reverse subtract:
void ma_rsb(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsb(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsb(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Register src1, Imm32 op2, Register dest, SBit s = LeaveCC, Condition c = Always);
// Reverse subtract with carry:
void ma_rsc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
// Compares/tests.
// Compare negative (sets condition codes as src1 + src2 would):
@ -309,11 +308,11 @@ class MacroAssemblerARM : public Assembler
void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
void ma_str(Register rt, const Operand& addr, Index mode = Offset, Condition cc = Always);
void ma_dtr(LoadStore ls, Register rt, const Operand& addr, Index mode, Condition cc);
void ma_str(Register rt, const Address& addr, Index mode = Offset, Condition cc = Always);
void ma_dtr(LoadStore ls, Register rt, const Address& addr, Index mode, Condition cc);
void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
void ma_ldr(const Operand& addr, Register rt, Index mode = Offset, Condition cc = Always);
void ma_ldr(const Address& addr, Register rt, Index mode = Offset, Condition cc = Always);
void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
@ -407,15 +406,15 @@ class MacroAssemblerARM : public Assembler
void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);
BufferOffset ma_vdtr(LoadStore ls, const Operand& addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(const Operand& addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(const Address& addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, const Operand& addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, const Address& addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift,
int32_t offset, Condition cc = Always);
@ -665,8 +664,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void push(ImmMaybeNurseryPtr imm) {
push(noteMaybeNurseryPtr(imm));
}
void push(const Address& address) {
ma_ldr(Operand(address.base, address.offset), ScratchRegister);
void push(const Address& addr) {
ma_ldr(addr, ScratchRegister);
ma_push(ScratchRegister);
}
void push(Register reg) {
@ -729,16 +728,16 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void jump(Register reg) {
ma_bx(reg);
}
void jump(const Address& address) {
ma_ldr(Operand(address.base, address.offset), ScratchRegister);
void jump(const Address& addr) {
ma_ldr(addr, ScratchRegister);
ma_bx(ScratchRegister);
}
void neg32(Register reg) {
ma_neg(reg, reg, SetCond);
ma_neg(reg, reg, SetCC);
}
void negl(Register reg) {
ma_neg(reg, reg, SetCond);
ma_neg(reg, reg, SetCC);
}
void test32(Register lhs, Register rhs) {
ma_tst(lhs, rhs);
@ -746,8 +745,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void test32(Register lhs, Imm32 imm) {
ma_tst(lhs, imm);
}
void test32(const Address& address, Imm32 imm) {
ma_ldr(Operand(address.base, address.offset), ScratchRegister);
void test32(const Address& addr, Imm32 imm) {
ma_ldr(addr, ScratchRegister);
ma_tst(ScratchRegister, imm);
}
void testPtr(Register lhs, Register rhs) {
@ -873,7 +872,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
void loadInt32OrDouble(const Operand& src, FloatRegister dest);
void loadInt32OrDouble(const Address& src, FloatRegister dest);
void loadInt32OrDouble(Register base, Register index,
FloatRegister dest, int32_t shift = defaultShift);
void loadConstantDouble(double dp, FloatRegister dest);
@ -909,7 +908,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
if (lhs.getTag() == Operand::OP2) {
branch32(cond, lhs.toReg(), rhs, label);
} else {
ma_ldr(lhs, ScratchRegister);
ma_ldr(lhs.toAddress(), ScratchRegister);
branch32(cond, ScratchRegister, rhs, label);
}
}
@ -918,7 +917,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
branch32(cond, lhs.toReg(), rhs, label);
} else {
// branch32 will use ScratchRegister.
ma_ldr(lhs, secondScratchReg_);
ma_ldr(lhs.toAddress(), secondScratchReg_);
branch32(cond, secondScratchReg_, rhs, label);
}
}
@ -1137,7 +1136,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
if (dest.isFloat())
loadInt32OrDouble(Operand(address), dest.fpu());
loadInt32OrDouble(address, dest.fpu());
else
ma_ldr(address, dest.gpr());
}
@ -1195,29 +1194,26 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_mov(s1, d1);
}
void storeValue(ValueOperand val, Operand dst);
void storeValue(ValueOperand val, const Address& dst);
void storeValue(ValueOperand val, const BaseIndex& dest);
void storeValue(JSValueType type, Register reg, BaseIndex dest) {
ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
storeValue(type, reg, Address(ScratchRegister, dest.offset));
}
void storeValue(ValueOperand val, const Address& dest) {
storeValue(val, Operand(dest));
}
void storeValue(JSValueType type, Register reg, Address dest) {
ma_str(reg, dest);
ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), secondScratchReg_);
ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
}
void storeValue(const Value& val, Address dest) {
void storeValue(const Value& val, const Address& dest) {
jsval_layout jv = JSVAL_TO_IMPL(val);
ma_mov(Imm32(jv.s.tag), secondScratchReg_);
ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
ma_str(secondScratchReg_, ToType(dest));
if (val.isMarkable())
ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())), secondScratchReg_);
else
ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
ma_str(secondScratchReg_, dest);
ma_str(secondScratchReg_, ToPayload(dest));
}
void storeValue(const Value& val, BaseIndex dest) {
ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
@ -1247,11 +1243,11 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void pushValue(const Address& addr);
void storePayload(const Value& val, Operand dest);
void storePayload(Register src, Operand dest);
void storePayload(const Value& val, const Address& dest);
void storePayload(Register src, const Address& dest);
void storePayload(const Value& val, const BaseIndex& dest);
void storePayload(Register src, const BaseIndex& dest);
void storeTypeTag(ImmTag tag, Operand dest);
void storeTypeTag(ImmTag tag, const Address& dest);
void storeTypeTag(ImmTag tag, const BaseIndex& dest);
void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
@ -1443,7 +1439,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void storePtr(Register src, const BaseIndex& address);
void storePtr(Register src, AbsoluteAddress dest);
void storeDouble(FloatRegister src, Address addr) {
ma_vstr(src, Operand(addr));
ma_vstr(src, addr);
}
void storeDouble(FloatRegister src, BaseIndex addr) {
uint32_t scale = Imm32::ShiftOf(addr.scale).value;
@ -1453,10 +1449,10 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_vmov(src, dest);
}
void storeFloat32(FloatRegister src, Address addr) {
ma_vstr(VFPRegister(src).singleOverlay(), Operand(addr));
void storeFloat32(FloatRegister src, const Address& addr) {
ma_vstr(VFPRegister(src).singleOverlay(), addr);
}
void storeFloat32(FloatRegister src, BaseIndex addr) {
void storeFloat32(FloatRegister src, const BaseIndex& addr) {
uint32_t scale = Imm32::ShiftOf(addr.scale).value;
ma_vstr(VFPRegister(src).singleOverlay(), addr.base, addr.index, scale, addr.offset);
}
@ -1702,9 +1698,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void clampIntToUint8(Register reg) {
// Look at (reg >> 8) if it is 0, then reg shouldn't be clamped if it is
// <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
as_mov(ScratchRegister, asr(reg, 8), SetCond);
ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
ma_mov(Imm32(0), reg, NoSetCond, Signed);
as_mov(ScratchRegister, asr(reg, 8), SetCC);
ma_mov(Imm32(0xff), reg, LeaveCC, NotEqual);
ma_mov(Imm32(0), reg, LeaveCC, Signed);
}
void incrementInt32Value(const Address& addr) {
@ -1782,7 +1778,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
emitSet(Assembler::Condition cond, Register dest)
{
ma_mov(Imm32(0), dest);
ma_mov(Imm32(1), dest, NoSetCond, cond);
ma_mov(Imm32(1), dest, LeaveCC, cond);
}
template <typename T1, typename T2>
@ -1859,12 +1855,12 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void computeEffectiveAddress(const Address& address, Register dest) {
ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
ma_add(address.base, Imm32(address.offset), dest, LeaveCC);
}
void computeEffectiveAddress(const BaseIndex& address, Register dest) {
ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, NoSetCond);
ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, LeaveCC);
if (address.offset)
ma_add(dest, Imm32(address.offset), dest, NoSetCond);
ma_add(dest, Imm32(address.offset), dest, LeaveCC);
}
void floor(FloatRegister input, Register output, Label* handleNotAnInt);
void floorf(FloatRegister input, Register output, Label* handleNotAnInt);
@ -1901,19 +1897,11 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_add(addr.baseReg(), Imm32(addr.disp()), dest);
}
void stackCheck(ImmWord limitAddr, Label* label) {
int* foo = 0;
*foo = 5;
movePtr(limitAddr, ScratchRegister);
ma_ldr(Address(ScratchRegister, 0), ScratchRegister);
ma_cmp(ScratchRegister, StackPointer);
ma_b(label, Assembler::AboveOrEqual);
}
void abiret() {
as_bx(lr);
}
void ma_storeImm(Imm32 c, const Operand& dest) {
void ma_storeImm(Imm32 c, const Address& dest) {
ma_mov(c, lr);
ma_str(lr, dest);
}

Просмотреть файл

@ -38,43 +38,36 @@ MoveEmitterARM::~MoveEmitterARM()
assertDone();
}
Operand
Address
MoveEmitterARM::cycleSlot(uint32_t slot, uint32_t subslot) const
{
int32_t offset = masm.framePushed() - pushedAtCycle_;
MOZ_ASSERT(offset < 4096 && offset > -4096);
return Operand(StackPointer, offset + slot * sizeof(double) + subslot);
return Address(StackPointer, offset + slot * sizeof(double) + subslot);
}
// THIS IS ALWAYS AN LDRAddr. It should not be wrapped in an operand, methinks.
Operand
Address
MoveEmitterARM::spillSlot() const
{
int32_t offset = masm.framePushed() - pushedAtSpill_;
MOZ_ASSERT(offset < 4096 && offset > -4096);
return Operand(StackPointer, offset);
return Address(StackPointer, offset);
}
Operand
MoveEmitterARM::toOperand(const MoveOperand& operand, bool isFloat) const
Address
MoveEmitterARM::toAddress(const MoveOperand& operand) const
{
if (operand.isMemoryOrEffectiveAddress()) {
if (operand.base() != StackPointer) {
MOZ_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
return Operand(operand.base(), operand.disp());
}
MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
MOZ_ASSERT(operand.disp() >= 0);
// Otherwise, the stack offset may need to be adjusted.
return Operand(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
if (operand.base() != StackPointer) {
MOZ_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
return Operand(operand.base(), operand.disp()).toAddress();
}
if (operand.isGeneralReg())
return Operand(operand.reg());
MOZ_ASSERT(operand.disp() >= 0);
MOZ_ASSERT(operand.isFloatReg());
return Operand(operand.floatReg());
// Otherwise, the stack offset may need to be adjusted.
return Address(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
}
Register
@ -113,7 +106,7 @@ MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
case MoveOp::FLOAT32:
if (to.isMemory()) {
VFPRegister temp = ScratchFloat32Reg;
masm.ma_vldr(toOperand(to, true), temp);
masm.ma_vldr(toAddress(to), temp);
// Since it is uncertain if the load will be aligned or not
// just fill both of them with the same value.
masm.ma_vstr(temp, cycleSlot(slotId, 0));
@ -128,7 +121,7 @@ MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
case MoveOp::DOUBLE:
if (to.isMemory()) {
FloatRegister temp = ScratchDoubleReg;
masm.ma_vldr(toOperand(to, true), temp);
masm.ma_vldr(toAddress(to), temp);
masm.ma_vstr(temp, cycleSlot(slotId, 0));
} else {
masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
@ -139,7 +132,7 @@ MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
// an non-vfp value
if (to.isMemory()) {
Register temp = tempReg();
masm.ma_ldr(toOperand(to, false), temp);
masm.ma_ldr(toAddress(to), temp);
masm.ma_str(temp, cycleSlot(0,0));
} else {
if (to.reg() == spilledReg_) {
@ -170,7 +163,7 @@ MoveEmitterARM::completeCycle(const MoveOperand& from, const MoveOperand& to, Mo
if (to.isMemory()) {
FloatRegister temp = ScratchDoubleReg;
masm.ma_vldr(cycleSlot(slotId, 0), temp);
masm.ma_vstr(temp, toOperand(to, true));
masm.ma_vstr(temp, toAddress(to));
} else {
uint32_t offset = 0;
if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1)
@ -184,7 +177,7 @@ MoveEmitterARM::completeCycle(const MoveOperand& from, const MoveOperand& to, Mo
if (to.isMemory()) {
Register temp = tempReg();
masm.ma_ldr(cycleSlot(slotId, 0), temp);
masm.ma_str(temp, toOperand(to, false));
masm.ma_str(temp, toAddress(to));
} else {
if (to.reg() == spilledReg_) {
// Make sure we don't re-clobber the spilled register later.
@ -214,21 +207,14 @@ MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to)
masm.ma_ldr(spillSlot(), spilledReg_);
spilledReg_ = InvalidReg;
}
switch (toOperand(to, false).getTag()) {
case Operand::OP2:
// secretly must be a register
if (to.isMemoryOrEffectiveAddress())
masm.ma_str(from.reg(), toAddress(to));
else
masm.ma_mov(from.reg(), to.reg());
break;
case Operand::MEM:
masm.ma_str(from.reg(), toOperand(to, false));
break;
default:
MOZ_CRASH("strange move!");
}
} else if (to.isGeneralReg()) {
MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
if (from.isMemory())
masm.ma_ldr(toOperand(from, false), to.reg());
masm.ma_ldr(toAddress(from), to.reg());
else
masm.ma_add(from.base(), Imm32(from.disp()), to.reg());
} else {
@ -237,11 +223,11 @@ MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to)
MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
if (from.isMemory())
masm.ma_ldr(toOperand(from, false), reg);
masm.ma_ldr(toAddress(from), reg);
else
masm.ma_add(from.base(), Imm32(from.disp()), reg);
MOZ_ASSERT(to.base() != reg);
masm.ma_str(reg, toOperand(to, false));
masm.ma_str(reg, toAddress(to));
}
}
@ -252,19 +238,15 @@ MoveEmitterARM::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
if (to.isFloatReg())
masm.ma_vmov_f32(from.floatReg(), to.floatReg());
else
masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(),
toOperand(to, true));
masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to));
} else if (to.isFloatReg()) {
masm.ma_vldr(toOperand(from, true),
VFPRegister(to.floatReg()).singleOverlay());
masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay());
} else {
// Memory to memory move.
MOZ_ASSERT(from.isMemory());
FloatRegister reg = ScratchFloat32Reg;
masm.ma_vldr(toOperand(from, true),
VFPRegister(reg).singleOverlay());
masm.ma_vstr(VFPRegister(reg).singleOverlay(),
toOperand(to, true));
masm.ma_vldr(toAddress(from), VFPRegister(reg).singleOverlay());
masm.ma_vstr(VFPRegister(reg).singleOverlay(), toAddress(to));
}
}
@ -275,15 +257,15 @@ MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
if (to.isFloatReg())
masm.ma_vmov(from.floatReg(), to.floatReg());
else
masm.ma_vstr(from.floatReg(), toOperand(to, true));
masm.ma_vstr(from.floatReg(), toAddress(to));
} else if (to.isFloatReg()) {
masm.ma_vldr(toOperand(from, true), to.floatReg());
masm.ma_vldr(toAddress(from), to.floatReg());
} else {
// Memory to memory move.
MOZ_ASSERT(from.isMemory());
FloatRegister reg = ScratchDoubleReg;
masm.ma_vldr(toOperand(from, true), reg);
masm.ma_vstr(reg, toOperand(to, true));
masm.ma_vldr(toAddress(from), reg);
masm.ma_vstr(reg, toAddress(to));
}
}

Просмотреть файл

@ -36,9 +36,9 @@ class MoveEmitterARM
void assertDone();
Register tempReg();
FloatRegister tempFloatReg();
Operand cycleSlot(uint32_t slot, uint32_t subslot) const;
Operand spillSlot() const;
Operand toOperand(const MoveOperand& operand, bool isFloat) const;
Address cycleSlot(uint32_t slot, uint32_t subslot) const;
Address spillSlot() const;
Address toAddress(const MoveOperand& operand) const;
void emitMove(const MoveOperand& from, const MoveOperand& to);
void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);

Просмотреть файл

@ -171,7 +171,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
// Get a copy of the number of args to use as a decrement counter, also set
// the zero condition code.
aasm->as_mov(r5, O2Reg(r1), SetCond);
aasm->as_mov(r5, O2Reg(r1), SetCC);
// Loop over arguments, copying them from an unknown buffer onto the Ion
// stack so they can be accessed from JIT'ed code.
@ -181,7 +181,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
aasm->as_b(&footer, Assembler::Zero);
// Get the top of the loop.
masm.bind(&header);
aasm->as_sub(r5, r5, Imm8(1), SetCond);
aasm->as_sub(r5, r5, Imm8(1), SetCC);
// We could be more awesome, and unroll this, using a loadm
// (particularly since the offset is effectively 0) but that seems more
// error prone, and complex.
@ -473,7 +473,7 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
Label undefLoopTop;
masm.bind(&undefLoopTop);
masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
masm.ma_sub(r2, Imm32(1), r2, SetCond);
masm.ma_sub(r2, Imm32(1), r2, SetCC);
masm.ma_b(&undefLoopTop, Assembler::NonZero);
}
@ -490,7 +490,7 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
masm.ma_dataTransferN(IsLoad, 64, true, r3, Imm32(-8), r4, PostIndex);
masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
masm.ma_sub(r8, Imm32(1), r8, SetCond);
masm.ma_sub(r8, Imm32(1), r8, SetCC);
masm.ma_b(&copyLoopTop, Assembler::NotSigned);
}

Просмотреть файл

@ -2642,15 +2642,8 @@ ArrayConcatDenseKernel(JSContext* cx, JSObject* obj1, JSObject* obj2, JSObject*
MOZ_ASSERT(GetBoxedOrUnboxedInitializedLength<Type>(result) == 0);
if (Type == JSVAL_TYPE_MAGIC) {
if (!result->as<ArrayObject>().ensureElements(cx, len))
return DenseElementResult::Failure;
} else {
if (result->as<UnboxedArrayObject>().capacity() < len) {
if (!result->as<UnboxedArrayObject>().growElements(cx, len))
return DenseElementResult::Failure;
}
}
if (!EnsureBoxedOrUnboxedDenseElements<Type>(cx, result, len))
return DenseElementResult::Failure;
CopyBoxedOrUnboxedDenseElements<Type>(cx, result, obj1, 0, 0, initlen1);
CopyBoxedOrUnboxedDenseElements<Type>(cx, result, obj2, initlen1, 0, initlen2);
@ -2899,6 +2892,20 @@ SliceSparse(JSContext* cx, HandleObject obj, uint32_t begin, uint32_t end, Handl
return true;
}
template <typename T>
static inline uint32_t
NormalizeSliceTerm(T value, uint32_t length)
{
if (value < 0) {
value += length;
if (value < 0)
return 0;
} else if (double(value) > double(length)) {
return length;
}
return uint32_t(value);
}
bool
js::array_slice(JSContext* cx, unsigned argc, Value* vp)
{
@ -2918,26 +2925,12 @@ js::array_slice(JSContext* cx, unsigned argc, Value* vp)
double d;
if (!ToInteger(cx, args[0], &d))
return false;
if (d < 0) {
d += length;
if (d < 0)
d = 0;
} else if (d > length) {
d = length;
}
begin = (uint32_t)d;
begin = NormalizeSliceTerm(d, length);
if (args.hasDefined(1)) {
if (!ToInteger(cx, args[1], &d))
return false;
if (d < 0) {
d += length;
if (d < 0)
d = 0;
} else if (d > length) {
d = length;
}
end = (uint32_t)d;
end = NormalizeSliceTerm(d, length);
}
}
@ -3000,6 +2993,57 @@ js::array_slice(JSContext* cx, unsigned argc, Value* vp)
return true;
}
template <JSValueType Type>
DenseElementResult
ArraySliceDenseKernel(JSContext* cx, JSObject* obj, int32_t beginArg, int32_t endArg, JSObject* result)
{
int32_t length = GetAnyBoxedOrUnboxedArrayLength(obj);
uint32_t begin = NormalizeSliceTerm(beginArg, length);
uint32_t end = NormalizeSliceTerm(endArg, length);
if (begin > end)
begin = end;
size_t initlen = GetBoxedOrUnboxedInitializedLength<Type>(obj);
if (initlen > begin) {
size_t count = Min<size_t>(initlen - begin, end - begin);
if (count) {
if (!EnsureBoxedOrUnboxedDenseElements<Type>(cx, result, count))
return DenseElementResult::Failure;
CopyBoxedOrUnboxedDenseElements<Type>(cx, result, obj, 0, begin, count);
}
}
SetAnyBoxedOrUnboxedArrayLength(cx, result, end - begin);
return DenseElementResult::Success;
}
DefineBoxedOrUnboxedFunctor5(ArraySliceDenseKernel,
JSContext*, JSObject*, int32_t, int32_t, JSObject*);
JSObject*
js::array_slice_dense(JSContext* cx, HandleObject obj, int32_t begin, int32_t end,
HandleObject result)
{
if (result) {
ArraySliceDenseKernelFunctor functor(cx, obj, begin, end, result);
DenseElementResult rv = CallBoxedOrUnboxedSpecialization(functor, result);
MOZ_ASSERT(rv != DenseElementResult::Incomplete);
return rv == DenseElementResult::Success ? result : nullptr;
}
// Slower path if the JIT wasn't able to allocate an object inline.
JS::AutoValueArray<4> argv(cx);
argv[0].setUndefined();
argv[1].setObject(*obj);
argv[2].setInt32(begin);
argv[3].setInt32(end);
if (!array_slice(cx, 2, argv.begin()))
return nullptr;
return &argv[0].toObject();
}
/* ES5 15.4.4.20. */
static bool
array_filter(JSContext* cx, unsigned argc, Value* vp)

Просмотреть файл

@ -180,6 +180,9 @@ array_unshift(JSContext* cx, unsigned argc, js::Value* vp);
extern bool
array_slice(JSContext* cx, unsigned argc, js::Value* vp);
extern JSObject*
array_slice_dense(JSContext* cx, HandleObject obj, int32_t begin, int32_t end, HandleObject result);
/*
* Append the given (non-hole) value to the end of an array. The array must be
* a newborn array -- that is, one which has not been exposed to script for

Просмотреть файл

@ -7092,9 +7092,33 @@ JS::AbortIncrementalGC(JSRuntime* rt)
}
char16_t*
JS::GCDescription::formatMessage(JSRuntime* rt) const
JS::GCDescription::formatSliceMessage(JSRuntime* rt) const
{
return rt->gc.stats.formatMessage();
UniqueChars cstr = rt->gc.stats.formatCompactSliceMessage();
size_t nchars = strlen(cstr.get());
UniquePtr<char16_t, JS::FreePolicy> out(js_pod_malloc<char16_t>(nchars + 1));
if (!out)
return nullptr;
out.get()[nchars] = 0;
CopyAndInflateChars(out.get(), cstr.get(), nchars);
return out.release();
}
char16_t*
JS::GCDescription::formatSummaryMessage(JSRuntime* rt) const
{
UniqueChars cstr = rt->gc.stats.formatCompactSummaryMessage();
size_t nchars = strlen(cstr.get());
UniquePtr<char16_t, JS::FreePolicy> out(js_pod_malloc<char16_t>(nchars + 1));
if (!out)
return nullptr;
out.get()[nchars] = 0;
CopyAndInflateChars(out.get(), cstr.get(), nchars);
return out.release();
}
JS::dbg::GarbageCollectionEvent::Ptr

Просмотреть файл

@ -13,6 +13,7 @@
#include "mozilla/ArrayUtils.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Maybe.h"
#include "mozilla/PodOperations.h"
#include <string.h>
@ -1654,6 +1655,58 @@ SetObjectElementOperation(JSContext* cx, HandleObject obj, HandleValue receiver,
result.checkStrictErrorOrWarning(cx, obj, id, strict);
}
/*
* As an optimization, the interpreter creates a handful of reserved Rooted<T>
* variables at the beginning, thus inserting them into the Rooted list once
* upon entry. ReservedRooted "borrows" a reserved Rooted variable and uses it
* within a local scope, resetting the value to nullptr (or the appropriate
* equivalent for T) at scope end. This avoids inserting/removing the Rooted
* from the rooter list, while preventing stale values from being kept alive
* unnecessarily.
*/
template<typename T>
class ReservedRootedBase {
};
template<typename T>
class ReservedRooted : public ReservedRootedBase<T>
{
Rooted<T>* savedRoot;
public:
ReservedRooted(Rooted<T>* root, const T& ptr) : savedRoot(root) {
*root = ptr;
}
explicit ReservedRooted(Rooted<T>* root) : savedRoot(root) {
*root = js::GCMethods<T>::initial();
}
~ReservedRooted() {
*savedRoot = js::GCMethods<T>::initial();
}
void set(const T& p) const { *savedRoot = p; }
operator Handle<T>() { return *savedRoot; }
operator Rooted<T>&() { return *savedRoot; }
MutableHandle<T> operator&() { return &*savedRoot; }
DECLARE_NONPOINTER_ACCESSOR_METHODS(savedRoot->get())
DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(savedRoot->get())
DECLARE_POINTER_CONSTREF_OPS(T)
DECLARE_POINTER_ASSIGN_OPS(ReservedRooted, T)
};
template <>
class ReservedRootedBase<Value> : public ValueOperations<ReservedRooted<Value>>
{
friend class ValueOperations<ReservedRooted<Value>>;
const Value* extract() const {
return static_cast<const ReservedRooted<Value>*>(this)->address();
}
};
static MOZ_NEVER_INLINE bool
Interpret(JSContext* cx, RunState& state)
{
@ -2038,11 +2091,9 @@ END_CASE(JSOP_SETRVAL)
CASE(JSOP_ENTERWITH)
{
RootedValue& val = rootValue0;
RootedObject& staticWith = rootObject0;
val = REGS.sp[-1];
ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
REGS.sp--;
staticWith = script->getObject(REGS.pc);
ReservedRooted<JSObject*> staticWith(&rootObject0, script->getObject(REGS.pc));
if (!EnterWithOperation(cx, REGS.fp(), val, staticWith))
goto error;
@ -2170,13 +2221,14 @@ CASE(JSOP_IN)
ReportValueError(cx, JSMSG_IN_NOT_OBJECT, -1, rref, nullptr);
goto error;
}
RootedObject& obj = rootObject0;
obj = &rref.toObject();
RootedId& id = rootId0;
FETCH_ELEMENT_ID(-2, id);
bool found;
if (!HasProperty(cx, obj, id, &found))
goto error;
{
ReservedRooted<JSObject*> obj(&rootObject0, &rref.toObject());
ReservedRooted<jsid> id(&rootId0);
FETCH_ELEMENT_ID(-2, id);
if (!HasProperty(cx, obj, id, &found))
goto error;
}
TRY_BRANCH_AFTER_COND(found, 2);
REGS.sp--;
REGS.sp[-1].setBoolean(found);
@ -2199,8 +2251,7 @@ CASE(JSOP_MOREITER)
MOZ_ASSERT(REGS.stackDepth() >= 1);
MOZ_ASSERT(REGS.sp[-1].isObject());
PUSH_NULL();
RootedObject& obj = rootObject0;
obj = &REGS.sp[-2].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
if (!IteratorMore(cx, obj, REGS.stackHandleAt(-1)))
goto error;
}
@ -2216,8 +2267,7 @@ END_CASE(JSOP_ISNOITER)
CASE(JSOP_ENDITER)
{
MOZ_ASSERT(REGS.stackDepth() >= 1);
RootedObject& obj = rootObject0;
obj = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-1].toObject());
bool ok = CloseIterator(cx, obj);
REGS.sp--;
if (!ok)
@ -2264,14 +2314,9 @@ END_CASE(JSOP_PICK)
CASE(JSOP_SETCONST)
{
RootedPropertyName& name = rootName0;
name = script->getName(REGS.pc);
RootedValue& rval = rootValue0;
rval = REGS.sp[-1];
RootedObject& obj = rootObject0;
obj = &REGS.fp()->varObj();
ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
ReservedRooted<Value> rval(&rootValue0, REGS.sp[-1]);
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.fp()->varObj());
if (!SetConstOperation(cx, obj, name, rval))
goto error;
@ -2287,14 +2332,11 @@ CASE(JSOP_BINDNAME)
{
JSOp op = JSOp(*REGS.pc);
if (op == JSOP_BINDNAME || script->hasPollutedGlobalScope()) {
RootedObject& scopeChain = rootObject0;
scopeChain = REGS.fp()->scopeChain();
RootedPropertyName& name = rootName0;
name = script->getName(REGS.pc);
ReservedRooted<JSObject*> scopeChain(&rootObject0, REGS.fp()->scopeChain());
ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
/* Assigning to an undeclared name adds a property to the global object. */
RootedObject& scope = rootObject1;
ReservedRooted<JSObject*> scope(&rootObject1);
if (!LookupNameUnqualified(cx, name, scopeChain, &scope))
goto error;
@ -2482,10 +2524,8 @@ END_CASE(JSOP_ADD)
CASE(JSOP_SUB)
{
RootedValue& lval = rootValue0;
RootedValue& rval = rootValue1;
lval = REGS.sp[-2];
rval = REGS.sp[-1];
ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
MutableHandleValue res = REGS.stackHandleAt(-2);
if (!SubOperation(cx, lval, rval, res))
goto error;
@ -2495,10 +2535,8 @@ END_CASE(JSOP_SUB)
CASE(JSOP_MUL)
{
RootedValue& lval = rootValue0;
RootedValue& rval = rootValue1;
lval = REGS.sp[-2];
rval = REGS.sp[-1];
ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
MutableHandleValue res = REGS.stackHandleAt(-2);
if (!MulOperation(cx, lval, rval, res))
goto error;
@ -2508,10 +2546,8 @@ END_CASE(JSOP_MUL)
CASE(JSOP_DIV)
{
RootedValue& lval = rootValue0;
RootedValue& rval = rootValue1;
lval = REGS.sp[-2];
rval = REGS.sp[-1];
ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
MutableHandleValue res = REGS.stackHandleAt(-2);
if (!DivOperation(cx, lval, rval, res))
goto error;
@ -2521,10 +2557,8 @@ END_CASE(JSOP_DIV)
CASE(JSOP_MOD)
{
RootedValue& lval = rootValue0;
RootedValue& rval = rootValue1;
lval = REGS.sp[-2];
rval = REGS.sp[-1];
ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
MutableHandleValue res = REGS.stackHandleAt(-2);
if (!ModOperation(cx, lval, rval, res))
goto error;
@ -2552,8 +2586,7 @@ END_CASE(JSOP_BITNOT)
CASE(JSOP_NEG)
{
RootedValue& val = rootValue0;
val = REGS.sp[-1];
ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
MutableHandleValue res = REGS.stackHandleAt(-1);
if (!NegOperation(cx, script, REGS.pc, val, res))
goto error;
@ -2567,11 +2600,8 @@ END_CASE(JSOP_POS)
CASE(JSOP_DELNAME)
{
RootedPropertyName& name = rootName0;
name = script->getName(REGS.pc);
RootedObject& scopeObj = rootObject0;
scopeObj = REGS.fp()->scopeChain();
ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
ReservedRooted<JSObject*> scopeObj(&rootObject0, REGS.fp()->scopeChain());
PUSH_BOOLEAN(true);
MutableHandleValue res = REGS.stackHandleAt(-1);
@ -2585,10 +2615,8 @@ CASE(JSOP_STRICTDELPROP)
{
static_assert(JSOP_DELPROP_LENGTH == JSOP_STRICTDELPROP_LENGTH,
"delprop and strictdelprop must be the same size");
RootedId& id = rootId0;
id = NameToId(script->getName(REGS.pc));
RootedObject& obj = rootObject0;
ReservedRooted<jsid> id(&rootId0, NameToId(script->getName(REGS.pc)));
ReservedRooted<JSObject*> obj(&rootObject0);
FETCH_OBJECT(cx, -1, obj);
ObjectOpResult result;
@ -2609,14 +2637,13 @@ CASE(JSOP_STRICTDELELEM)
static_assert(JSOP_DELELEM_LENGTH == JSOP_STRICTDELELEM_LENGTH,
"delelem and strictdelelem must be the same size");
/* Fetch the left part and resolve it to a non-null object. */
RootedObject& obj = rootObject0;
ReservedRooted<JSObject*> obj(&rootObject0);
FETCH_OBJECT(cx, -2, obj);
RootedValue& propval = rootValue0;
propval = REGS.sp[-1];
ReservedRooted<Value> propval(&rootValue0, REGS.sp[-1]);
ObjectOpResult result;
RootedId& id = rootId0;
ReservedRooted<jsid> id(&rootId0);
if (!ValueToId<CanGC>(cx, propval, &id))
goto error;
if (!DeleteProperty(cx, obj, id, result))
@ -2639,11 +2666,8 @@ CASE(JSOP_TOID)
* but we need to avoid the observable stringification the second time.
* There must be an object value below the id, which will not be popped.
*/
RootedValue& objval = rootValue0;
RootedValue& idval = rootValue1;
objval = REGS.sp[-2];
idval = REGS.sp[-1];
ReservedRooted<Value> objval(&rootValue0, REGS.sp[-2]);
ReservedRooted<Value> idval(&rootValue1, REGS.sp[-1]);
MutableHandleValue res = REGS.stackHandleAt(-1);
if (!ToIdOperation(cx, script, REGS.pc, objval, idval, res))
goto error;
@ -2682,12 +2706,9 @@ END_CASE(JSOP_GETPROP)
CASE(JSOP_GETPROP_SUPER)
{
RootedObject& receiver = rootObject0;
RootedObject& obj = rootObject1;
ReservedRooted<JSObject*> receiver(&rootObject0);
FETCH_OBJECT(cx, -2, receiver);
obj = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject1, &REGS.sp[-1].toObject());
MutableHandleValue rref = REGS.stackHandleAt(-2);
if (!GetProperty(cx, obj, receiver, script->getName(REGS.pc), rref))
@ -2699,10 +2720,8 @@ END_CASE(JSOP_GETPROP_SUPER)
CASE(JSOP_GETXPROP)
{
RootedObject& obj = rootObject0;
obj = &REGS.sp[-1].toObject();
RootedId& id = rootId0;
id = NameToId(script->getName(REGS.pc));
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-1].toObject());
ReservedRooted<jsid> id(&rootId0, NameToId(script->getName(REGS.pc)));
MutableHandleValue rval = REGS.stackHandleAt(-1);
if (!GetPropertyForNameLookup(cx, obj, id, rval))
goto error;
@ -2736,8 +2755,7 @@ CASE(JSOP_STRICTSETNAME)
static_assert(JSOP_SETNAME_LENGTH == JSOP_SETGNAME_LENGTH,
"We're sharing the END_CASE so the lengths better match");
RootedObject& scope = rootObject0;
scope = &REGS.sp[-2].toObject();
ReservedRooted<JSObject*> scope(&rootObject0, &REGS.sp[-2].toObject());
HandleValue value = REGS.stackHandleAt(-1);
if (!SetNameOperation(cx, script, REGS.pc, scope, value))
@ -2756,8 +2774,7 @@ CASE(JSOP_STRICTSETPROP)
HandleValue lval = REGS.stackHandleAt(-2);
HandleValue rval = REGS.stackHandleAt(-1);
RootedId& id = rootId0;
id = NameToId(script->getName(REGS.pc));
ReservedRooted<jsid> id(&rootId0, NameToId(script->getName(REGS.pc)));
if (!SetPropertyOperation(cx, JSOp(*REGS.pc), lval, id, rval))
goto error;
@ -2773,17 +2790,10 @@ CASE(JSOP_STRICTSETPROP_SUPER)
"setprop-super and strictsetprop-super must be the same size");
RootedValue& receiver = rootValue0;
receiver = REGS.sp[-3];
RootedObject& obj = rootObject0;
obj = &REGS.sp[-2].toObject();
RootedValue& rval = rootValue1;
rval = REGS.sp[-1];
RootedId& id = rootId0;
id = NameToId(script->getName(REGS.pc));
ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-3]);
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
ReservedRooted<jsid> id(&rootId0, NameToId(script->getName(REGS.pc)));
ObjectOpResult result;
if (!SetProperty(cx, obj, id, rval, receiver, result))
@ -2822,10 +2832,9 @@ END_CASE(JSOP_GETELEM)
CASE(JSOP_GETELEM_SUPER)
{
HandleValue rval = REGS.stackHandleAt(-3);
RootedObject& receiver = rootObject0;
ReservedRooted<JSObject*> receiver(&rootObject0);
FETCH_OBJECT(cx, -2, receiver);
RootedObject& obj = rootObject1;
obj = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject1, &REGS.sp[-1].toObject());
MutableHandleValue res = REGS.stackHandleAt(-3);
@ -2845,13 +2854,12 @@ CASE(JSOP_STRICTSETELEM)
{
static_assert(JSOP_SETELEM_LENGTH == JSOP_STRICTSETELEM_LENGTH,
"setelem and strictsetelem must be the same size");
RootedObject& obj = rootObject0;
ReservedRooted<JSObject*> obj(&rootObject0);
FETCH_OBJECT(cx, -3, obj);
RootedId& id = rootId0;
ReservedRooted<jsid> id(&rootId0);
FETCH_ELEMENT_ID(-2, id);
Value& value = REGS.sp[-1];
RootedValue& receiver = rootValue0;
receiver = ObjectValue(*obj);
ReservedRooted<Value> receiver(&rootValue0, ObjectValue(*obj));
if (!SetObjectElementOperation(cx, obj, receiver, id, value, *REGS.pc == JSOP_STRICTSETELEM))
goto error;
REGS.sp[-3] = value;
@ -2865,12 +2873,10 @@ CASE(JSOP_STRICTSETELEM_SUPER)
static_assert(JSOP_SETELEM_SUPER_LENGTH == JSOP_STRICTSETELEM_SUPER_LENGTH,
"setelem-super and strictsetelem-super must be the same size");
RootedId& id = rootId0;
ReservedRooted<jsid> id(&rootId0);
FETCH_ELEMENT_ID(-4, id);
RootedValue& receiver = rootValue0;
receiver = REGS.sp[-3];
RootedObject& obj = rootObject1;
obj = &REGS.sp[-2].toObject();
ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-3]);
ReservedRooted<JSObject*> obj(&rootObject1, &REGS.sp[-2].toObject());
Value& value = REGS.sp[-1];
bool strict = JSOp(*REGS.pc) == JSOP_STRICTSETELEM_SUPER;
@ -2943,11 +2949,11 @@ CASE(JSOP_FUNCALL)
bool construct = (*REGS.pc == JSOP_NEW);
RootedFunction& fun = rootFunction0;
bool isFunction = IsFunctionObject(args.calleev(), fun.address());
JSFunction* maybeFun;
bool isFunction = IsFunctionObject(args.calleev(), &maybeFun);
/* Don't bother trying to fast-path calls to scripted non-constructors. */
if (!isFunction || !fun->isInterpreted() || !fun->isConstructor()) {
if (!isFunction || !maybeFun->isInterpreted() || !maybeFun->isConstructor()) {
if (construct) {
if (!InvokeConstructor(cx, args))
goto error;
@ -2961,27 +2967,30 @@ CASE(JSOP_FUNCALL)
ADVANCE_AND_DISPATCH(JSOP_CALL_LENGTH);
}
RootedScript& funScript = rootScript0;
funScript = fun->getOrCreateScript(cx);
if (!funScript)
goto error;
InitialFrameFlags initial = construct ? INITIAL_CONSTRUCT : INITIAL_NONE;
bool createSingleton = ObjectGroup::useSingletonForNewObject(cx, script, REGS.pc);
TypeMonitorCall(cx, args, construct);
{
InvokeState state(cx, args, initial);
MOZ_ASSERT(maybeFun);
ReservedRooted<JSFunction*> fun(&rootFunction0, maybeFun);
ReservedRooted<JSScript*> funScript(&rootScript0, fun->getOrCreateScript(cx));
if (!funScript)
goto error;
InitialFrameFlags initial = construct ? INITIAL_CONSTRUCT : INITIAL_NONE;
bool createSingleton = ObjectGroup::useSingletonForNewObject(cx, script, REGS.pc);
TypeMonitorCall(cx, args, construct);
mozilla::Maybe<InvokeState> state;
state.emplace(cx, args, initial);
if (createSingleton)
state.setCreateSingleton();
state->setCreateSingleton();
if (!createSingleton && jit::IsIonEnabled(cx)) {
jit::MethodStatus status = jit::CanEnter(cx, state);
jit::MethodStatus status = jit::CanEnter(cx, state.ref());
if (status == jit::Method_Error)
goto error;
if (status == jit::Method_Compiled) {
jit::JitExecStatus exec = jit::IonCannon(cx, state);
jit::JitExecStatus exec = jit::IonCannon(cx, state.ref());
CHECK_BRANCH();
REGS.sp = args.spAfterCall();
interpReturnOK = !IsErrorStatus(exec);
@ -2990,26 +2999,28 @@ CASE(JSOP_FUNCALL)
}
if (jit::IsBaselineEnabled(cx)) {
jit::MethodStatus status = jit::CanEnterBaselineMethod(cx, state);
jit::MethodStatus status = jit::CanEnterBaselineMethod(cx, state.ref());
if (status == jit::Method_Error)
goto error;
if (status == jit::Method_Compiled) {
jit::JitExecStatus exec = jit::EnterBaselineMethod(cx, state);
jit::JitExecStatus exec = jit::EnterBaselineMethod(cx, state.ref());
CHECK_BRANCH();
REGS.sp = args.spAfterCall();
interpReturnOK = !IsErrorStatus(exec);
goto jit_return;
}
}
state.reset();
funScript = fun->nonLazyScript();
if (!activation.pushInlineFrame(args, funScript, initial))
goto error;
if (createSingleton)
REGS.fp()->setCreateSingleton();
}
funScript = fun->nonLazyScript();
if (!activation.pushInlineFrame(args, funScript, initial))
goto error;
if (createSingleton)
REGS.fp()->setCreateSingleton();
SET_SCRIPT(REGS.fp()->script());
{
@ -3051,17 +3062,13 @@ CASE(JSOP_GIMPLICITTHIS)
{
JSOp op = JSOp(*REGS.pc);
if (op == JSOP_IMPLICITTHIS || script->hasPollutedGlobalScope()) {
RootedPropertyName& name = rootName0;
name = script->getName(REGS.pc);
RootedObject& scopeObj = rootObject0;
scopeObj = REGS.fp()->scopeChain();
RootedObject& scope = rootObject1;
ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
ReservedRooted<JSObject*> scopeObj(&rootObject0, REGS.fp()->scopeChain());
ReservedRooted<JSObject*> scope(&rootObject1);
if (!LookupNameWithGlobalDefault(cx, name, scopeObj, &scope))
goto error;
RootedValue& v = rootValue0;
ReservedRooted<Value> v(&rootValue0);
if (!ComputeImplicitThis(cx, scope, &v))
goto error;
PUSH_COPY(v);
@ -3077,8 +3084,7 @@ END_CASE(JSOP_IMPLICITTHIS)
CASE(JSOP_GETGNAME)
CASE(JSOP_GETNAME)
{
RootedValue& rval = rootValue0;
ReservedRooted<Value> rval(&rootValue0);
if (!GetNameOperation(cx, REGS.fp(), REGS.pc, &rval))
goto error;
@ -3091,8 +3097,7 @@ END_CASE(JSOP_GETNAME)
CASE(JSOP_GETINTRINSIC)
{
RootedValue& rval = rootValue0;
ReservedRooted<Value> rval(&rootValue0);
if (!GetIntrinsicOperation(cx, REGS.pc, &rval))
goto error;
@ -3148,8 +3153,7 @@ END_CASE(JSOP_SYMBOL)
CASE(JSOP_OBJECT)
{
RootedObject& ref = rootObject0;
ref = script->getObject(REGS.pc);
ReservedRooted<JSObject*> ref(&rootObject0, script->getObject(REGS.pc));
if (JS::CompartmentOptionsRef(cx).cloneSingletons()) {
JSObject* obj = DeepCloneObjectLiteral(cx, ref, TenuredObject);
if (!obj)
@ -3165,12 +3169,9 @@ END_CASE(JSOP_OBJECT)
CASE(JSOP_CALLSITEOBJ)
{
RootedObject& cso = rootObject0;
cso = script->getObject(REGS.pc);
RootedObject& raw = rootObject1;
raw = script->getObject(GET_UINT32_INDEX(REGS.pc) + 1);
RootedValue& rawValue = rootValue0;
rawValue.setObject(*raw);
ReservedRooted<JSObject*> cso(&rootObject0, script->getObject(REGS.pc));
ReservedRooted<JSObject*> raw(&rootObject1, script->getObject(GET_UINT32_INDEX(REGS.pc) + 1));
ReservedRooted<Value> rawValue(&rootValue0, ObjectValue(*raw));
if (!ProcessCallSiteObjOperation(cx, cso, raw, rawValue))
goto error;
@ -3270,8 +3271,7 @@ END_CASE(JSOP_RUNONCE)
CASE(JSOP_REST)
{
RootedObject& rest = rootObject0;
rest = REGS.fp()->createRestParameter(cx);
ReservedRooted<JSObject*> rest(&rootObject0, REGS.fp()->createRestParameter(cx));
if (!rest)
goto error;
PUSH_COPY(ObjectValue(*rest));
@ -3281,8 +3281,7 @@ END_CASE(JSOP_REST)
CASE(JSOP_GETALIASEDVAR)
{
ScopeCoordinate sc = ScopeCoordinate(REGS.pc);
RootedValue& val = rootValue0;
val = REGS.fp()->aliasedVarScope(sc).aliasedVar(sc);
ReservedRooted<Value> val(&rootValue0, REGS.fp()->aliasedVarScope(sc).aliasedVar(sc));
MOZ_ASSERT(!IsUninitializedLexical(val));
PUSH_COPY(val);
TypeScript::Monitor(cx, script, REGS.pc, REGS.sp[-1]);
@ -3300,8 +3299,7 @@ END_CASE(JSOP_SETALIASEDVAR)
CASE(JSOP_CHECKLEXICAL)
{
uint32_t i = GET_LOCALNO(REGS.pc);
RootedValue& val = rootValue0;
val = REGS.fp()->unaliasedLocal(i);
ReservedRooted<Value> val(&rootValue0, REGS.fp()->unaliasedLocal(i));
if (!CheckUninitializedLexical(cx, script, REGS.pc, val))
goto error;
}
@ -3317,8 +3315,7 @@ END_CASE(JSOP_INITLEXICAL)
CASE(JSOP_CHECKALIASEDLEXICAL)
{
ScopeCoordinate sc = ScopeCoordinate(REGS.pc);
RootedValue& val = rootValue0;
val = REGS.fp()->aliasedVarScope(sc).aliasedVar(sc);
ReservedRooted<Value> val(&rootValue0, REGS.fp()->aliasedVarScope(sc).aliasedVar(sc));
if (!CheckUninitializedLexical(cx, script, REGS.pc, val))
goto error;
}
@ -3392,11 +3389,8 @@ CASE(JSOP_DEFVAR)
attrs |= JSPROP_PERMANENT;
/* Step 8b. */
RootedObject& obj = rootObject0;
obj = &REGS.fp()->varObj();
RootedPropertyName& name = rootName0;
name = script->getName(REGS.pc);
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.fp()->varObj());
ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
if (!DefVarOrConstOperation(cx, obj, name, attrs))
goto error;
@ -3411,9 +3405,7 @@ CASE(JSOP_DEFFUN)
* a compound statement (not at the top statement level of global code, or
* at the top level of a function body).
*/
RootedFunction& fun = rootFunction0;
fun = script->getFunction(GET_UINT32_INDEX(REGS.pc));
ReservedRooted<JSFunction*> fun(&rootFunction0, script->getFunction(GET_UINT32_INDEX(REGS.pc)));
if (!DefFunOperation(cx, script, REGS.fp()->scopeChain(), fun))
goto error;
}
@ -3422,9 +3414,7 @@ END_CASE(JSOP_DEFFUN)
CASE(JSOP_LAMBDA)
{
/* Load the specified function object literal. */
RootedFunction& fun = rootFunction0;
fun = script->getFunction(GET_UINT32_INDEX(REGS.pc));
ReservedRooted<JSFunction*> fun(&rootFunction0, script->getFunction(GET_UINT32_INDEX(REGS.pc)));
JSObject* obj = Lambda(cx, fun, REGS.fp()->scopeChain());
if (!obj)
goto error;
@ -3436,10 +3426,8 @@ END_CASE(JSOP_LAMBDA)
CASE(JSOP_LAMBDA_ARROW)
{
/* Load the specified function object literal. */
RootedFunction& fun = rootFunction0;
fun = script->getFunction(GET_UINT32_INDEX(REGS.pc));
RootedValue& thisv = rootValue0;
thisv = REGS.sp[-1];
ReservedRooted<JSFunction*> fun(&rootFunction0, script->getFunction(GET_UINT32_INDEX(REGS.pc)));
ReservedRooted<Value> thisv(&rootValue0, REGS.sp[-1]);
JSObject* obj = LambdaArrow(cx, fun, REGS.fp()->scopeChain(), thisv);
if (!obj)
goto error;
@ -3456,14 +3444,11 @@ END_CASE(JSOP_CALLEE)
CASE(JSOP_INITPROP_GETTER)
CASE(JSOP_INITPROP_SETTER)
{
RootedObject& obj = rootObject0;
RootedPropertyName& name = rootName0;
RootedObject& val = rootObject1;
MOZ_ASSERT(REGS.stackDepth() >= 2);
obj = &REGS.sp[-2].toObject();
name = script->getName(REGS.pc);
val = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
ReservedRooted<JSObject*> val(&rootObject1, &REGS.sp[-1].toObject());
if (!InitGetterSetterOperation(cx, REGS.pc, obj, name, val))
goto error;
@ -3475,14 +3460,11 @@ END_CASE(JSOP_INITPROP_GETTER)
CASE(JSOP_INITELEM_GETTER)
CASE(JSOP_INITELEM_SETTER)
{
RootedObject& obj = rootObject0;
RootedValue& idval = rootValue0;
RootedObject& val = rootObject1;
MOZ_ASSERT(REGS.stackDepth() >= 3);
obj = &REGS.sp[-3].toObject();
idval = REGS.sp[-2];
val = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-3].toObject());
ReservedRooted<Value> idval(&rootValue0, REGS.sp[-2]);
ReservedRooted<JSObject*> val(&rootObject1, &REGS.sp[-1].toObject());
if (!InitGetterSetterOperation(cx, REGS.pc, obj, idval, val))
goto error;
@ -3524,13 +3506,11 @@ END_CASE(JSOP_NEWARRAY)
CASE(JSOP_NEWARRAY_COPYONWRITE)
{
RootedObject& baseobj = rootObject0;
baseobj = ObjectGroup::getOrFixupCopyOnWriteObject(cx, script, REGS.pc);
ReservedRooted<JSObject*> baseobj(&rootObject0, ObjectGroup::getOrFixupCopyOnWriteObject(cx, script, REGS.pc));
if (!baseobj)
goto error;
RootedObject& obj = rootObject1;
obj = NewDenseCopyOnWriteArray(cx, baseobj.as<ArrayObject>(), gc::DefaultHeap);
ReservedRooted<JSObject*> obj(&rootObject1, NewDenseCopyOnWriteArray(cx, ((RootedObject&)(baseobj)).as<ArrayObject>(), gc::DefaultHeap));
if (!obj)
goto error;
@ -3552,11 +3532,8 @@ CASE(JSOP_MUTATEPROTO)
MOZ_ASSERT(REGS.stackDepth() >= 2);
if (REGS.sp[-1].isObjectOrNull()) {
RootedObject& newProto = rootObject1;
rootObject1 = REGS.sp[-1].toObjectOrNull();
RootedObject& obj = rootObject0;
obj = &REGS.sp[-2].toObject();
ReservedRooted<JSObject*> newProto(&rootObject1, REGS.sp[-1].toObjectOrNull());
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
MOZ_ASSERT(obj->is<PlainObject>());
if (!SetPrototype(cx, obj, newProto))
@ -3577,12 +3554,10 @@ CASE(JSOP_INITHIDDENPROP)
"initprop and inithiddenprop must be the same size");
/* Load the property's initial value into rval. */
MOZ_ASSERT(REGS.stackDepth() >= 2);
RootedValue& rval = rootValue0;
rval = REGS.sp[-1];
ReservedRooted<Value> rval(&rootValue0, REGS.sp[-1]);
/* Load the object being initialized into lval/obj. */
RootedObject& obj = rootObject0;
obj = &REGS.sp[-2].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
PropertyName* name = script->getName(REGS.pc);
@ -3602,8 +3577,7 @@ CASE(JSOP_INITELEM)
HandleValue val = REGS.stackHandleAt(-1);
HandleValue id = REGS.stackHandleAt(-2);
RootedObject& obj = rootObject0;
obj = &REGS.sp[-3].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-3].toObject());
if (!InitElemOperation(cx, obj, id, val))
goto error;
@ -3617,8 +3591,7 @@ CASE(JSOP_INITELEM_ARRAY)
MOZ_ASSERT(REGS.stackDepth() >= 2);
HandleValue val = REGS.stackHandleAt(-1);
RootedObject& obj = rootObject0;
obj = &REGS.sp[-2].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
uint32_t index = GET_UINT24(REGS.pc);
if (!InitArrayElemOperation(cx, REGS.pc, obj, index, val))
@ -3633,8 +3606,7 @@ CASE(JSOP_INITELEM_INC)
MOZ_ASSERT(REGS.stackDepth() >= 3);
HandleValue val = REGS.stackHandleAt(-1);
RootedObject& obj = rootObject0;
obj = &REGS.sp[-3].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-3].toObject());
uint32_t index = REGS.sp[-2].toInt32();
if (!InitArrayElemOperation(cx, REGS.pc, obj, index, val))
@ -3693,7 +3665,7 @@ END_CASE(JSOP_FINALLY)
CASE(JSOP_THROWING)
{
RootedValue& v = rootValue0;
ReservedRooted<Value> v(&rootValue0);
POP_COPY_TO(v);
MOZ_ALWAYS_TRUE(ThrowingOperation(cx, v));
}
@ -3702,7 +3674,7 @@ END_CASE(JSOP_THROWING)
CASE(JSOP_THROW)
{
CHECK_BRANCH();
RootedValue& v = rootValue0;
ReservedRooted<Value> v(&rootValue0);
POP_COPY_TO(v);
JS_ALWAYS_FALSE(Throw(cx, v));
/* let the code at error try to catch the exception. */
@ -3711,14 +3683,12 @@ CASE(JSOP_THROW)
CASE(JSOP_INSTANCEOF)
{
RootedValue& rref = rootValue0;
rref = REGS.sp[-1];
if (rref.isPrimitive()) {
ReservedRooted<Value> rref(&rootValue0, REGS.sp[-1]);
if (HandleValue(rref).isPrimitive()) {
ReportValueError(cx, JSMSG_BAD_INSTANCEOF_RHS, -1, rref, nullptr);
goto error;
}
RootedObject& obj = rootObject0;
obj = &rref.toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &rref.toObject());
bool cond = false;
if (!HasInstance(cx, obj, REGS.stackHandleAt(-2), &cond))
goto error;
@ -3814,8 +3784,7 @@ CASE(JSOP_INITIALYIELD)
{
MOZ_ASSERT(!cx->isExceptionPending());
MOZ_ASSERT(REGS.fp()->isNonEvalFunctionFrame());
RootedObject& obj = rootObject0;
obj = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-1].toObject());
POP_RETURN_VALUE();
MOZ_ASSERT(REGS.stackDepth() == 0);
if (!GeneratorObject::initialSuspend(cx, obj, REGS.fp(), REGS.pc))
@ -3827,8 +3796,7 @@ CASE(JSOP_YIELD)
{
MOZ_ASSERT(!cx->isExceptionPending());
MOZ_ASSERT(REGS.fp()->isNonEvalFunctionFrame());
RootedObject& obj = rootObject0;
obj = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-1].toObject());
if (!GeneratorObject::normalSuspend(cx, obj, REGS.fp(), REGS.pc,
REGS.spForStackDepth(0), REGS.stackDepth() - 2))
{
@ -3843,19 +3811,18 @@ CASE(JSOP_YIELD)
CASE(JSOP_RESUME)
{
RootedObject& gen = rootObject0;
RootedValue& val = rootValue0;
val = REGS.sp[-1];
gen = &REGS.sp[-2].toObject();
// popInlineFrame expects there to be an additional value on the stack to
// pop off, so leave "gen" on the stack.
GeneratorObject::ResumeKind resumeKind = GeneratorObject::getResumeKind(REGS.pc);
bool ok = GeneratorObject::resume(cx, activation, gen, val, resumeKind);
SET_SCRIPT(REGS.fp()->script());
if (!ok)
goto error;
{
ReservedRooted<JSObject*> gen(&rootObject0, &REGS.sp[-2].toObject());
ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
// popInlineFrame expects there to be an additional value on the stack
// to pop off, so leave "gen" on the stack.
GeneratorObject::ResumeKind resumeKind = GeneratorObject::getResumeKind(REGS.pc);
bool ok = GeneratorObject::resume(cx, activation, gen, val, resumeKind);
SET_SCRIPT(REGS.fp()->script());
if (!ok)
goto error;
}
ADVANCE_AND_DISPATCH(0);
}
@ -3869,8 +3836,7 @@ END_CASE(JSOP_DEBUGAFTERYIELD)
CASE(JSOP_FINALYIELDRVAL)
{
RootedObject& gen = rootObject0;
gen = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> gen(&rootObject0, &REGS.sp[-1].toObject());
REGS.sp--;
if (!GeneratorObject::finalSuspend(cx, gen)) {
@ -3883,8 +3849,7 @@ CASE(JSOP_FINALYIELDRVAL)
CASE(JSOP_ARRAYPUSH)
{
RootedObject& obj = rootObject0;
obj = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-1].toObject());
if (!NewbornArrayPush(cx, obj, REGS.sp[-2]))
goto error;
REGS.sp -= 2;
@ -3893,13 +3858,12 @@ END_CASE(JSOP_ARRAYPUSH)
CASE(JSOP_CLASSHERITAGE)
{
RootedValue& val = rootValue0;
val = REGS.sp[-1];
ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
RootedValue& objProto = rootValue1;
RootedObject& funcProto = rootObject0;
ReservedRooted<Value> objProto(&rootValue1);
ReservedRooted<JSObject*> funcProto(&rootObject0);
if (val.isNull()) {
objProto.setNull();
objProto = NullValue();
if (!GetBuiltinPrototype(cx, JSProto_Function, &funcProto))
goto error;
} else {
@ -3926,12 +3890,10 @@ END_CASE(JSOP_CLASSHERITAGE)
CASE(JSOP_FUNWITHPROTO)
{
RootedObject& proto = rootObject1;
proto = &REGS.sp[-1].toObject();
ReservedRooted<JSObject*> proto(&rootObject1, &REGS.sp[-1].toObject());
/* Load the specified function object literal. */
RootedFunction& fun = rootFunction0;
fun = script->getFunction(GET_UINT32_INDEX(REGS.pc));
ReservedRooted<JSFunction*> fun(&rootFunction0, script->getFunction(GET_UINT32_INDEX(REGS.pc)));
JSObject* obj = CloneFunctionObjectIfNotSingleton(cx, fun, REGS.fp()->scopeChain(),
proto, GenericObject);
@ -3944,8 +3906,7 @@ END_CASE(JSOP_FUNWITHPROTO)
CASE(JSOP_OBJWITHPROTO)
{
RootedObject& proto = rootObject0;
proto = REGS.sp[-1].toObjectOrNull();
ReservedRooted<JSObject*> proto(&rootObject0, REGS.sp[-1].toObjectOrNull());
JSObject* obj = NewObjectWithGivenProto<PlainObject>(cx, proto);
if (!obj)
@ -3961,12 +3922,11 @@ CASE(JSOP_INITHOMEOBJECT)
MOZ_ASSERT(REGS.stackDepth() >= skipOver + 2);
/* Load the function to be initialized */
RootedFunction& func = rootFunction0;
func = &REGS.sp[-1].toObject().as<JSFunction>();
ReservedRooted<JSFunction*> func(&rootFunction0, &REGS.sp[-1].toObject().as<JSFunction>());
MOZ_ASSERT(func->allowSuperProperty());
/* Load the home object */
RootedNativeObject& obj = rootNativeObject0;
ReservedRooted<NativeObject*> obj(&rootNativeObject0);
obj = &REGS.sp[int(-2 - skipOver)].toObject().as<NativeObject>();
MOZ_ASSERT(obj->is<PlainObject>() || obj->is<JSFunction>());
@ -3991,10 +3951,8 @@ CASE(JSOP_SUPERBASE)
MOZ_ASSERT(callee.nonLazyScript()->needsHomeObject());
const Value& homeObjVal = callee.getExtendedSlot(FunctionExtended::METHOD_HOMEOBJECT_SLOT);
RootedObject& homeObj = rootObject0;
homeObj = &homeObjVal.toObject();
RootedObject& superBase = rootObject1;
ReservedRooted<JSObject*> homeObj(&rootObject0, &homeObjVal.toObject());
ReservedRooted<JSObject*> superBase(&rootObject1);
if (!GetPrototype(cx, homeObj, &superBase))
goto error;
@ -4037,12 +3995,12 @@ DEFAULT()
case CatchContinuation:
ADVANCE_AND_DISPATCH(0);
case FinallyContinuation:
case FinallyContinuation: {
/*
* Push (true, exception) pair for finally to indicate that [retsub]
* should rethrow the exception.
*/
RootedValue& exception = rootValue0;
ReservedRooted<Value> exception(&rootValue0);
if (!cx->getPendingException(&exception)) {
interpReturnOK = false;
goto return_continuation;
@ -4050,7 +4008,8 @@ DEFAULT()
PUSH_BOOLEAN(true);
PUSH_COPY(exception);
cx->clearPendingException();
ADVANCE_AND_DISPATCH(0);
}
ADVANCE_AND_DISPATCH(0);
}
MOZ_CRASH("Invalid HandleError continuation");

Просмотреть файл

@ -205,9 +205,9 @@ TypeSet::TypeString(TypeSet::Type type)
which = (which + 1) & 3;
if (type.isSingleton())
JS_snprintf(bufs[which], 40, "<0x%p>", (void*) type.singleton());
JS_snprintf(bufs[which], 40, "<0x%p>", (void*) type.singletonNoBarrier());
else
JS_snprintf(bufs[which], 40, "[0x%p]", (void*) type.group());
JS_snprintf(bufs[which], 40, "[0x%p]", (void*) type.groupNoBarrier());
return bufs[which];
}

Просмотреть файл

@ -387,6 +387,22 @@ SetBoxedOrUnboxedDenseElementNoTypeChange(JSObject* obj, size_t index, const Val
obj->as<UnboxedArrayObject>().setElementNoTypeChangeSpecific<Type>(index, value);
}
template <JSValueType Type>
static inline bool
EnsureBoxedOrUnboxedDenseElements(JSContext* cx, JSObject* obj, size_t count)
{
if (Type == JSVAL_TYPE_MAGIC) {
if (!obj->as<ArrayObject>().ensureElements(cx, count))
return false;
} else {
if (obj->as<UnboxedArrayObject>().capacity() < count) {
if (!obj->as<UnboxedArrayObject>().growElements(cx, count))
return false;
}
}
return true;
}
enum ShouldUpdateTypes
{
UpdateTypes = true,
@ -532,7 +548,7 @@ CopyBoxedOrUnboxedDenseElements(JSContext* cx, JSObject* dst, JSObject* src,
srcData + srcStart * elementSize,
length * elementSize);
// Add a post barrier if we might have copied a nursery pointer to dst.
// Add a store buffer entry if we might have copied a nursery pointer to dst.
if (UnboxedTypeNeedsPostBarrier(Type) && !IsInsideNursery(dst))
dst->runtimeFromMainThread()->gc.storeBuffer.putWholeCellFromMainThread(dst);
}

Просмотреть файл

@ -350,6 +350,7 @@ AccessibleCaretManager::SelectWordOrShortcut(const nsPoint& aPoint)
// Content is empty. No need to select word.
AC_LOG("%s, Cannot select word bacause content is empty", __FUNCTION__);
DispatchCaretStateChangedEvent(CaretChangedReason::Longpressonemptycontent);
UpdateCarets();
return NS_OK;
}

Просмотреть файл

@ -2395,6 +2395,15 @@ nsCSSFrameConstructor::ConstructDocElementFrame(Element* aDocEle
MOZ_ASSERT(!mDocElementContainingBlock,
"Shouldn't have a doc element containing block here");
// Resolve a new style context for the viewport since it may be affected
// by a new root element style (e.g. a propagated 'direction').
// @see nsStyleContext::ApplyStyleFixups
{
nsRefPtr<nsStyleContext> sc = mPresShell->StyleSet()->
ResolveAnonymousBoxStyle(nsCSSAnonBoxes::viewport, nullptr);
GetRootFrame()->SetStyleContextWithoutNotification(sc);
}
// Make sure to call PropagateScrollToViewport before
// SetUpDocElementContainingBlock, since it sets up our scrollbar state
// properly.

Просмотреть файл

@ -1547,34 +1547,53 @@ already_AddRefed<LayerManager> nsDisplayList::PaintRoot(nsDisplayListBuilder* aB
// want the root container layer to have metrics. If the parent process is
// using XUL windows, there is no root scrollframe, and without explicitly
// creating metrics there will be no guaranteed top-level APZC.
if (gfxPrefs::LayoutUseContainersForRootFrames() ||
(XRE_IsParentProcess() && !presShell->GetRootScrollFrame()))
{
bool addMetrics = gfxPrefs::LayoutUseContainersForRootFrames() ||
(XRE_IsParentProcess() && !presShell->GetRootScrollFrame());
// Add metrics if there are none in the layer tree with the id (create an id
// if there isn't one already) of the root scroll frame/root content.
bool ensureMetricsForRootId =
gfxPrefs::AsyncPanZoomEnabled() &&
!gfxPrefs::LayoutUseContainersForRootFrames() &&
aBuilder->IsPaintingToWindow() &&
!presContext->GetParentPresContext();
nsIContent* content = nullptr;
nsIFrame* rootScrollFrame = presShell->GetRootScrollFrame();
if (rootScrollFrame) {
content = rootScrollFrame->GetContent();
} else {
// If there is no root scroll frame, pick the document element instead.
// The only case we don't want to do this is in non-APZ fennec, where
// we want the root xul document to get a null scroll id so that the root
// content document gets the first non-null scroll id.
#if !defined(MOZ_WIDGET_ANDROID) || defined(MOZ_ANDROID_APZ)
content = document->GetDocumentElement();
#endif
}
if (ensureMetricsForRootId && content) {
ViewID scrollId = nsLayoutUtils::FindOrCreateIDFor(content);
if (nsLayoutUtils::ContainsMetricsWithId(root, scrollId)) {
ensureMetricsForRootId = false;
}
}
if (addMetrics || ensureMetricsForRootId) {
bool isRoot = presContext->IsRootContentDocument();
nsRect viewport(aBuilder->ToReferenceFrame(frame), frame->GetSize());
nsIFrame* scrollFrame = presShell->GetRootScrollFrame();
nsIContent* content = nullptr;
if (scrollFrame) {
content = scrollFrame->GetContent();
} else {
// If there is no root scroll frame, pick the document element instead.
// The only case we don't want to do this is in non-APZ fennec, where
// we want the root xul document to get a null scroll id so that the root
// content document gets the first non-null scroll id.
#if !defined(MOZ_WIDGET_ANDROID) || defined(MOZ_ANDROID_APZ)
content = document->GetDocumentElement();
#endif
}
root->SetFrameMetrics(
nsLayoutUtils::ComputeFrameMetrics(frame,
presShell->GetRootScrollFrame(),
content,
rootScrollFrame, content,
aBuilder->FindReferenceFrameFor(frame),
root, FrameMetrics::NULL_SCROLL_ID, viewport, Nothing(),
isRoot, containerParameters));
} else {
// Set empty metrics to clear any metrics that might be on a recycled layer.
root->SetFrameMetrics(nsTArray<FrameMetrics>());
}
// NS_WARNING is debug-only, so don't even bother checking the conditions in

Просмотреть файл

@ -1065,6 +1065,13 @@ nsLayoutUtils::GetDisplayPort(nsIContent* aContent, nsRect *aResult)
return GetDisplayPortImpl(aContent, aResult, 1.0f);
}
/* static */ bool
nsLayoutUtils::GetDisplayPortForVisibilityTesting(nsIContent* aContent,
nsRect* aResult)
{
return GetDisplayPortImpl(aContent, aResult, 1.0f);
}
bool
nsLayoutUtils::SetDisplayPortMargins(nsIContent* aContent,
nsIPresShell* aPresShell,
@ -1103,6 +1110,10 @@ nsLayoutUtils::SetDisplayPortMargins(nsIContent* aContent,
}
}
// Display port margins changing means that the set of visible images may
// have drastically changed. Schedule an update.
aPresShell->ScheduleImageVisibilityUpdate();
return true;
}
@ -8218,6 +8229,13 @@ nsLayoutUtils::ComputeFrameMetrics(nsIFrame* aForFrame,
metrics.SetIsRoot(aIsRoot);
metrics.SetScrollParentId(aScrollParentId);
if (scrollId != FrameMetrics::NULL_SCROLL_ID && !presContext->GetParentPresContext()) {
if ((aScrollFrame && (aScrollFrame == presShell->GetRootScrollFrame())) ||
aContent == presShell->GetDocument()->GetDocumentElement()) {
metrics.SetIsLayersIdRoot(true);
}
}
// Only the root scrollable frame for a given presShell should pick up
// the presShell's resolution. All the other frames are 1.0.
if (aScrollFrame == presShell->GetRootScrollFrame()) {
@ -8332,6 +8350,22 @@ nsLayoutUtils::ComputeFrameMetrics(nsIFrame* aForFrame,
return metrics;
}
/* static */ bool
nsLayoutUtils::ContainsMetricsWithId(const Layer* aLayer, const ViewID& aScrollId)
{
for (uint32_t i = aLayer->GetFrameMetricsCount(); i > 0; i--) {
if (aLayer->GetFrameMetrics(i-1).GetScrollId() == aScrollId) {
return true;
}
}
for (Layer* child = aLayer->GetFirstChild(); child; child = child->GetNextSibling()) {
if (ContainsMetricsWithId(child, aScrollId)) {
return true;
}
}
return false;
}
/* static */ uint32_t
nsLayoutUtils::GetTouchActionFromFrame(nsIFrame* aFrame)
{

Просмотреть файл

@ -165,6 +165,16 @@ public:
*/
static bool GetDisplayPort(nsIContent* aContent, nsRect *aResult = nullptr);
/**
* @return the display port for the given element which should be used for
* visibility testing purposes.
*
* If low-precision buffers are enabled, this is the critical display port;
* otherwise, it's the same display port returned by GetDisplayPort().
*/
static bool GetDisplayPortForVisibilityTesting(nsIContent* aContent,
nsRect* aResult = nullptr);
enum class RepaintMode : uint8_t {
Repaint,
DoNotRepaint
@ -2650,6 +2660,12 @@ public:
*/
static nsMargin ScrollbarAreaToExcludeFromCompositionBoundsFor(nsIFrame* aScrollFrame);
/**
* Looks in the layer subtree rooted at aLayer for a metrics with scroll id
* aScrollId. Returns true if such is found.
*/
static bool ContainsMetricsWithId(const Layer* aLayer, const ViewID& aScrollId);
private:
static uint32_t sFontSizeInflationEmPerLine;
static uint32_t sFontSizeInflationMinTwips;

Просмотреть файл

@ -5940,7 +5940,9 @@ PresShell::MarkImagesInSubtreeVisible(nsIFrame* aFrame, const nsRect& aRect)
nsIScrollableFrame* scrollFrame = do_QueryFrame(aFrame);
if (scrollFrame) {
nsRect displayPort;
bool usingDisplayport = nsLayoutUtils::GetDisplayPort(aFrame->GetContent(), &displayPort);
bool usingDisplayport =
nsLayoutUtils::GetDisplayPortForVisibilityTesting(aFrame->GetContent(),
&displayPort);
if (usingDisplayport) {
rect = displayPort;
} else {

Просмотреть файл

@ -78,6 +78,7 @@ static PRLogModuleInfo *gLog = nullptr;
#define DEFAULT_FRAME_RATE 60
#define DEFAULT_THROTTLED_FRAME_RATE 1
#define DEFAULT_RECOMPUTE_VISIBILITY_INTERVAL_MS 1000
// after 10 minutes, stop firing off inactive timers
#define DEFAULT_INACTIVE_TIMER_DISABLE_SECONDS 600
@ -970,6 +971,17 @@ nsRefreshDriver::GetThrottledTimerInterval()
return 1000.0 / rate;
}
/* static */ mozilla::TimeDuration
nsRefreshDriver::GetMinRecomputeVisibilityInterval()
{
  // Read the user-configurable minimum interval; any unset or non-positive
  // value falls back to the compiled-in default.
  int32_t intervalMs =
    Preferences::GetInt("layout.visibility.min-recompute-interval-ms", -1);
  return TimeDuration::FromMilliseconds(
    intervalMs > 0 ? intervalMs : DEFAULT_RECOMPUTE_VISIBILITY_INTERVAL_MS);
}
double
nsRefreshDriver::GetRefreshTimerInterval() const
{
@ -1016,7 +1028,9 @@ nsRefreshDriver::nsRefreshDriver(nsPresContext* aPresContext)
mFreezeCount(0),
mThrottledFrameRequestInterval(TimeDuration::FromMilliseconds(
GetThrottledTimerInterval())),
mMinRecomputeVisibilityInterval(GetMinRecomputeVisibilityInterval()),
mThrottled(false),
mNeedToRecomputeVisibility(false),
mTestControllingRefreshes(false),
mViewManagerFlushIsPending(false),
mRequestedHighPrecision(false),
@ -1028,6 +1042,7 @@ nsRefreshDriver::nsRefreshDriver(nsPresContext* aPresContext)
mMostRecentRefresh = TimeStamp::Now();
mMostRecentTick = mMostRecentRefresh;
mNextThrottledFrameRequestTick = mMostRecentTick;
mNextRecomputeVisibilityTick = mMostRecentTick;
}
nsRefreshDriver::~nsRefreshDriver()
@ -1676,6 +1691,8 @@ nsRefreshDriver::Tick(int64_t aNowEpoch, TimeStamp aNowTime)
NS_RELEASE(shell);
}
mNeedToRecomputeVisibility = true;
if (tracingStyleFlush) {
profiler_tracing("Paint", "Styles", TRACING_INTERVAL_END);
}
@ -1721,6 +1738,8 @@ nsRefreshDriver::Tick(int64_t aNowEpoch, TimeStamp aNowTime)
NS_RELEASE(shell);
}
mNeedToRecomputeVisibility = true;
if (tracingLayoutFlush) {
profiler_tracing("Paint", "Reflow", TRACING_INTERVAL_END);
}
@ -1728,6 +1747,17 @@ nsRefreshDriver::Tick(int64_t aNowEpoch, TimeStamp aNowTime)
}
}
// Recompute image visibility if it's necessary and enough time has passed
// since the last time we did it.
if (mNeedToRecomputeVisibility && !mThrottled &&
aNowTime >= mNextRecomputeVisibilityTick &&
!presShell->IsPaintingSuppressed()) {
mNextRecomputeVisibilityTick = aNowTime + mMinRecomputeVisibilityInterval;
mNeedToRecomputeVisibility = false;
presShell->ScheduleImageVisibilityUpdate();
}
/*
* Perform notification to imgIRequests subscribed to listen
* for refresh events.

Просмотреть файл

@ -339,6 +339,8 @@ private:
double GetRegularTimerInterval(bool *outIsDefault = nullptr) const;
static double GetThrottledTimerInterval();
static mozilla::TimeDuration GetMinRecomputeVisibilityInterval();
bool HaveFrameRequestCallbacks() const {
return mFrameRequestCallbackDocs.Length() != 0;
}
@ -367,7 +369,14 @@ private:
// non-visible) documents registered with a non-throttled refresh driver.
const mozilla::TimeDuration mThrottledFrameRequestInterval;
// How long we wait, at a minimum, before recomputing image visibility
// information. This is a minimum because, regardless of this interval, we
// only recompute visibility when we've seen a layout or style flush since the
// last time we did it.
const mozilla::TimeDuration mMinRecomputeVisibilityInterval;
bool mThrottled;
bool mNeedToRecomputeVisibility;
bool mTestControllingRefreshes;
bool mViewManagerFlushIsPending;
bool mRequestedHighPrecision;
@ -386,6 +395,7 @@ private:
mozilla::TimeStamp mMostRecentTick;
mozilla::TimeStamp mTickStart;
mozilla::TimeStamp mNextThrottledFrameRequestTick;
mozilla::TimeStamp mNextRecomputeVisibilityTick;
// separate arrays for each flush type we support
ObserverArray mObservers[3];

Просмотреть файл

@ -0,0 +1,8 @@
<!DOCTYPE html>
<!-- Crashtest: flex item with an auto logical start margin
     (-moz-margin-start) inside an inline-flex container, in a
     vertical-lr writing mode with rtl direction.
     NOTE(review): presumably the 1169420-1.html case referenced by the
     crashtest manifest - confirm against the filename. -->
<html style="writing-mode: vertical-lr;">
<body>
<div style="display: inline-flex;">
<div style="-moz-margin-start: auto; -moz-margin-end: 75px; direction: rtl;"></div>
</div>
</body>
</html>

Просмотреть файл

@ -0,0 +1,8 @@
<!DOCTYPE html>
<!-- Crashtest: flex item with an auto block-axis margin (margin-bottom)
     inside an inline-flex container, in a vertical-lr writing mode with
     rtl direction.
     NOTE(review): presumably the 1169420-2.html case referenced by the
     crashtest manifest - confirm against the filename. -->
<html style="writing-mode: vertical-lr;">
<body>
<div style="display: inline-flex;">
<div style="margin-bottom: auto; margin-top: 75px; direction: rtl;"></div>
</div>
</body>
</html>

Просмотреть файл

@ -585,3 +585,5 @@ load 1146107.html
load 1146114.html
load 1156222.html
load 1157011.html
load 1169420-1.html
load 1169420-2.html

Просмотреть файл

@ -3092,7 +3092,15 @@ ScrollFrameHelper::ComputeFrameMetrics(Layer* aLayer,
parentLayerClip = Some(clip);
}
if (!gfxPrefs::AsyncPanZoomEnabled()) {
bool thisScrollFrameUsesAsyncScrolling = nsLayoutUtils::UsesAsyncScrolling();
#if defined(MOZ_WIDGET_ANDROID) && !defined(MOZ_ANDROID_APZ)
// Android without apzc (aka the java pan zoom code) only uses async scrolling
// for the root scroll frame of the root content document.
if (!isRoot) {
thisScrollFrameUsesAsyncScrolling = false;
}
#endif
if (!thisScrollFrameUsesAsyncScrolling) {
if (parentLayerClip) {
// If APZ is not enabled, we still need the displayport to be clipped
// in the compositor.

Просмотреть файл

@ -0,0 +1,34 @@
<!DOCTYPE html>
<!-- Reftest page: absolutely positioned div with both left/right offsets,
     explicit width, and zero horizontal margins, under an ltr root
     (html { direction: ltr; }). Intended to render identically to the
     sibling rtl / dir-attribute variants. -->
<html>
<head>
<meta charset="utf-8">
<title></title>
<style type="text/css">
html { direction: ltr; }
div
{
background-color: green;
border-left: green none 0px;
border-right: green none 0px;
height: 50px;
left: 20%;
margin-left: 0px;
margin-right: 0px;
position: absolute;
right: 20%;
top: 0px;
width: 20%;
}
</style>
</head>
<body>
<div></div>
</body>
</html>

Просмотреть файл

@ -0,0 +1,34 @@
<!DOCTYPE html>
<!-- Reftest page: absolutely positioned div with both left/right offsets,
     explicit width, and zero horizontal margins, under an rtl root
     (html { direction: rtl; }). Intended to render identically to the
     sibling ltr / dir-attribute variants. -->
<html>
<head>
<meta charset="utf-8">
<title></title>
<style type="text/css">
html { direction: rtl; }
div
{
background-color: green;
border-left: green none 0px;
border-right: green none 0px;
height: 50px;
left: 20%;
margin-left: 0px;
margin-right: 0px;
position: absolute;
right: 20%;
top: 0px;
width: 20%;
}
</style>
</head>
<body>
<div></div>
</body>
</html>

Просмотреть файл

@ -0,0 +1,32 @@
<!DOCTYPE html>
<!-- Reftest page: same absolutely positioned div as the CSS-direction
     variants, but with direction set via the HTML dir="ltr" attribute
     instead of a CSS rule. Intended to render identically to them. -->
<html dir="ltr">
<head>
<meta charset="utf-8">
<title></title>
<style type="text/css">
div
{
background-color: green;
border-left: green none 0px;
border-right: green none 0px;
height: 50px;
left: 20%;
margin-left: 0px;
margin-right: 0px;
position: absolute;
right: 20%;
top: 0px;
width: 20%;
}
</style>
</head>
<body>
<div></div>
</body>
</html>

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше