diff --git a/accessible/atk/AccessibleWrap.cpp b/accessible/atk/AccessibleWrap.cpp index 7ad7cedd1191..6df4f57cc17d 100644 --- a/accessible/atk/AccessibleWrap.cpp +++ b/accessible/atk/AccessibleWrap.cpp @@ -11,6 +11,7 @@ #include "InterfaceInitFuncs.h" #include "nsAccUtils.h" #include "mozilla/a11y/PDocAccessible.h" +#include "OuterDocAccessible.h" #include "ProxyAccessible.h" #include "RootAccessible.h" #include "nsMai.h" @@ -96,7 +97,7 @@ static GType GetAtkTypeForMai(MaiInterfaceType type) return G_TYPE_INVALID; } -static const char* kNonUserInputEvent = ":system"; +#define NON_USER_EVENT ":system" static const GInterfaceInfo atk_if_infos[] = { {(GInterfaceInitFunc)componentInterfaceInitCB, @@ -828,26 +829,45 @@ getChildCountCB(AtkObject *aAtkObj) AtkObject * refChildCB(AtkObject *aAtkObj, gint aChildIndex) { - // aChildIndex should not be less than zero - if (aChildIndex < 0) { + // aChildIndex should not be less than zero + if (aChildIndex < 0) { + return nullptr; + } + + AtkObject* childAtkObj = nullptr; + AccessibleWrap* accWrap = GetAccessibleWrap(aAtkObj); + if (accWrap) { + if (nsAccUtils::MustPrune(accWrap)) { return nullptr; } - AccessibleWrap* accWrap = GetAccessibleWrap(aAtkObj); - if (!accWrap || nsAccUtils::MustPrune(accWrap)) { - return nullptr; - } - Accessible* accChild = accWrap->GetEmbeddedChildAt(aChildIndex); - if (!accChild) - return nullptr; + if (accChild) { + childAtkObj = AccessibleWrap::GetAtkObject(accChild); + } else { + OuterDocAccessible* docOwner = accWrap->AsOuterDoc(); + if (docOwner) { + ProxyAccessible* proxyDoc = docOwner->RemoteChildDoc(); + if (proxyDoc) + childAtkObj = GetWrapperFor(proxyDoc); + } + } + } else if (ProxyAccessible* proxy = GetProxy(aAtkObj)) { + if (proxy->MustPruneChildren()) + return nullptr; - AtkObject* childAtkObj = AccessibleWrap::GetAtkObject(accChild); + ProxyAccessible* child = proxy->EmbeddedChildAt(aChildIndex); + if (child) + childAtkObj = GetWrapperFor(child); + } else { + return nullptr; + } - NS_ASSERTION(childAtkObj, "Fail to get AtkObj"); - if (!childAtkObj) - return nullptr; - g_object_ref(childAtkObj); + NS_ASSERTION(childAtkObj, "Fail to get AtkObj"); + if (!childAtkObj) + return nullptr; + + g_object_ref(childAtkObj); if (aAtkObj != childAtkObj->accessible_parent) atk_object_set_parent(childAtkObj, aAtkObj); @@ -1431,6 +1451,21 @@ MaiAtkObject::FireStateChangeEvent(uint64_t aState, bool aEnabled) } } +#define OLD_TEXT_INSERTED "text_changed::insert" +#define OLD_TEXT_REMOVED "text_changed::delete" +static const char* oldTextChangeStrings[2][2] = { + { OLD_TEXT_REMOVED NON_USER_EVENT, OLD_TEXT_INSERTED NON_USER_EVENT }, + { OLD_TEXT_REMOVED, OLD_TEXT_INSERTED } +}; + +#define TEXT_INSERTED "text-insert" +#define TEXT_REMOVED "text-remove" +#define NON_USER_DETAIL "::system" +static const char* textChangedStrings[2][2] = { + { TEXT_REMOVED NON_USER_DETAIL, TEXT_INSERTED NON_USER_DETAIL }, + { TEXT_REMOVED, TEXT_INSERTED} +}; + nsresult AccessibleWrap::FireAtkTextChangedEvent(AccEvent* aEvent, AtkObject* aObject) @@ -1442,7 +1477,6 @@ AccessibleWrap::FireAtkTextChangedEvent(AccEvent* aEvent, uint32_t length = event->GetLength(); bool isInserted = event->IsTextInserted(); bool isFromUserInput = aEvent->IsFromUserInput(); - char* signal_name = nullptr; if (gAvailableAtkSignals == eUnknown) gAvailableAtkSignals = @@ -1453,23 +1487,29 @@ AccessibleWrap::FireAtkTextChangedEvent(AccEvent* aEvent, // XXX remove this code and the gHaveNewTextSignals check when we can // stop supporting old atk since it doesn't really work 
anyway // see bug 619002 - signal_name = g_strconcat(isInserted ? "text_changed::insert" : - "text_changed::delete", - isFromUserInput ? "" : kNonUserInputEvent, nullptr); + const char* signal_name = + oldTextChangeStrings[isFromUserInput][isInserted]; g_signal_emit_by_name(aObject, signal_name, start, length); } else { nsAutoString text; event->GetModifiedText(text); - signal_name = g_strconcat(isInserted ? "text-insert" : "text-remove", - isFromUserInput ? "" : "::system", nullptr); + const char* signal_name = + textChangedStrings[isFromUserInput][isInserted]; g_signal_emit_by_name(aObject, signal_name, start, length, NS_ConvertUTF16toUTF8(text).get()); } - g_free(signal_name); return NS_OK; } +#define ADD_EVENT "children_changed::add" +#define HIDE_EVENT "children_changed::remove" + +static const char *kMutationStrings[2][2] = { + { HIDE_EVENT NON_USER_EVENT, ADD_EVENT NON_USER_EVENT }, + { HIDE_EVENT, ADD_EVENT }, +}; + nsresult AccessibleWrap::FireAtkShowHideEvent(AccEvent* aEvent, AtkObject* aObject, bool aIsAdded) @@ -1479,10 +1519,8 @@ AccessibleWrap::FireAtkShowHideEvent(AccEvent* aEvent, NS_ENSURE_STATE(parentObject); bool isFromUserInput = aEvent->IsFromUserInput(); - char *signal_name = g_strconcat(aIsAdded ? "children_changed::add" : "children_changed::remove", - isFromUserInput ? "" : kNonUserInputEvent, nullptr); + const char *signal_name = kMutationStrings[isFromUserInput][aIsAdded]; g_signal_emit_by_name(parentObject, signal_name, indexInParent, aObject, nullptr); - g_free(signal_name); return NS_OK; } diff --git a/accessible/generic/Accessible.h b/accessible/generic/Accessible.h index 6d9a9cc271af..03e9e7c03def 100644 --- a/accessible/generic/Accessible.h +++ b/accessible/generic/Accessible.h @@ -38,6 +38,7 @@ class HTMLLIAccessible; class HyperTextAccessible; class ImageAccessible; class KeyBinding; +class OuterDocAccessible; class ProxyAccessible; class Relation; class RootAccessible; @@ -619,6 +620,9 @@ public: return mBits.proxy; } + bool IsOuterDoc() const { return mType == eOuterDocType; } + OuterDocAccessible* AsOuterDoc(); + bool IsProgress() const { return mType == eProgressType; } bool IsRoot() const { return mType == eRootType; } diff --git a/accessible/generic/OuterDocAccessible.cpp b/accessible/generic/OuterDocAccessible.cpp index 3fbbc38e4116..a780b96a39f1 100644 --- a/accessible/generic/OuterDocAccessible.cpp +++ b/accessible/generic/OuterDocAccessible.cpp @@ -8,6 +8,8 @@ #include "Accessible-inl.h" #include "nsAccUtils.h" #include "DocAccessible-inl.h" +#include "mozilla/a11y/DocAccessibleParent.h" +#include "mozilla/dom/TabParent.h" #include "Role.h" #include "States.h" @@ -26,6 +28,7 @@ OuterDocAccessible:: OuterDocAccessible(nsIContent* aContent, DocAccessible* aDoc) : AccessibleWrap(aContent, aDoc) { + mType = eOuterDocType; } OuterDocAccessible::~OuterDocAccessible() @@ -181,3 +184,24 @@ OuterDocAccessible::CacheChildren() GetAccService()->GetDocAccessible(innerDoc); } } + +ProxyAccessible* +OuterDocAccessible::RemoteChildDoc() const +{ + dom::TabParent* tab = dom::TabParent::GetFrom(GetContent()); + if (!tab) + return nullptr; + + // XXX Consider managing non top level remote documents with there parent + // document. 
+ const nsTArray& docs = tab->ManagedPDocAccessibleParent(); + size_t docCount = docs.Length(); + for (size_t i = 0; i < docCount; i++) { + auto doc = static_cast(docs[i]); + if (!doc->ParentDoc()) + return doc; + } + + MOZ_ASSERT(false, "no top level tab document?"); + return nullptr; +} diff --git a/accessible/generic/OuterDocAccessible.h b/accessible/generic/OuterDocAccessible.h index 9b07f29f234d..02b455941378 100644 --- a/accessible/generic/OuterDocAccessible.h +++ b/accessible/generic/OuterDocAccessible.h @@ -10,6 +10,7 @@ namespace mozilla { namespace a11y { +class ProxyAccessible; /** * Used for , , + +

+
+
+
+
diff --git a/dom/workers/test/serviceworkers/thirdparty/iframe1.html b/dom/workers/test/serviceworkers/thirdparty/iframe1.html
new file mode 100644
index 000000000000..43fe8c5729b5
--- /dev/null
+++ b/dom/workers/test/serviceworkers/thirdparty/iframe1.html
@@ -0,0 +1,30 @@
+
+
+
+  
+  SW third party iframe test
+
+  
+
+
+
+
+  
+
+
+
diff --git a/dom/workers/test/serviceworkers/thirdparty/iframe2.html b/dom/workers/test/serviceworkers/thirdparty/iframe2.html
new file mode 100644
index 000000000000..fac6a9395dea
--- /dev/null
+++ b/dom/workers/test/serviceworkers/thirdparty/iframe2.html
@@ -0,0 +1,7 @@
+
+
diff --git a/dom/workers/test/serviceworkers/thirdparty/register.html b/dom/workers/test/serviceworkers/thirdparty/register.html
new file mode 100644
index 000000000000..59b8c5c41ad5
--- /dev/null
+++ b/dom/workers/test/serviceworkers/thirdparty/register.html
@@ -0,0 +1,27 @@
+
+
diff --git a/dom/workers/test/serviceworkers/thirdparty/sw.js b/dom/workers/test/serviceworkers/thirdparty/sw.js
new file mode 100644
index 000000000000..ca45698c8374
--- /dev/null
+++ b/dom/workers/test/serviceworkers/thirdparty/sw.js
@@ -0,0 +1,14 @@
+self.addEventListener("fetch", function(event) {
+  dump("fetch " + event.request.url + "\n");
+  if (event.request.url.indexOf("iframe2.html") >= 0) {
+    var body =
+      "";
+    event.respondWith(new Response(body, {
+      headers: {'Content-Type': 'text/html'}
+    }));
+  }
+});
diff --git a/dom/workers/test/serviceworkers/thirdparty/unregister.html b/dom/workers/test/serviceworkers/thirdparty/unregister.html
new file mode 100644
index 000000000000..a054f3bd2c09
--- /dev/null
+++ b/dom/workers/test/serviceworkers/thirdparty/unregister.html
@@ -0,0 +1,12 @@
+
+
diff --git a/embedding/components/webbrowserpersist/nsWebBrowserPersist.cpp b/embedding/components/webbrowserpersist/nsWebBrowserPersist.cpp
index b5f3cbf8686a..208f41469b8d 100644
--- a/embedding/components/webbrowserpersist/nsWebBrowserPersist.cpp
+++ b/embedding/components/webbrowserpersist/nsWebBrowserPersist.cpp
@@ -2529,6 +2529,36 @@ nsWebBrowserPersist::EnumCleanupUploadList(nsISupports *aKey, UploadData *aData,
     return PL_DHASH_NEXT;
 }
 
+static void
+AppendXMLAttr(const nsAString& key, const nsAString& aValue, nsAString& aBuffer)
+{
+    if (!aBuffer.IsEmpty()) {
+        aBuffer.Append(' ');
+    }
+    aBuffer.Append(key);
+    aBuffer.AppendLiteral("=\"");
+    for (size_t i = 0; i < aValue.Length(); ++i) {
+        switch (aValue[i]) {
+            case '&':
+                aBuffer.AppendLiteral("&amp;");
+                break;
+            case '<':
+                aBuffer.AppendLiteral("&lt;");
+                break;
+            case '>':
+                aBuffer.AppendLiteral("&gt;");
+                break;
+            case '"':
+                aBuffer.AppendLiteral("&quot;");
+                break;
+            default:
+                aBuffer.Append(aValue[i]);
+                break;
+        }
+    }
+    aBuffer.Append('"');
+}
+
 nsresult nsWebBrowserPersist::FixupXMLStyleSheetLink(nsIDOMProcessingInstruction *aPI, const nsAString &aHref)
 {
     NS_ENSURE_ARG_POINTER(aPI);
@@ -2568,30 +2598,28 @@ nsresult nsWebBrowserPersist::FixupXMLStyleSheetLink(nsIDOMProcessingInstruction
                                                 nsGkAtoms::media,
                                                 media);
 
-        NS_NAMED_LITERAL_STRING(kCloseAttr, "\" ");
         nsAutoString newData;
-        newData += NS_LITERAL_STRING("href=\"") + aHref + kCloseAttr;
+        AppendXMLAttr(NS_LITERAL_STRING("href"), aHref, newData);
         if (!title.IsEmpty())
         {
-            newData += NS_LITERAL_STRING("title=\"") + title + kCloseAttr;
+            AppendXMLAttr(NS_LITERAL_STRING("title"), title, newData);
         }
         if (!media.IsEmpty())
         {
-            newData += NS_LITERAL_STRING("media=\"") + media + kCloseAttr;
+            AppendXMLAttr(NS_LITERAL_STRING("media"), media, newData);
         }
         if (!type.IsEmpty())
         {
-            newData += NS_LITERAL_STRING("type=\"") + type + kCloseAttr;
+            AppendXMLAttr(NS_LITERAL_STRING("type"), type, newData);
         }
         if (!charset.IsEmpty())
         {
-            newData += NS_LITERAL_STRING("charset=\"") + charset + kCloseAttr;
+            AppendXMLAttr(NS_LITERAL_STRING("charset"), charset, newData);
         }
         if (!alternate.IsEmpty())
         {
-            newData += NS_LITERAL_STRING("alternate=\"") + alternate + kCloseAttr;
+            AppendXMLAttr(NS_LITERAL_STRING("alternate"), alternate, newData);
         }
-        newData.Truncate(newData.Length() - 1);  // Remove the extra space on the end.
         aPI->SetData(newData);
     }
 
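(Illustration, not part of the patch: a minimal sketch of what the new AppendXMLAttr helper produces when an attribute value contains XML metacharacters; the input string here is hypothetical.)

    nsAutoString newData;
    // Hypothetical stylesheet title containing every character the helper escapes.
    AppendXMLAttr(NS_LITERAL_STRING("title"),
                  NS_LITERAL_STRING("a \"quoted\" & <odd> title"),
                  newData);
    // newData now holds: title="a &quot;quoted&quot; &amp; &lt;odd&gt; title"
    // With the old string concatenation, the raw '"' would have terminated the
    // attribute early and corrupted the serialized <?xml-stylesheet?> data.
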
diff --git a/embedding/test/bug1170334_iframe.xml b/embedding/test/bug1170334_iframe.xml
new file mode 100644
index 000000000000..1821e07f967d
--- /dev/null
+++ b/embedding/test/bug1170334_iframe.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/embedding/test/bug1170334_style.css b/embedding/test/bug1170334_style.css
new file mode 100644
index 000000000000..476c22b695c2
--- /dev/null
+++ b/embedding/test/bug1170334_style.css
@@ -0,0 +1 @@
+/* This stylesheet intentionally left blank. */
diff --git a/embedding/test/chrome.ini b/embedding/test/chrome.ini
index 14dccff247a4..4684b7612937 100644
--- a/embedding/test/chrome.ini
+++ b/embedding/test/chrome.ini
@@ -3,6 +3,9 @@ skip-if = buildapp == 'b2g'
 support-files =
   320x240.ogv
   bug449141_page.html
+  bug1170334_iframe.xml
+  bug1170334_style.css
 
 [test_bug449141.html]
 skip-if = toolkit == 'android'
+[test_bug1170334_wbp_xmlstyle.html]
diff --git a/embedding/test/test_bug1170334_wbp_xmlstyle.html b/embedding/test/test_bug1170334_wbp_xmlstyle.html
new file mode 100644
index 000000000000..4dee9a6ba735
--- /dev/null
+++ b/embedding/test/test_bug1170334_wbp_xmlstyle.html
@@ -0,0 +1,80 @@
+
+
+
+
+  Test for Bug 1170334 (nsWebBrowserPersist vs. XML stylesheets)
+  
+  
+
+
+Mozilla Bug 1170334
+

+

+
+ +
+
+
+
+ + diff --git a/gfx/2d/2D.h b/gfx/2d/2D.h index 9bae3a3f61a7..3605f12253ea 100644 --- a/gfx/2d/2D.h +++ b/gfx/2d/2D.h @@ -395,6 +395,52 @@ public: READ_WRITE }; + /** + * This is a scoped version of Map(). Map() is called in the constructor and + * Unmap() in the destructor. Use this for automatic unmapping of your data + * surfaces. + * + * Use IsMapped() to verify whether Map() succeeded or not. + */ + class ScopedMap { + public: + explicit ScopedMap(DataSourceSurface* aSurface, MapType aType) + : mSurface(aSurface) + , mIsMapped(aSurface->Map(aType, &mMap)) {} + + virtual ~ScopedMap() + { + if (mIsMapped) { + mSurface->Unmap(); + } + } + + uint8_t* GetData() + { + MOZ_ASSERT(mIsMapped); + return mMap.mData; + } + + int32_t GetStride() + { + MOZ_ASSERT(mIsMapped); + return mMap.mStride; + } + + MappedSurface* GetMappedSurface() + { + MOZ_ASSERT(mIsMapped); + return &mMap; + } + + bool IsMapped() { return mIsMapped; } + + private: + RefPtr mSurface; + MappedSurface mMap; + bool mIsMapped; + }; + virtual SurfaceType GetType() const override { return SurfaceType::DATA; } /** @deprecated * Get the raw bitmap data of the surface. diff --git a/gfx/2d/DataSurfaceHelpers.cpp b/gfx/2d/DataSurfaceHelpers.cpp index d0f08fa295cd..dbccf5673e17 100644 --- a/gfx/2d/DataSurfaceHelpers.cpp +++ b/gfx/2d/DataSurfaceHelpers.cpp @@ -16,7 +16,9 @@ namespace mozilla { namespace gfx { uint8_t* -DataAtOffset(DataSourceSurface* aSurface, IntPoint aPoint) +DataAtOffset(DataSourceSurface* aSurface, + DataSourceSurface::MappedSurface* aMap, + IntPoint aPoint) { if (!SurfaceContainsPoint(aSurface, aPoint)) { MOZ_CRASH("sample position needs to be inside surface!"); @@ -25,10 +27,10 @@ DataAtOffset(DataSourceSurface* aSurface, IntPoint aPoint) MOZ_ASSERT(Factory::CheckSurfaceSize(aSurface->GetSize()), "surface size overflows - this should have been prevented when the surface was created"); - uint8_t* data = aSurface->GetData() + aPoint.y * aSurface->Stride() + + uint8_t* data = aMap->mData + aPoint.y * aMap->mStride + aPoint.x * BytesPerPixel(aSurface->GetFormat()); - if (data < aSurface->GetData()) { + if (data < aMap->mData) { MOZ_CRASH("out-of-range data access"); } @@ -250,10 +252,16 @@ CopyRect(DataSourceSurface* aSrc, DataSourceSurface* aDest, return; } - uint8_t* sourceData = DataAtOffset(aSrc, aSrcRect.TopLeft()); - uint32_t sourceStride = aSrc->Stride(); - uint8_t* destData = DataAtOffset(aDest, aDestPoint); - uint32_t destStride = aDest->Stride(); + DataSourceSurface::ScopedMap srcMap(aSrc, DataSourceSurface::READ); + DataSourceSurface::ScopedMap destMap(aDest, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!srcMap.IsMapped() || !destMap.IsMapped())) { + return; + } + + uint8_t* sourceData = DataAtOffset(aSrc, srcMap.GetMappedSurface(), aSrcRect.TopLeft()); + uint32_t sourceStride = srcMap.GetStride(); + uint8_t* destData = DataAtOffset(aDest, destMap.GetMappedSurface(), aDestPoint); + uint32_t destStride = destMap.GetStride(); if (BytesPerPixel(aSrc->GetFormat()) == 4) { for (int32_t y = 0; y < aSrcRect.height; y++) { diff --git a/gfx/2d/DataSurfaceHelpers.h b/gfx/2d/DataSurfaceHelpers.h index e1afecee1df0..4444f747c0c9 100644 --- a/gfx/2d/DataSurfaceHelpers.h +++ b/gfx/2d/DataSurfaceHelpers.h @@ -90,7 +90,9 @@ CreateDataSourceSurfaceByCloning(DataSourceSurface* aSource); * Return the byte at aPoint. 
*/ uint8_t* -DataAtOffset(DataSourceSurface* aSurface, IntPoint aPoint); +DataAtOffset(DataSourceSurface* aSurface, + DataSourceSurface::MappedSurface* aMap, + IntPoint aPoint); /** * Check if aPoint is contained by the surface. diff --git a/gfx/2d/DrawTargetD2D.cpp b/gfx/2d/DrawTargetD2D.cpp index 57be72b4c3a9..98bee747cf7c 100644 --- a/gfx/2d/DrawTargetD2D.cpp +++ b/gfx/2d/DrawTargetD2D.cpp @@ -305,15 +305,22 @@ DrawTargetD2D::GetBitmapForSurface(SourceSurface *aSurface, return nullptr; } - int stride = srcSurf->Stride(); + HRESULT hr; + { + DataSourceSurface::ScopedMap srcMap(srcSurf, DataSourceSurface::READ); + if (MOZ2D_WARN_IF(!srcMap.IsMapped())) { + return nullptr; + } - unsigned char *data = srcSurf->GetData() + - (uint32_t)sourceRect.y * stride + - (uint32_t)sourceRect.x * BytesPerPixel(srcSurf->GetFormat()); + int stride = srcMap.GetStride(); + unsigned char *data = srcMap.GetData() + + (uint32_t)sourceRect.y * stride + + (uint32_t)sourceRect.x * BytesPerPixel(srcSurf->GetFormat()); - D2D1_BITMAP_PROPERTIES props = - D2D1::BitmapProperties(D2DPixelFormat(srcSurf->GetFormat())); - HRESULT hr = mRT->CreateBitmap(D2D1::SizeU(UINT32(sourceRect.width), UINT32(sourceRect.height)), data, stride, props, byRef(bitmap)); + D2D1_BITMAP_PROPERTIES props = + D2D1::BitmapProperties(D2DPixelFormat(srcSurf->GetFormat())); + hr = mRT->CreateBitmap(D2D1::SizeU(UINT32(sourceRect.width), UINT32(sourceRect.height)), data, stride, props, byRef(bitmap)); + } if (FAILED(hr)) { IntSize size(sourceRect.width, sourceRect.height); gfxCriticalError(CriticalLog::DefaultOptions(Factory::ReasonableSurfaceSize(size))) << "[D2D] 1CreateBitmap failure " << size << " Code: " << hexa(hr); diff --git a/gfx/2d/DrawTargetD2D1.cpp b/gfx/2d/DrawTargetD2D1.cpp index 8cbfad641369..4a402cc6c886 100644 --- a/gfx/2d/DrawTargetD2D1.cpp +++ b/gfx/2d/DrawTargetD2D1.cpp @@ -1536,22 +1536,22 @@ DrawTargetD2D1::OptimizeSourceSurface(SourceSurface* aSurface) const RefPtr data = aSurface->GetDataSurface(); - DataSourceSurface::MappedSurface map; - if (!data->Map(DataSourceSurface::MapType::READ, &map)) { - return nullptr; - } - RefPtr bitmap; - HRESULT hr = mDC->CreateBitmap(D2DIntSize(data->GetSize()), map.mData, map.mStride, - D2D1::BitmapProperties1(D2D1_BITMAP_OPTIONS_NONE, D2DPixelFormat(data->GetFormat())), - byRef(bitmap)); + { + DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ); + if (MOZ2D_WARN_IF(!map.IsMapped())) { + return nullptr; + } - if (FAILED(hr)) { - gfxCriticalError(CriticalLog::DefaultOptions(Factory::ReasonableSurfaceSize(data->GetSize()))) << "[D2D1.1] 4CreateBitmap failure " << data->GetSize() << " Code: " << hexa(hr); + HRESULT hr = mDC->CreateBitmap(D2DIntSize(data->GetSize()), map.GetData(), map.GetStride(), + D2D1::BitmapProperties1(D2D1_BITMAP_OPTIONS_NONE, D2DPixelFormat(data->GetFormat())), + byRef(bitmap)); + + if (FAILED(hr)) { + gfxCriticalError(CriticalLog::DefaultOptions(Factory::ReasonableSurfaceSize(data->GetSize()))) << "[D2D1.1] 4CreateBitmap failure " << data->GetSize() << " Code: " << hexa(hr); + } } - data->Unmap(); - if (!bitmap) { return data.forget(); } diff --git a/gfx/2d/DrawTargetTiled.h b/gfx/2d/DrawTargetTiled.h index be020c2ca927..934237632ce1 100644 --- a/gfx/2d/DrawTargetTiled.h +++ b/gfx/2d/DrawTargetTiled.h @@ -177,7 +177,10 @@ public: RefPtr surf = Factory::CreateDataSourceSurface(GetSize(), GetFormat()); DataSourceSurface::MappedSurface mappedSurf; - surf->Map(DataSourceSurface::MapType::WRITE, &mappedSurf); + if (!surf->Map(DataSourceSurface::MapType::WRITE, 
&mappedSurf)) { + gfxCriticalError() << "DrawTargetTiled::GetDataSurface failed to map surface"; + return nullptr; + } { RefPtr dt = diff --git a/gfx/2d/FilterNodeSoftware.cpp b/gfx/2d/FilterNodeSoftware.cpp index cccc540aab0b..6b8611b84e4e 100644 --- a/gfx/2d/FilterNodeSoftware.cpp +++ b/gfx/2d/FilterNodeSoftware.cpp @@ -195,9 +195,12 @@ FillRectWithPixel(DataSourceSurface *aSurface, const IntRect &aFillRect, IntPoin MOZ_ASSERT(SurfaceContainsPoint(aSurface, aPixelPos), "aPixelPos needs to be inside the surface"); - int32_t stride = aSurface->Stride(); - uint8_t* sourcePixelData = DataAtOffset(aSurface, aPixelPos); - uint8_t* data = DataAtOffset(aSurface, aFillRect.TopLeft()); + DataSourceSurface::ScopedMap surfMap(aSurface, DataSourceSurface::READ_WRITE); + if(MOZ2D_WARN_IF(!surfMap.IsMapped())) { + return; + } + uint8_t* sourcePixelData = DataAtOffset(aSurface, surfMap.GetMappedSurface(), aPixelPos); + uint8_t* data = DataAtOffset(aSurface, surfMap.GetMappedSurface(), aFillRect.TopLeft()); int bpp = BytesPerPixel(aSurface->GetFormat()); // Fill the first row by hand. @@ -213,7 +216,7 @@ FillRectWithPixel(DataSourceSurface *aSurface, const IntRect &aFillRect, IntPoin // Copy the first row into the other rows. for (int32_t y = 1; y < aFillRect.height; y++) { - PodCopy(data + y * stride, data, aFillRect.width * bpp); + PodCopy(data + y * surfMap.GetStride(), data, aFillRect.width * bpp); } } @@ -229,18 +232,22 @@ FillRectWithVerticallyRepeatingHorizontalStrip(DataSourceSurface *aSurface, MOZ_ASSERT(IntRect(IntPoint(), aSurface->GetSize()).Contains(aSampleRect), "aSampleRect needs to be completely inside the surface"); - int32_t stride = aSurface->Stride(); - uint8_t* sampleData = DataAtOffset(aSurface, aSampleRect.TopLeft()); - uint8_t* data = DataAtOffset(aSurface, aFillRect.TopLeft()); + DataSourceSurface::ScopedMap surfMap(aSurface, DataSourceSurface::READ_WRITE); + if (MOZ2D_WARN_IF(!surfMap.IsMapped())) { + return; + } + + uint8_t* sampleData = DataAtOffset(aSurface, surfMap.GetMappedSurface(), aSampleRect.TopLeft()); + uint8_t* data = DataAtOffset(aSurface, surfMap.GetMappedSurface(), aFillRect.TopLeft()); if (BytesPerPixel(aSurface->GetFormat()) == 4) { for (int32_t y = 0; y < aFillRect.height; y++) { PodCopy((uint32_t*)data, (uint32_t*)sampleData, aFillRect.width); - data += stride; + data += surfMap.GetStride(); } } else if (BytesPerPixel(aSurface->GetFormat()) == 1) { for (int32_t y = 0; y < aFillRect.height; y++) { PodCopy(data, sampleData, aFillRect.width); - data += stride; + data += surfMap.GetStride(); } } } @@ -257,24 +264,28 @@ FillRectWithHorizontallyRepeatingVerticalStrip(DataSourceSurface *aSurface, MOZ_ASSERT(IntRect(IntPoint(), aSurface->GetSize()).Contains(aSampleRect), "aSampleRect needs to be completely inside the surface"); - int32_t stride = aSurface->Stride(); - uint8_t* sampleData = DataAtOffset(aSurface, aSampleRect.TopLeft()); - uint8_t* data = DataAtOffset(aSurface, aFillRect.TopLeft()); + DataSourceSurface::ScopedMap surfMap(aSurface, DataSourceSurface::READ_WRITE); + if (MOZ2D_WARN_IF(!surfMap.IsMapped())) { + return; + } + + uint8_t* sampleData = DataAtOffset(aSurface, surfMap.GetMappedSurface(), aSampleRect.TopLeft()); + uint8_t* data = DataAtOffset(aSurface, surfMap.GetMappedSurface(), aFillRect.TopLeft()); if (BytesPerPixel(aSurface->GetFormat()) == 4) { for (int32_t y = 0; y < aFillRect.height; y++) { int32_t sampleColor = *((uint32_t*)sampleData); for (int32_t x = 0; x < aFillRect.width; x++) { *((uint32_t*)data + x) = sampleColor; } - data += 
stride; - sampleData += stride; + data += surfMap.GetStride(); + sampleData += surfMap.GetStride(); } } else if (BytesPerPixel(aSurface->GetFormat()) == 1) { for (int32_t y = 0; y < aFillRect.height; y++) { uint8_t sampleColor = *sampleData; memset(data, sampleColor, aFillRect.width); - data += stride; - sampleData += stride; + data += surfMap.GetStride(); + sampleData += surfMap.GetStride(); } } } @@ -987,12 +998,18 @@ FilterNodeBlendSoftware::Render(const IntRect& aRect) CopyRect(input1, target, IntRect(IntPoint(), size), IntPoint()); + // This needs to stay in scope until the draw target has been flushed. + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::READ_WRITE); + if (MOZ2D_WARN_IF(!targetMap.IsMapped())) { + return nullptr; + } + RefPtr dt = - Factory::CreateDrawTargetForData(BackendType::CAIRO, - target->GetData(), - target->GetSize(), - target->Stride(), - target->GetFormat()); + Factory::CreateDrawTargetForData(BackendType::CAIRO, + targetMap.GetData(), + target->GetSize(), + targetMap.GetStride(), + target->GetFormat()); if (!dt) { gfxWarning() << "FilterNodeBlendSoftware::Render failed in CreateDrawTargetForData"; @@ -1095,7 +1112,10 @@ FilterNodeTransformSoftware::Render(const IntRect& aRect) } DataSourceSurface::MappedSurface mapping; - surf->Map(DataSourceSurface::MapType::WRITE, &mapping); + if (!surf->Map(DataSourceSurface::MapType::WRITE, &mapping)) { + gfxCriticalError() << "FilterNodeTransformSoftware::Render failed to map surface"; + return nullptr; + } RefPtr dt = Factory::CreateDrawTargetForData(BackendType::CAIRO, @@ -1195,14 +1215,18 @@ ApplyMorphology(const IntRect& aSourceRect, DataSourceSurface* aInput, return nullptr; } - int32_t sourceStride = aInput->Stride(); - uint8_t* sourceData = DataAtOffset(aInput, destRect.TopLeft() - srcRect.TopLeft()); - - int32_t tmpStride = tmp->Stride(); - uint8_t* tmpData = DataAtOffset(tmp, destRect.TopLeft() - tmpRect.TopLeft()); + DataSourceSurface::ScopedMap sourceMap(aInput, DataSourceSurface::READ); + DataSourceSurface::ScopedMap tmpMap(tmp, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!sourceMap.IsMapped() || !tmpMap.IsMapped())) { + return nullptr; + } + uint8_t* sourceData = DataAtOffset(aInput, sourceMap.GetMappedSurface(), + destRect.TopLeft() - srcRect.TopLeft()); + uint8_t* tmpData = DataAtOffset(tmp, tmpMap.GetMappedSurface(), + destRect.TopLeft() - tmpRect.TopLeft()); FilterProcessing::ApplyMorphologyHorizontal( - sourceData, sourceStride, tmpData, tmpStride, tmpRect, rx, aOperator); + sourceData, sourceMap.GetStride(), tmpData, tmpMap.GetStride(), tmpRect, rx, aOperator); } RefPtr dest; @@ -1214,11 +1238,16 @@ ApplyMorphology(const IntRect& aSourceRect, DataSourceSurface* aInput, return nullptr; } - int32_t tmpStride = tmp->Stride(); - uint8_t* tmpData = DataAtOffset(tmp, destRect.TopLeft() - tmpRect.TopLeft()); + DataSourceSurface::ScopedMap tmpMap(tmp, DataSourceSurface::READ); + DataSourceSurface::ScopedMap destMap(dest, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!tmpMap.IsMapped() || !destMap.IsMapped())) { + return nullptr; + } + int32_t tmpStride = tmpMap.GetStride(); + uint8_t* tmpData = DataAtOffset(tmp, tmpMap.GetMappedSurface(), destRect.TopLeft() - tmpRect.TopLeft()); - int32_t destStride = dest->Stride(); - uint8_t* destData = dest->GetData(); + int32_t destStride = destMap.GetStride(); + uint8_t* destData = destMap.GetData(); FilterProcessing::ApplyMorphologyVertical( tmpData, tmpStride, destData, destStride, destRect, ry, aOperator); @@ -1313,10 +1342,16 @@ 
Premultiply(DataSourceSurface* aSurface) return nullptr; } - uint8_t* inputData = aSurface->GetData(); - int32_t inputStride = aSurface->Stride(); - uint8_t* targetData = target->GetData(); - int32_t targetStride = target->Stride(); + DataSourceSurface::ScopedMap inputMap(aSurface, DataSourceSurface::READ); + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!inputMap.IsMapped() || !targetMap.IsMapped())) { + return nullptr; + } + + uint8_t* inputData = inputMap.GetData(); + int32_t inputStride = inputMap.GetStride(); + uint8_t* targetData = targetMap.GetData(); + int32_t targetStride = targetMap.GetStride(); FilterProcessing::DoPremultiplicationCalculation( size, targetData, targetStride, inputData, inputStride); @@ -1339,10 +1374,16 @@ Unpremultiply(DataSourceSurface* aSurface) return nullptr; } - uint8_t* inputData = aSurface->GetData(); - int32_t inputStride = aSurface->Stride(); - uint8_t* targetData = target->GetData(); - int32_t targetStride = target->Stride(); + DataSourceSurface::ScopedMap inputMap(aSurface, DataSourceSurface::READ); + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!inputMap.IsMapped() || !targetMap.IsMapped())) { + return nullptr; + } + + uint8_t* inputData = inputMap.GetData(); + int32_t inputStride = inputMap.GetStride(); + uint8_t* targetData = targetMap.GetData(); + int32_t targetStride = targetMap.GetStride(); FilterProcessing::DoUnpremultiplicationCalculation( size, targetData, targetStride, inputData, inputStride); @@ -1429,8 +1470,13 @@ FilterNodeFloodSoftware::Render(const IntRect& aRect) return nullptr; } - uint8_t* targetData = target->GetData(); - uint32_t stride = target->Stride(); + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!targetMap.IsMapped())) { + return nullptr; + } + + uint8_t* targetData = targetMap.GetData(); + int32_t stride = targetMap.GetStride(); if (format == SurfaceFormat::B8G8R8A8) { uint32_t color = ColorToBGRA(mColor); @@ -1649,10 +1695,16 @@ static void TransferComponents(DataSourceSurface* aInput, MOZ_ASSERT(aInput->GetFormat() == aTarget->GetFormat(), "different formats"); IntSize size = aInput->GetSize(); - uint8_t* sourceData = aInput->GetData(); - uint8_t* targetData = aTarget->GetData(); - uint32_t sourceStride = aInput->Stride(); - uint32_t targetStride = aTarget->Stride(); + DataSourceSurface::ScopedMap sourceMap(aInput, DataSourceSurface::READ); + DataSourceSurface::ScopedMap targetMap(aTarget, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!sourceMap.IsMapped() || !targetMap.IsMapped())) { + return; + } + + uint8_t* sourceData = sourceMap.GetData(); + int32_t sourceStride = sourceMap.GetStride(); + uint8_t* targetData = targetMap.GetData(); + int32_t targetStride = targetMap.GetStride(); for (int32_t y = 0; y < size.height; y++) { for (int32_t x = 0; x < size.width; x++) { @@ -2392,10 +2444,16 @@ FilterNodeConvolveMatrixSoftware::DoRender(const IntRect& aRect, IntPoint offset = aRect.TopLeft() - srcRect.TopLeft(); - uint8_t* sourceData = DataAtOffset(input, offset); - int32_t sourceStride = input->Stride(); - uint8_t* targetData = target->GetData(); - int32_t targetStride = target->Stride(); + DataSourceSurface::ScopedMap sourceMap(input, DataSourceSurface::READ); + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!sourceMap.IsMapped() || !targetMap.IsMapped())) { + return nullptr; + } + + uint8_t* sourceData = DataAtOffset(input, 
sourceMap.GetMappedSurface(), offset); + int32_t sourceStride = sourceMap.GetStride(); + uint8_t* targetData = targetMap.GetData(); + int32_t targetStride = targetMap.GetStride(); // Why exactly are we reversing the kernel? std::vector kernel = ReversedVector(mKernelMatrix); @@ -2538,12 +2596,19 @@ FilterNodeDisplacementMapSoftware::Render(const IntRect& aRect) IntPoint offset = aRect.TopLeft() - srcRect.TopLeft(); - uint8_t* sourceData = DataAtOffset(input, offset); - int32_t sourceStride = input->Stride(); - uint8_t* mapData = map->GetData(); - int32_t mapStride = map->Stride(); - uint8_t* targetData = target->GetData(); - int32_t targetStride = target->Stride(); + DataSourceSurface::ScopedMap inputMap(input, DataSourceSurface::READ); + DataSourceSurface::ScopedMap mapMap(map, DataSourceSurface::READ); + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!(inputMap.IsMapped() && mapMap.IsMapped() && targetMap.IsMapped()))) { + return nullptr; + } + + uint8_t* sourceData = DataAtOffset(input, inputMap.GetMappedSurface(), offset); + int32_t sourceStride = inputMap.GetStride(); + uint8_t* mapData = mapMap.GetData(); + int32_t mapStride = mapMap.GetStride(); + uint8_t* targetData = targetMap.GetData(); + int32_t targetStride = targetMap.GetStride(); static const ptrdiff_t channelMap[4] = { B8G8R8A8_COMPONENT_BYTEOFFSET_R, @@ -2885,19 +2950,35 @@ FilterNodeBlurXYSoftware::Render(const IntRect& aRect) return nullptr; } CopyRect(input, target, IntRect(IntPoint(), input->GetSize()), IntPoint()); - AlphaBoxBlur blur(r, target->Stride(), sigmaXY.width, sigmaXY.height); - blur.Blur(target->GetData()); + + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::READ_WRITE); + if (MOZ2D_WARN_IF(!targetMap.IsMapped())) { + return nullptr; + } + AlphaBoxBlur blur(r, targetMap.GetStride(), sigmaXY.width, sigmaXY.height); + blur.Blur(targetMap.GetData()); } else { RefPtr channel0, channel1, channel2, channel3; FilterProcessing::SeparateColorChannels(input, channel0, channel1, channel2, channel3); - if (MOZ2D_WARN_IF(!(channel0 && channel1 && channel2))) { + if (MOZ2D_WARN_IF(!(channel0 && channel1 && channel2 && channel3))) { return nullptr; } - AlphaBoxBlur blur(r, channel0->Stride(), sigmaXY.width, sigmaXY.height); - blur.Blur(channel0->GetData()); - blur.Blur(channel1->GetData()); - blur.Blur(channel2->GetData()); - blur.Blur(channel3->GetData()); + { + DataSourceSurface::ScopedMap channel0Map(channel0, DataSourceSurface::READ_WRITE); + DataSourceSurface::ScopedMap channel1Map(channel1, DataSourceSurface::READ_WRITE); + DataSourceSurface::ScopedMap channel2Map(channel2, DataSourceSurface::READ_WRITE); + DataSourceSurface::ScopedMap channel3Map(channel3, DataSourceSurface::READ_WRITE); + if (MOZ2D_WARN_IF(!(channel0Map.IsMapped() && channel1Map.IsMapped() && + channel2Map.IsMapped() && channel3Map.IsMapped()))) { + return nullptr; + } + + AlphaBoxBlur blur(r, channel0Map.GetStride(), sigmaXY.width, sigmaXY.height); + blur.Blur(channel0Map.GetData()); + blur.Blur(channel1Map.GetData()); + blur.Blur(channel2Map.GetData()); + blur.Blur(channel3Map.GetData()); + } target = FilterProcessing::CombineColorChannels(channel0, channel1, channel2, channel3); } @@ -3414,10 +3495,17 @@ FilterNodeLightingSoftware::DoRender(const IntRect& aRe IntPoint offset = aRect.TopLeft() - srcRect.TopLeft(); - uint8_t* sourceData = DataAtOffset(input, offset); - int32_t sourceStride = input->Stride(); - uint8_t* targetData = target->GetData(); - int32_t targetStride = 
target->Stride(); + + DataSourceSurface::ScopedMap sourceMap(input, DataSourceSurface::READ); + DataSourceSurface::ScopedMap targetMap(target, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!(sourceMap.IsMapped() && targetMap.IsMapped()))) { + return nullptr; + } + + uint8_t* sourceData = DataAtOffset(input, sourceMap.GetMappedSurface(), offset); + int32_t sourceStride = sourceMap.GetStride(); + uint8_t* targetData = targetMap.GetData(); + int32_t targetStride = targetMap.GetStride(); uint32_t lightColor = ColorToBGRA(mColor); mLight.Prepare(); diff --git a/gfx/2d/FilterProcessing.cpp b/gfx/2d/FilterProcessing.cpp index 084f9476c53a..5d03f2da214c 100644 --- a/gfx/2d/FilterProcessing.cpp +++ b/gfx/2d/FilterProcessing.cpp @@ -17,10 +17,17 @@ FilterProcessing::ExtractAlpha(DataSourceSurface* aSource) if (MOZ2D_WARN_IF(!alpha)) { return nullptr; } - uint8_t* sourceData = aSource->GetData(); - int32_t sourceStride = aSource->Stride(); - uint8_t* alphaData = alpha->GetData(); - int32_t alphaStride = alpha->Stride(); + + DataSourceSurface::ScopedMap sourceMap(aSource, DataSourceSurface::READ); + DataSourceSurface::ScopedMap alphaMap(alpha, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!sourceMap.IsMapped() || !alphaMap.IsMapped())) { + return nullptr; + } + + uint8_t* sourceData = sourceMap.GetData(); + int32_t sourceStride = sourceMap.GetStride(); + uint8_t* alphaData = alphaMap.GetData(); + int32_t alphaStride = alphaMap.GetStride(); if (Factory::HasSSE2()) { #ifdef USE_SSE2 @@ -130,13 +137,23 @@ FilterProcessing::SeparateColorChannels(DataSourceSurface* aSource, return; } - uint8_t* sourceData = aSource->GetData(); - int32_t sourceStride = aSource->Stride(); - uint8_t* channel0Data = aChannel0->GetData(); - uint8_t* channel1Data = aChannel1->GetData(); - uint8_t* channel2Data = aChannel2->GetData(); - uint8_t* channel3Data = aChannel3->GetData(); - int32_t channelStride = aChannel0->Stride(); + DataSourceSurface::ScopedMap sourceMap(aSource, DataSourceSurface::READ); + DataSourceSurface::ScopedMap channel0Map(aChannel0, DataSourceSurface::WRITE); + DataSourceSurface::ScopedMap channel1Map(aChannel1, DataSourceSurface::WRITE); + DataSourceSurface::ScopedMap channel2Map(aChannel2, DataSourceSurface::WRITE); + DataSourceSurface::ScopedMap channel3Map(aChannel3, DataSourceSurface::WRITE); + if (MOZ2D_WARN_IF(!(sourceMap.IsMapped() && + channel0Map.IsMapped() && channel1Map.IsMapped() && + channel2Map.IsMapped() && channel3Map.IsMapped()))) { + return; + } + uint8_t* sourceData = sourceMap.GetData(); + int32_t sourceStride = sourceMap.GetStride(); + uint8_t* channel0Data = channel0Map.GetData(); + uint8_t* channel1Data = channel1Map.GetData(); + uint8_t* channel2Data = channel2Map.GetData(); + uint8_t* channel3Data = channel3Map.GetData(); + int32_t channelStride = channel0Map.GetStride(); if (Factory::HasSSE2()) { #ifdef USE_SSE2 @@ -157,13 +174,23 @@ FilterProcessing::CombineColorChannels(DataSourceSurface* aChannel0, DataSourceS if (MOZ2D_WARN_IF(!result)) { return nullptr; } - int32_t resultStride = result->Stride(); - uint8_t* resultData = result->GetData(); - int32_t channelStride = aChannel0->Stride(); - uint8_t* channel0Data = aChannel0->GetData(); - uint8_t* channel1Data = aChannel1->GetData(); - uint8_t* channel2Data = aChannel2->GetData(); - uint8_t* channel3Data = aChannel3->GetData(); + DataSourceSurface::ScopedMap resultMap(result, DataSourceSurface::WRITE); + DataSourceSurface::ScopedMap channel0Map(aChannel0, DataSourceSurface::READ); + DataSourceSurface::ScopedMap 
channel1Map(aChannel1, DataSourceSurface::READ); + DataSourceSurface::ScopedMap channel2Map(aChannel2, DataSourceSurface::READ); + DataSourceSurface::ScopedMap channel3Map(aChannel3, DataSourceSurface::READ); + if (MOZ2D_WARN_IF(!(resultMap.IsMapped() && + channel0Map.IsMapped() && channel1Map.IsMapped() && + channel2Map.IsMapped() && channel3Map.IsMapped()))) { + return nullptr; + } + int32_t resultStride = resultMap.GetStride(); + uint8_t* resultData = resultMap.GetData(); + int32_t channelStride = channel0Map.GetStride(); + uint8_t* channel0Data = channel0Map.GetData(); + uint8_t* channel1Data = channel1Map.GetData(); + uint8_t* channel2Data = channel2Map.GetData(); + uint8_t* channel3Data = channel3Map.GetData(); if (Factory::HasSSE2()) { #ifdef USE_SSE2 diff --git a/gfx/2d/HelpersD2D.h b/gfx/2d/HelpersD2D.h index 579a2d8ba9c9..2b9d3c40b8c4 100644 --- a/gfx/2d/HelpersD2D.h +++ b/gfx/2d/HelpersD2D.h @@ -601,18 +601,22 @@ CreatePartialBitmapForSurface(DataSourceSurface *aSurface, const Matrix &aDestin uploadRect.height = rect.height; } - - int stride = aSurface->Stride(); - if (uploadRect.width <= aRT->GetMaximumBitmapSize() && uploadRect.height <= aRT->GetMaximumBitmapSize()) { + { + // Scope to auto-Unmap() |mapping|. + DataSourceSurface::ScopedMap mapping(aSurface, DataSourceSurface::READ); + if (MOZ2D_WARN_IF(!mapping.IsMapped())) { + return nullptr; + } - // A partial upload will suffice. - aRT->CreateBitmap(D2D1::SizeU(uint32_t(uploadRect.width), uint32_t(uploadRect.height)), - aSurface->GetData() + int(uploadRect.x) * 4 + int(uploadRect.y) * stride, - stride, - D2D1::BitmapProperties(D2DPixelFormat(aSurface->GetFormat())), - byRef(bitmap)); + // A partial upload will suffice. + aRT->CreateBitmap(D2D1::SizeU(uint32_t(uploadRect.width), uint32_t(uploadRect.height)), + mapping.GetData() + int(uploadRect.x) * 4 + int(uploadRect.y) * mapping.GetStride(), + mapping.GetStride(), + D2D1::BitmapProperties(D2DPixelFormat(aSurface->GetFormat())), + byRef(bitmap)); + } aSourceTransform.PreTranslate(uploadRect.x, uploadRect.y); @@ -626,46 +630,53 @@ CreatePartialBitmapForSurface(DataSourceSurface *aSurface, const Matrix &aDestin return nullptr; } - ImageHalfScaler scaler(aSurface->GetData(), stride, size); + { + // Scope to auto-Unmap() |mapping|. + DataSourceSurface::ScopedMap mapping(aSurface, DataSourceSurface::READ); + if (MOZ2D_WARN_IF(!mapping.IsMapped())) { + return nullptr; + } + ImageHalfScaler scaler(mapping.GetData(), mapping.GetStride(), size); - // Calculate the maximum width/height of the image post transform. - Point topRight = transform * Point(Float(size.width), 0); - Point topLeft = transform * Point(0, 0); - Point bottomRight = transform * Point(Float(size.width), Float(size.height)); - Point bottomLeft = transform * Point(0, Float(size.height)); - - IntSize scaleSize; + // Calculate the maximum width/height of the image post transform. 
+ Point topRight = transform * Point(Float(size.width), 0); + Point topLeft = transform * Point(0, 0); + Point bottomRight = transform * Point(Float(size.width), Float(size.height)); + Point bottomLeft = transform * Point(0, Float(size.height)); - scaleSize.width = int32_t(std::max(Distance(topRight, topLeft), - Distance(bottomRight, bottomLeft))); - scaleSize.height = int32_t(std::max(Distance(topRight, bottomRight), - Distance(topLeft, bottomLeft))); + IntSize scaleSize; - if (unsigned(scaleSize.width) > aRT->GetMaximumBitmapSize()) { - // Ok, in this case we'd really want a downscale of a part of the bitmap, - // perhaps we can do this later but for simplicity let's do something - // different here and assume it's good enough, this should be rare! - scaleSize.width = 4095; + scaleSize.width = int32_t(std::max(Distance(topRight, topLeft), + Distance(bottomRight, bottomLeft))); + scaleSize.height = int32_t(std::max(Distance(topRight, bottomRight), + Distance(topLeft, bottomLeft))); + + if (unsigned(scaleSize.width) > aRT->GetMaximumBitmapSize()) { + // Ok, in this case we'd really want a downscale of a part of the bitmap, + // perhaps we can do this later but for simplicity let's do something + // different here and assume it's good enough, this should be rare! + scaleSize.width = 4095; + } + if (unsigned(scaleSize.height) > aRT->GetMaximumBitmapSize()) { + scaleSize.height = 4095; + } + + scaler.ScaleForSize(scaleSize); + + IntSize newSize = scaler.GetSize(); + + if (newSize.IsEmpty()) { + return nullptr; + } + + aRT->CreateBitmap(D2D1::SizeU(newSize.width, newSize.height), + scaler.GetScaledData(), scaler.GetStride(), + D2D1::BitmapProperties(D2DPixelFormat(aSurface->GetFormat())), + byRef(bitmap)); + + aSourceTransform.PreScale(Float(size.width) / newSize.width, + Float(size.height) / newSize.height); } - if (unsigned(scaleSize.height) > aRT->GetMaximumBitmapSize()) { - scaleSize.height = 4095; - } - - scaler.ScaleForSize(scaleSize); - - IntSize newSize = scaler.GetSize(); - - if (newSize.IsEmpty()) { - return nullptr; - } - - aRT->CreateBitmap(D2D1::SizeU(newSize.width, newSize.height), - scaler.GetScaledData(), scaler.GetStride(), - D2D1::BitmapProperties(D2DPixelFormat(aSurface->GetFormat())), - byRef(bitmap)); - - aSourceTransform.PreScale(Float(size.width) / newSize.width, - Float(size.height) / newSize.height); return bitmap.forget(); } } diff --git a/gfx/2d/SourceSurfaceD2D1.cpp b/gfx/2d/SourceSurfaceD2D1.cpp index bd79d918c495..1f26d26b756c 100644 --- a/gfx/2d/SourceSurfaceD2D1.cpp +++ b/gfx/2d/SourceSurfaceD2D1.cpp @@ -182,7 +182,10 @@ DataSourceSurfaceD2D1::Map(MapType aMapType, MappedSurface *aMappedSurface) } D2D1_MAPPED_RECT map; - mBitmap->Map(D2D1_MAP_OPTIONS_READ, &map); + if (FAILED(mBitmap->Map(D2D1_MAP_OPTIONS_READ, &map))) { + gfxCriticalError() << "Failed to map bitmap."; + return false; + } aMappedSurface->mData = map.bits; aMappedSurface->mStride = map.pitch; @@ -215,7 +218,10 @@ DataSourceSurfaceD2D1::EnsureMapped() if (mMapped) { return; } - mBitmap->Map(D2D1_MAP_OPTIONS_READ, &mMap); + if (FAILED(mBitmap->Map(D2D1_MAP_OPTIONS_READ, &mMap))) { + gfxCriticalError() << "Failed to map bitmap."; + return; + } mMapped = true; } diff --git a/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/PixelTransfer11.cpp b/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/PixelTransfer11.cpp index 6054c545f39a..8f949b4dabd1 100644 --- a/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/PixelTransfer11.cpp +++ b/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/PixelTransfer11.cpp @@ -201,6 +201,21 @@ 
gl::Error PixelTransfer11::copyBufferToTexture(const gl::PixelUnpackState &unpac GLenum unsizedFormat = gl::GetInternalFormatInfo(destinationFormat).format; GLenum sourceFormat = gl::GetFormatTypeInfo(unsizedFormat, sourcePixelsType).internalFormat; + CopyShaderParams shaderParams; + setBufferToTextureCopyParams(destArea, destSize, sourceFormat, unpack, offset, &shaderParams); + + ID3D11DeviceContext *deviceContext = mRenderer->getDeviceContext(); + + if (!StructEquals(mParamsData, shaderParams)) + { + HRESULT result = d3d11::SetBufferData(deviceContext, mParamsConstantBuffer, shaderParams); + if (FAILED(result)) + { + return gl::Error(GL_OUT_OF_MEMORY, "Failed to set shader parameters, result: 0x%X.", result); + } + mParamsData = shaderParams; + } + const d3d11::TextureFormat &sourceFormatInfo = d3d11::GetTextureFormatInfo(sourceFormat); DXGI_FORMAT srvFormat = sourceFormatInfo.srvFormat; ASSERT(srvFormat != DXGI_FORMAT_UNKNOWN); @@ -211,11 +226,6 @@ gl::Error PixelTransfer11::copyBufferToTexture(const gl::PixelUnpackState &unpac ID3D11RenderTargetView *textureRTV = RenderTarget11::makeRenderTarget11(destRenderTarget)->getRenderTargetView(); ASSERT(textureRTV != NULL); - CopyShaderParams shaderParams; - setBufferToTextureCopyParams(destArea, destSize, sourceFormat, unpack, offset, &shaderParams); - - ID3D11DeviceContext *deviceContext = mRenderer->getDeviceContext(); - ID3D11Buffer *nullBuffer = NULL; UINT zero = 0; @@ -236,12 +246,6 @@ gl::Error PixelTransfer11::copyBufferToTexture(const gl::PixelUnpackState &unpac mRenderer->setOneTimeRenderTarget(textureRTV); - if (!StructEquals(mParamsData, shaderParams)) - { - d3d11::SetBufferData(deviceContext, mParamsConstantBuffer, shaderParams); - mParamsData = shaderParams; - } - deviceContext->VSSetConstantBuffers(0, 1, &mParamsConstantBuffer); // Set the viewport diff --git a/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/renderer11_utils.h b/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/renderer11_utils.h index 9df9c95763e5..7c0ad78e3452 100644 --- a/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/renderer11_utils.h +++ b/gfx/angle/src/libGLESv2/renderer/d3d/d3d11/renderer11_utils.h @@ -168,14 +168,17 @@ inline ID3D11PixelShader *CompilePS(ID3D11Device *device, const BYTE (&byteCode) // Copy data to small D3D11 buffers, such as for small constant buffers, which use one struct to // represent an entire buffer. 
template -inline void SetBufferData(ID3D11DeviceContext *context, ID3D11Buffer *constantBuffer, const T &value) +inline HRESULT SetBufferData(ID3D11DeviceContext *context, ID3D11Buffer *constantBuffer, const T &value) { D3D11_MAPPED_SUBRESOURCE mappedResource; - context->Map(constantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource); + HRESULT result = context->Map(constantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource); + if(SUCCEEDED(result)) + { + memcpy(mappedResource.pData, &value, sizeof(T)); - memcpy(mappedResource.pData, &value, sizeof(T)); - - context->Unmap(constantBuffer, 0); + context->Unmap(constantBuffer, 0); + } + return result; } gl::Error GetAttachmentRenderTarget(gl::FramebufferAttachment *attachment, RenderTarget11 **outRT); diff --git a/gfx/cairo/README b/gfx/cairo/README index c2022d0c63a3..154e26727101 100644 --- a/gfx/cairo/README +++ b/gfx/cairo/README @@ -242,6 +242,8 @@ win32-d3dsurface9.patch: Create a win32 d3d9 surface to support LockRect win32-avoid-extend-pad-fallback: Avoid falling back to pixman when using EXTEND_PAD +support-new-style-atomic-primitives.patch: Support the __atomic_* primitives for atomic operations + ==== disable printing patch ==== disable-printing.patch: allows us to use NS_PRINTING to disable printing. diff --git a/gfx/cairo/cairo/src/cairo-atomic-private.h b/gfx/cairo/cairo/src/cairo-atomic-private.h index 8d02ec948cbb..dd42a8ad135c 100644 --- a/gfx/cairo/cairo/src/cairo-atomic-private.h +++ b/gfx/cairo/cairo/src/cairo-atomic-private.h @@ -53,6 +53,96 @@ CAIRO_BEGIN_DECLS +/* C++11 atomic primitives were designed to be more flexible than the + * __sync_* family of primitives. Despite the name, they are available + * in C as well as C++. The motivating reason for using them is that + * for _cairo_atomic_{int,ptr}_get, the compiler is able to see that + * the load is intended to be atomic, as opposed to the __sync_* + * version, below, where the load looks like a plain load. Having + * the load appear atomic to the compiler is particular important for + * tools like ThreadSanitizer so they don't report false positives on + * memory operations that we intend to be atomic. 
+ */ +#if HAVE_CXX11_ATOMIC_PRIMITIVES + +#define HAS_ATOMIC_OPS 1 + +typedef int cairo_atomic_int_t; + +static cairo_always_inline cairo_atomic_int_t +_cairo_atomic_int_get (cairo_atomic_int_t *x) +{ + return __atomic_load_n(x, __ATOMIC_SEQ_CST); +} + +static cairo_always_inline void * +_cairo_atomic_ptr_get (void **x) +{ + return __atomic_load_n(x, __ATOMIC_SEQ_CST); +} + +# define _cairo_atomic_int_inc(x) ((void) __atomic_fetch_add(x, 1, __ATOMIC_SEQ_CST)) +# define _cairo_atomic_int_dec(x) ((void) __atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST)) +# define _cairo_atomic_int_dec_and_test(x) (__atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST) == 1) + +#if SIZEOF_VOID_P==SIZEOF_INT +typedef int cairo_atomic_intptr_t; +#elif SIZEOF_VOID_P==SIZEOF_LONG +typedef long cairo_atomic_intptr_t; +#elif SIZEOF_VOID_P==SIZEOF_LONG_LONG +typedef long long cairo_atomic_intptr_t; +#else +#error No matching integer pointer type +#endif + +static cairo_always_inline cairo_bool_t +_cairo_atomic_int_cmpxchg_impl(cairo_atomic_int_t *x, + cairo_atomic_int_t oldv, + cairo_atomic_int_t newv) +{ + cairo_atomic_int_t expected = oldv; + return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +#define _cairo_atomic_int_cmpxchg(x, oldv, newv) \ + _cairo_atomic_int_cmpxchg_impl(x, oldv, newv) + +static cairo_always_inline cairo_atomic_int_t +_cairo_atomic_int_cmpxchg_return_old_impl(cairo_atomic_int_t *x, + cairo_atomic_int_t oldv, + cairo_atomic_int_t newv) +{ + cairo_atomic_int_t expected = oldv; + (void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; +} + +#define _cairo_atomic_int_cmpxchg_return_old(x, oldv, newv) \ + _cairo_atomic_int_cmpxchg_return_old_impl(x, oldv, newv) + +static cairo_always_inline cairo_bool_t +_cairo_atomic_ptr_cmpxchg_impl(void **x, void *oldv, void *newv) +{ + void *expected = oldv; + return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +#define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \ + _cairo_atomic_ptr_cmpxchg_impl(x, oldv, newv) + +static cairo_always_inline void * +_cairo_atomic_ptr_cmpxchg_return_old_impl(void **x, void *oldv, void *newv) +{ + void *expected = oldv; + (void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; +} + +#define _cairo_atomic_ptr_cmpxchg_return_old(x, oldv, newv) \ + _cairo_atomic_ptr_cmpxchg_return_old_impl(x, oldv, newv) + +#endif + #if HAVE_INTEL_ATOMIC_PRIMITIVES #define HAS_ATOMIC_OPS 1 diff --git a/gfx/cairo/cairo/src/moz.build b/gfx/cairo/cairo/src/moz.build index 3fd0129f6b9c..9e5ec88713b7 100644 --- a/gfx/cairo/cairo/src/moz.build +++ b/gfx/cairo/cairo/src/moz.build @@ -198,7 +198,7 @@ for var in ('MOZ_TREE_CAIRO', 'MOZ_TREE_PIXMAN'): DEFINES[var] = True if CONFIG['GNU_CC']: - DEFINES['HAVE_INTEL_ATOMIC_PRIMITIVES'] = True + DEFINES['HAVE_CXX11_ATOMIC_PRIMITIVES'] = True # We would normally use autoconf to set these up, using AC_CHECK_SIZEOF. # But AC_CHECK_SIZEOF requires running programs to determine the sizes, # and that doesn't work so well with cross-compiling. 
So instead we diff --git a/gfx/cairo/support-new-style-atomic-primitives.patch b/gfx/cairo/support-new-style-atomic-primitives.patch new file mode 100644 index 000000000000..1830a4691f03 --- /dev/null +++ b/gfx/cairo/support-new-style-atomic-primitives.patch @@ -0,0 +1,121 @@ +From 5d150ee111c222f09e78f4f88540964476327844 Mon Sep 17 00:00:00 2001 +From: Nathan Froyd +Date: Mon, 4 May 2015 13:38:41 -0400 +Subject: Support new-style __atomic_* primitives + +Recent versions of GCC/clang feature a new set of compiler intrinsics +for performing atomic operations, motivated by the operations needed to +support the C++11 memory model. These intrinsics are more flexible than +the old __sync_* intrinstics and offer efficient support for atomic load +and store operations. + +Having the load appear atomic to the compiler is particular important +for tools like ThreadSanitizer so they don't report false positives on +memory operations that we intend to be atomic. + +Patch from Nathan Froyd + +diff --git a/src/cairo-atomic-private.h b/src/cairo-atomic-private.h +index 327fed1..11b2887 100644 +--- a/src/cairo-atomic-private.h ++++ b/src/cairo-atomic-private.h +@@ -53,6 +53,96 @@ + + CAIRO_BEGIN_DECLS + ++/* C++11 atomic primitives were designed to be more flexible than the ++ * __sync_* family of primitives. Despite the name, they are available ++ * in C as well as C++. The motivating reason for using them is that ++ * for _cairo_atomic_{int,ptr}_get, the compiler is able to see that ++ * the load is intended to be atomic, as opposed to the __sync_* ++ * version, below, where the load looks like a plain load. Having ++ * the load appear atomic to the compiler is particular important for ++ * tools like ThreadSanitizer so they don't report false positives on ++ * memory operations that we intend to be atomic. 
++ */ ++#if HAVE_CXX11_ATOMIC_PRIMITIVES ++ ++#define HAS_ATOMIC_OPS 1 ++ ++typedef int cairo_atomic_int_t; ++ ++static cairo_always_inline cairo_atomic_int_t ++_cairo_atomic_int_get (cairo_atomic_int_t *x) ++{ ++ return __atomic_load_n(x, __ATOMIC_SEQ_CST); ++} ++ ++static cairo_always_inline void * ++_cairo_atomic_ptr_get (void **x) ++{ ++ return __atomic_load_n(x, __ATOMIC_SEQ_CST); ++} ++ ++# define _cairo_atomic_int_inc(x) ((void) __atomic_fetch_add(x, 1, __ATOMIC_SEQ_CST)) ++# define _cairo_atomic_int_dec(x) ((void) __atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST)) ++# define _cairo_atomic_int_dec_and_test(x) (__atomic_fetch_sub(x, 1, __ATOMIC_SEQ_CST) == 1) ++ ++#if SIZEOF_VOID_P==SIZEOF_INT ++typedef int cairo_atomic_intptr_t; ++#elif SIZEOF_VOID_P==SIZEOF_LONG ++typedef long cairo_atomic_intptr_t; ++#elif SIZEOF_VOID_P==SIZEOF_LONG_LONG ++typedef long long cairo_atomic_intptr_t; ++#else ++#error No matching integer pointer type ++#endif ++ ++static cairo_always_inline cairo_bool_t ++_cairo_atomic_int_cmpxchg_impl(cairo_atomic_int_t *x, ++ cairo_atomic_int_t oldv, ++ cairo_atomic_int_t newv) ++{ ++ cairo_atomic_int_t expected = oldv; ++ return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); ++} ++ ++#define _cairo_atomic_int_cmpxchg(x, oldv, newv) \ ++ _cairo_atomic_int_cmpxchg_impl(x, oldv, newv) ++ ++static cairo_always_inline cairo_atomic_int_t ++_cairo_atomic_int_cmpxchg_return_old_impl(cairo_atomic_int_t *x, ++ cairo_atomic_int_t oldv, ++ cairo_atomic_int_t newv) ++{ ++ cairo_atomic_int_t expected = oldv; ++ (void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); ++ return expected; ++} ++ ++#define _cairo_atomic_int_cmpxchg_return_old(x, oldv, newv) \ ++ _cairo_atomic_int_cmpxchg_return_old_impl(x, oldv, newv) ++ ++static cairo_always_inline cairo_bool_t ++_cairo_atomic_ptr_cmpxchg_impl(void **x, void *oldv, void *newv) ++{ ++ void *expected = oldv; ++ return __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); ++} ++ ++#define _cairo_atomic_ptr_cmpxchg(x, oldv, newv) \ ++ _cairo_atomic_ptr_cmpxchg_impl(x, oldv, newv) ++ ++static cairo_always_inline void * ++_cairo_atomic_ptr_cmpxchg_return_old_impl(void **x, void *oldv, void *newv) ++{ ++ void *expected = oldv; ++ (void) __atomic_compare_exchange_n(x, &expected, newv, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); ++ return expected; ++} ++ ++#define _cairo_atomic_ptr_cmpxchg_return_old(x, oldv, newv) \ ++ _cairo_atomic_ptr_cmpxchg_return_old_impl(x, oldv, newv) ++ ++#endif ++ + #if HAVE_INTEL_ATOMIC_PRIMITIVES + + #define HAS_ATOMIC_OPS 1 +-- +cgit v0.10.2 + diff --git a/gfx/gl/GLConsts.h b/gfx/gl/GLConsts.h index b21dfe79a1a8..0c1e43e8aacf 100644 --- a/gfx/gl/GLConsts.h +++ b/gfx/gl/GLConsts.h @@ -283,7 +283,6 @@ #define LOCAL_GL_BOOL_VEC4_ARB 0x8B59 #define LOCAL_GL_BOUNDING_BOX_NV 0x908D #define LOCAL_GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV 0x909C -#define LOCAL_GL_BROWSER_DEFAULT_WEBGL 0x9244 #define LOCAL_GL_BUFFER 0x82E0 #define LOCAL_GL_BUFFER_ACCESS 0x88BB #define LOCAL_GL_BUFFER_ACCESS_ARB 0x88BB @@ -731,7 +730,6 @@ #define LOCAL_GL_CONTEXT_FLAG_DEBUG_BIT_KHR 0x00000002 #define LOCAL_GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT 0x00000001 #define LOCAL_GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB 0x00000004 -#define LOCAL_GL_CONTEXT_LOST_WEBGL 0x9242 #define LOCAL_GL_CONTEXT_PROFILE_MASK 0x9126 #define LOCAL_GL_CONTEXT_ROBUST_ACCESS_EXT 0x90F3 #define LOCAL_GL_CONTINUOUS_AMD 0x9007 @@ -4680,19 +4678,16 @@ #define 
LOCAL_GL_UNPACK_ALIGNMENT 0x0CF5 #define LOCAL_GL_UNPACK_CLIENT_STORAGE_APPLE 0x85B2 #define LOCAL_GL_UNPACK_CMYK_HINT_EXT 0x800F -#define LOCAL_GL_UNPACK_COLORSPACE_CONVERSION_WEBGL 0x9243 #define LOCAL_GL_UNPACK_COMPRESSED_BLOCK_DEPTH 0x9129 #define LOCAL_GL_UNPACK_COMPRESSED_BLOCK_HEIGHT 0x9128 #define LOCAL_GL_UNPACK_COMPRESSED_BLOCK_SIZE 0x912A #define LOCAL_GL_UNPACK_COMPRESSED_BLOCK_WIDTH 0x9127 #define LOCAL_GL_UNPACK_COMPRESSED_SIZE_SGIX 0x831A #define LOCAL_GL_UNPACK_CONSTANT_DATA_SUNX 0x81D5 -#define LOCAL_GL_UNPACK_FLIP_Y_WEBGL 0x9240 #define LOCAL_GL_UNPACK_IMAGE_DEPTH_SGIS 0x8133 #define LOCAL_GL_UNPACK_IMAGE_HEIGHT 0x806E #define LOCAL_GL_UNPACK_IMAGE_HEIGHT_EXT 0x806E #define LOCAL_GL_UNPACK_LSB_FIRST 0x0CF1 -#define LOCAL_GL_UNPACK_PREMULTIPLY_ALPHA_WEBGL 0x9241 #define LOCAL_GL_UNPACK_RESAMPLE_OML 0x8985 #define LOCAL_GL_UNPACK_RESAMPLE_SGIX 0x842D #define LOCAL_GL_UNPACK_ROW_BYTES_APPLE 0x8A16 diff --git a/gfx/gl/GLContext.cpp b/gfx/gl/GLContext.cpp index aa78909cc894..1c4fa209a6b3 100644 --- a/gfx/gl/GLContext.cpp +++ b/gfx/gl/GLContext.cpp @@ -794,7 +794,7 @@ GLContext::InitWithPrefix(const char *prefix, bool trygl) } } - if (IsExtensionSupported(ARB_sync)) { + if (IsSupported(GLFeature::sync)) { SymLoadStruct syncSymbols[] = { { (PRFuncPtr*) &mSymbols.fFenceSync, { "FenceSync", nullptr } }, { (PRFuncPtr*) &mSymbols.fIsSync, { "IsSync", nullptr } }, @@ -807,7 +807,7 @@ GLContext::InitWithPrefix(const char *prefix, bool trygl) }; if (!LoadSymbols(&syncSymbols[0], trygl, prefix)) { - NS_ERROR("GL supports ARB_sync without supplying its functions."); + NS_ERROR("GL supports sync without supplying its functions."); MarkExtensionUnsupported(ARB_sync); ClearSymbols(syncSymbols); diff --git a/gfx/gl/GLContext.h b/gfx/gl/GLContext.h index ee3382349755..83d2fb8ad25e 100644 --- a/gfx/gl/GLContext.h +++ b/gfx/gl/GLContext.h @@ -122,6 +122,7 @@ enum class GLFeature { sRGB_texture, sampler_objects, standard_derivatives, + sync, texture_3D, texture_3D_compressed, texture_3D_copy, diff --git a/gfx/gl/GLContextFeatures.cpp b/gfx/gl/GLContextFeatures.cpp index 7506ca84ccfc..fefba7e38839 100644 --- a/gfx/gl/GLContextFeatures.cpp +++ b/gfx/gl/GLContextFeatures.cpp @@ -529,6 +529,15 @@ static const FeatureInfo sFeatureInfoArr[] = { GLContext::Extensions_End } }, + { + "sync", + GLVersion::GL3_2, + GLESVersion::ES3, + GLContext::ARB_sync, + { + GLContext::Extensions_End + } + }, { "texture_3D", GLVersion::GL1_2, diff --git a/gfx/gl/GLReadTexImageHelper.cpp b/gfx/gl/GLReadTexImageHelper.cpp index 2b72cc139f75..48705a5949c7 100644 --- a/gfx/gl/GLReadTexImageHelper.cpp +++ b/gfx/gl/GLReadTexImageHelper.cpp @@ -217,7 +217,10 @@ static void SwapRAndBComponents(DataSourceSurface* surf) { DataSourceSurface::MappedSurface map; - MOZ_ALWAYS_TRUE( surf->Map(DataSourceSurface::MapType::READ_WRITE, &map) ); + if (!surf->Map(DataSourceSurface::MapType::READ_WRITE, &map)) { + MOZ_ASSERT(false, "SwapRAndBComponents: Failed to map surface."); + return; + } MOZ_ASSERT(map.mStride >= 0); const size_t rowBytes = surf->GetSize().width*4; @@ -288,8 +291,11 @@ CopyDataSourceSurface(DataSourceSurface* aSource, DataSourceSurface::MappedSurface srcMap; DataSourceSurface::MappedSurface destMap; - MOZ_ALWAYS_TRUE( aSource->Map(DataSourceSurface::MapType::READ, &srcMap) ); - MOZ_ALWAYS_TRUE( aDest->Map(DataSourceSurface::MapType::WRITE, &destMap) ); + if (!aSource->Map(DataSourceSurface::MapType::READ, &srcMap) || + !aDest->Map(DataSourceSurface::MapType::WRITE, &destMap)) { + MOZ_ASSERT(false, 
"CopyDataSourceSurface: Failed to map surface."); + return; + } MOZ_ASSERT(srcMap.mStride >= 0); MOZ_ASSERT(destMap.mStride >= 0); diff --git a/gfx/ipc/GfxMessageUtils.h b/gfx/ipc/GfxMessageUtils.h index 4704fa2f7c0b..d83dac7abda0 100644 --- a/gfx/ipc/GfxMessageUtils.h +++ b/gfx/ipc/GfxMessageUtils.h @@ -721,7 +721,7 @@ struct ParamTraits WriteParam(aMsg, aParam.mZoom); WriteParam(aMsg, aParam.mDevPixelsPerCSSPixel); WriteParam(aMsg, aParam.mPresShellId); - WriteParam(aMsg, aParam.mIsRoot); + WriteParam(aMsg, aParam.mIsRootContent); WriteParam(aMsg, aParam.mHasScrollgrab); WriteParam(aMsg, aParam.mUpdateScrollOffset); WriteParam(aMsg, aParam.mScrollGeneration); @@ -766,7 +766,7 @@ struct ParamTraits ReadParam(aMsg, aIter, &aResult->mZoom) && ReadParam(aMsg, aIter, &aResult->mDevPixelsPerCSSPixel) && ReadParam(aMsg, aIter, &aResult->mPresShellId) && - ReadParam(aMsg, aIter, &aResult->mIsRoot) && + ReadParam(aMsg, aIter, &aResult->mIsRootContent) && ReadParam(aMsg, aIter, &aResult->mHasScrollgrab) && ReadParam(aMsg, aIter, &aResult->mUpdateScrollOffset) && ReadParam(aMsg, aIter, &aResult->mScrollGeneration) && diff --git a/gfx/layers/FrameMetrics.h b/gfx/layers/FrameMetrics.h index f31c4adde318..d5cf7af8bbdc 100644 --- a/gfx/layers/FrameMetrics.h +++ b/gfx/layers/FrameMetrics.h @@ -48,7 +48,7 @@ public: , mScrollableRect(0, 0, 0, 0) , mCumulativeResolution() , mDevPixelsPerCSSPixel(1) - , mIsRoot(false) + , mIsRootContent(false) , mHasScrollgrab(false) , mScrollId(NULL_SCROLL_ID) , mScrollParentId(NULL_SCROLL_ID) @@ -89,7 +89,7 @@ public: mCumulativeResolution == aOther.mCumulativeResolution && mDevPixelsPerCSSPixel == aOther.mDevPixelsPerCSSPixel && mPresShellId == aOther.mPresShellId && - mIsRoot == aOther.mIsRoot && + mIsRootContent == aOther.mIsRootContent && mScrollId == aOther.mScrollId && mScrollParentId == aOther.mScrollParentId && mScrollOffset == aOther.mScrollOffset && @@ -120,11 +120,6 @@ public: return (def == *this); } - bool IsRootScrollable() const - { - return mIsRoot; - } - bool IsScrollable() const { return mScrollId != NULL_SCROLL_ID; @@ -298,14 +293,14 @@ public: return mDevPixelsPerCSSPixel; } - void SetIsRoot(bool aIsRoot) + void SetIsRootContent(bool aIsRootContent) { - mIsRoot = aIsRoot; + mIsRootContent = aIsRootContent; } - bool GetIsRoot() const + bool IsRootContent() const { - return mIsRoot; + return mIsRootContent; } void SetHasScrollgrab(bool aHasScrollgrab) @@ -626,7 +621,7 @@ private: CSSToLayoutDeviceScale mDevPixelsPerCSSPixel; // Whether or not this is the root scroll frame for the root content document. - bool mIsRoot; + bool mIsRootContent; // Whether or not this frame is for an element marked 'scrollgrab'. 
bool mHasScrollgrab; diff --git a/gfx/layers/ImageContainer.cpp b/gfx/layers/ImageContainer.cpp index 4398008abbb4..154572dea126 100644 --- a/gfx/layers/ImageContainer.cpp +++ b/gfx/layers/ImageContainer.cpp @@ -476,7 +476,12 @@ PlanarYCbCrImage::GetAsSourceSurface() return nullptr; } - gfx::ConvertYCbCrToRGB(mData, format, size, surface->GetData(), surface->Stride()); + DataSourceSurface::ScopedMap mapping(surface, DataSourceSurface::WRITE); + if (NS_WARN_IF(!mapping.IsMapped())) { + return nullptr; + } + + gfx::ConvertYCbCrToRGB(mData, format, size, mapping.GetData(), mapping.GetStride()); mSourceSurface = surface; diff --git a/gfx/layers/YCbCrImageDataSerializer.cpp b/gfx/layers/YCbCrImageDataSerializer.cpp index af32e0552777..7553bb126a31 100644 --- a/gfx/layers/YCbCrImageDataSerializer.cpp +++ b/gfx/layers/YCbCrImageDataSerializer.cpp @@ -287,7 +287,9 @@ YCbCrImageDataDeserializer::ToDataSourceSurface() } DataSourceSurface::MappedSurface map; - result->Map(DataSourceSurface::MapType::WRITE, &map); + if (NS_WARN_IF(!result->Map(DataSourceSurface::MapType::WRITE, &map))) { + return nullptr; + } gfx::ConvertYCbCrToRGB32(GetYData(), GetCbData(), GetCrData(), map.mData, diff --git a/gfx/layers/apz/public/GeckoContentController.h b/gfx/layers/apz/public/GeckoContentController.h index 9756f8e95294..79db6c805d10 100644 --- a/gfx/layers/apz/public/GeckoContentController.h +++ b/gfx/layers/apz/public/GeckoContentController.h @@ -79,7 +79,7 @@ public: * |aContentRect| is in CSS pixels, relative to the current cssPage. * |aScrollableSize| is the current content width/height in CSS pixels. */ - virtual void SendAsyncScrollDOMEvent(bool aIsRoot, + virtual void SendAsyncScrollDOMEvent(bool aIsRootContent, const CSSRect &aContentRect, const CSSSize &aScrollableSize) = 0; diff --git a/gfx/layers/apz/src/APZCTreeManager.cpp b/gfx/layers/apz/src/APZCTreeManager.cpp index ad20adfb6d32..c847248462c8 100644 --- a/gfx/layers/apz/src/APZCTreeManager.cpp +++ b/gfx/layers/apz/src/APZCTreeManager.cpp @@ -464,21 +464,22 @@ APZCTreeManager::PrepareNodeForLayer(const LayerMetricsWrapper& aLayer, } if (newApzc) { - if (apzc->HasNoParentWithSameLayersId()) { - // If we just created a new apzc that is the root for its layers ID, then - // we need to update its zoom constraints which might have arrived before this - // was created + if (apzc->IsRootContent()) { + // If we just created a new root-content apzc, then we need to update + // its zoom constraints which might have arrived before it was created. ZoomConstraints constraints; if (state->mController->GetRootZoomConstraints(&constraints)) { apzc->UpdateZoomConstraints(constraints); } - } else { - // For an apzc that is not the root for its layers ID, we give it the - // same zoom constraints as its parent. This ensures that if e.g. - // user-scalable=no was specified, none of the APZCs allow double-tap - // to zoom. + } else if (!apzc->HasNoParentWithSameLayersId()) { + // Otherwise, an APZC that has a parent in the same layer tree gets + // the same zoom constraints as its parent. This ensures that if e.g. + // user-scalable=no was specified on the root, none of the APZCs allow + // double-tap to zoom. apzc->UpdateZoomConstraints(apzc->GetParent()->GetZoomConstraints()); } + // Otherwise, if the APZC has no parent in the same layer tree, leave + // it with the existing zoom constraints. } // Add a guid -> APZC mapping for the newly created APZC. 
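Several hunks in this series (GLReadTexImageHelper, ImageContainer, YCbCrImageDataSerializer above, and TextureD3D11 and imgFrame further down) replace assert-and-continue with check-and-bail when mapping a surface. A generic sketch of that pattern, using hypothetical types rather than Moz2D's DataSourceSurface:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Hypothetical surface type standing in for gfx::DataSourceSurface.
    class Surface {
     public:
      struct Mapping { uint8_t* data = nullptr; int32_t stride = 0; };

      Surface(int width, int height)
        : mStride(width * 4), mPixels(size_t(mStride) * height) {}

      // Returns false when the pixels cannot be mapped; callers must not
      // assume success.
      bool Map(Mapping* out) {
        if (mPixels.empty()) return false;
        out->data = mPixels.data();
        out->stride = mStride;
        return true;
      }
      void Unmap() {}

     private:
      int32_t mStride;
      std::vector<uint8_t> mPixels;
    };

    bool FillWithWhite(Surface* surf, int width, int height) {
      Surface::Mapping map;
      if (!surf->Map(&map)) {
        // The hunks above make exactly this kind of early return, so release
        // builds no longer write through a null pointer after a failed map.
        std::fprintf(stderr, "FillWithWhite: failed to map surface\n");
        return false;
      }
      for (int y = 0; y < height; ++y) {
        std::memset(map.data + size_t(y) * map.stride, 0xFF, size_t(width) * 4);
      }
      surf->Unmap();
      return true;
    }

    int main() {
      Surface surf(16, 16);
      return FillWithWhite(&surf, 16, 16) ? 0 : 1;
    }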
@@ -1033,9 +1034,9 @@ APZCTreeManager::UpdateZoomConstraints(const ScrollableLayerGuid& aGuid, nsRefPtr node = GetTargetNode(aGuid, nullptr); MOZ_ASSERT(!node || node->GetApzc()); // any node returned must have an APZC - // For a given layers id, non-root APZCs inherit the zoom constraints + // For a given layers id, non-{root content} APZCs inherit the zoom constraints // of their root. - if (node && node->GetApzc()->HasNoParentWithSameLayersId()) { + if (node && node->GetApzc()->IsRootContent()) { UpdateZoomConstraintsRecursively(node.get(), aConstraints); } } @@ -1338,7 +1339,7 @@ APZCTreeManager::BuildOverscrollHandoffChain(const nsRefPtrAdd(apzc); if (apzc->GetScrollHandoffParentId() == FrameMetrics::NULL_SCROLL_ID) { - if (!apzc->HasNoParentWithSameLayersId()) { + if (!apzc->IsRootForLayersId()) { // This probably indicates a bug or missed case in layout code NS_WARNING("Found a non-root APZ with no handoff parent"); } @@ -1351,9 +1352,9 @@ APZCTreeManager::BuildOverscrollHandoffChain(const nsRefPtrGetScrollHandoffParentId() != apzc->GetGuid().mScrollId); // Find the AsyncPanZoomController instance with a matching layersId and - // the scroll id that matches apzc->GetScrollHandoffParentId(). To do this - // search the subtree with the same layersId for the apzc with the specified - // scroll id. + // the scroll id that matches apzc->GetScrollHandoffParentId(). + // As an optimization, we start by walking up the APZC tree from 'apzc' + // until we reach the top of the layer subtree for this layers id. AsyncPanZoomController* scrollParent = nullptr; AsyncPanZoomController* parent = apzc; while (!parent->HasNoParentWithSameLayersId()) { @@ -1366,6 +1367,7 @@ APZCTreeManager::BuildOverscrollHandoffChain(const nsRefPtrGetGuid().mLayersId, 0, apzc->GetScrollHandoffParentId()); nsRefPtr node = GetTargetNode(guid, &GuidComparatorIgnoringPresShell); @@ -1477,26 +1479,30 @@ APZCTreeManager::GetAPZCAtPoint(HitTestingTreeNode* aNode, return nullptr; } -AsyncPanZoomController* -APZCTreeManager::FindRootApzcForLayersId(uint64_t aLayersId) const +/* + * Do a breadth-first search of the tree rooted at |aRoot|, and return the + * first visited node that satisfies |aCondition|, or nullptr if no such node + * was found. + * + * |Node| should have methods GetLastChild() and GetPrevSibling(). 
+ */ +template +static const Node* BreadthFirstSearch(const Node* aRoot, const Condition& aCondition) { - mTreeLock.AssertCurrentThreadOwns(); - - if (!mRootNode) { + if (!aRoot) { return nullptr; } - std::deque queue; - queue.push_back(mRootNode); + std::deque queue; + queue.push_back(aRoot); while (!queue.empty()) { - const HitTestingTreeNode* node = queue.front(); + const Node* node = queue.front(); queue.pop_front(); - AsyncPanZoomController* apzc = node->GetApzc(); - if (apzc && apzc->GetLayersId() == aLayersId && apzc->IsRootForLayersId()) { - return apzc; + if (aCondition(node)) { + return node; } - for (HitTestingTreeNode* child = node->GetLastChild(); + for (const Node* child = node->GetLastChild(); child; child = child->GetPrevSibling()) { queue.push_back(child); @@ -1506,6 +1512,44 @@ APZCTreeManager::FindRootApzcForLayersId(uint64_t aLayersId) const return nullptr; } +AsyncPanZoomController* +APZCTreeManager::FindRootApzcForLayersId(uint64_t aLayersId) const +{ + mTreeLock.AssertCurrentThreadOwns(); + + struct RootForLayersIdMatcher { + uint64_t mLayersId; + bool operator()(const HitTestingTreeNode* aNode) const { + AsyncPanZoomController* apzc = aNode->GetApzc(); + return apzc + && apzc->GetLayersId() == mLayersId + && apzc->IsRootForLayersId(); + } + }; + const HitTestingTreeNode* resultNode = BreadthFirstSearch(mRootNode.get(), + RootForLayersIdMatcher{aLayersId}); + return resultNode ? resultNode->GetApzc() : nullptr; +} + +AsyncPanZoomController* +APZCTreeManager::FindRootContentApzcForLayersId(uint64_t aLayersId) const +{ + mTreeLock.AssertCurrentThreadOwns(); + + struct RootContentForLayersIdMatcher { + uint64_t mLayersId; + bool operator()(const HitTestingTreeNode* aNode) const { + AsyncPanZoomController* apzc = aNode->GetApzc(); + return apzc + && apzc->GetLayersId() == mLayersId + && apzc->IsRootContent(); + } + }; + const HitTestingTreeNode* resultNode = BreadthFirstSearch(mRootNode.get(), + RootContentForLayersIdMatcher{aLayersId}); + return resultNode ? resultNode->GetApzc() : nullptr; +} + /* The methods GetScreenToApzcTransform() and GetApzcToGeckoTransform() return some useful transformations that input events may need applied. This is best illustrated with an example. Consider a chain of layers, L, M, N, O, P, Q, R. Layer L @@ -1674,10 +1718,23 @@ APZCTreeManager::GetApzcToGeckoTransform(const AsyncPanZoomController *aApzc) co already_AddRefed APZCTreeManager::GetMultitouchTarget(AsyncPanZoomController* aApzc1, AsyncPanZoomController* aApzc2) const { - nsRefPtr apzc = CommonAncestor(aApzc1, aApzc2); - // For now, we only ever want to do pinching on the root APZC for a given layers id. So - // when we find the common ancestor of multiple points, also walk up to the root APZC. - apzc = RootAPZCForLayersId(apzc); + nsRefPtr apzc; + // For now, we only ever want to do pinching on the root-content APZC for + // a given layers id. + if (aApzc1 && aApzc2 && aApzc1->GetLayersId() == aApzc2->GetLayersId()) { + // If the two APZCs have the same layers id, find the root-content APZC + // for that layers id. Don't call CommonAncestor() because there may not + // be a common ancestor for the layers id (e.g. if one APZCs is inside a + // fixed-position element). + apzc = FindRootContentApzcForLayersId(aApzc1->GetLayersId()); + } else { + // Otherwise, find the common ancestor (to reach a common layers id), and + // get the root-content APZC for that layers id. 
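The BreadthFirstSearch template and the small matcher structs above generalize the old hand-written queue loop; a self-contained sketch of the same shape over a toy node type shows how a new query only needs another matcher, not another traversal:

    #include <cstdio>
    #include <deque>

    // Toy node exposing the two traversal hooks the template relies on:
    // GetLastChild() and GetPrevSibling().
    struct Node {
      int value = 0;
      Node* lastChild = nullptr;
      Node* prevSibling = nullptr;
      const Node* GetLastChild() const { return lastChild; }
      const Node* GetPrevSibling() const { return prevSibling; }
    };

    // Same shape as the template added to APZCTreeManager.cpp: visit nodes
    // breadth-first and return the first one matching |aCondition|.
    template <typename NodeT, typename Condition>
    static const NodeT* BreadthFirstSearch(const NodeT* aRoot,
                                           const Condition& aCondition) {
      if (!aRoot) {
        return nullptr;
      }
      std::deque<const NodeT*> queue;
      queue.push_back(aRoot);
      while (!queue.empty()) {
        const NodeT* node = queue.front();
        queue.pop_front();
        if (aCondition(node)) {
          return node;
        }
        for (const NodeT* child = node->GetLastChild(); child;
             child = child->GetPrevSibling()) {
          queue.push_back(child);
        }
      }
      return nullptr;
    }

    int main() {
      // root -> {a, b}, b -> {c}
      Node c{3};
      Node a{1};
      Node b{2, &c, &a};
      Node root{0, &b};

      // One matcher struct per query, mirroring RootForLayersIdMatcher above.
      struct ValueMatcher {
        int wanted;
        bool operator()(const Node* n) const { return n->value == wanted; }
      };
      const Node* found = BreadthFirstSearch(&root, ValueMatcher{3});
      std::printf("found=%d\n", found ? found->value : -1);
    }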
+ apzc = CommonAncestor(aApzc1, aApzc2); + if (apzc) { + apzc = FindRootContentApzcForLayersId(apzc->GetLayersId()); + } + } return apzc.forget(); } @@ -1727,16 +1784,5 @@ APZCTreeManager::CommonAncestor(AsyncPanZoomController* aApzc1, AsyncPanZoomCont return ancestor.forget(); } -already_AddRefed -APZCTreeManager::RootAPZCForLayersId(AsyncPanZoomController* aApzc) const -{ - MonitorAutoLock lock(mTreeLock); - nsRefPtr apzc = aApzc; - while (apzc && !apzc->HasNoParentWithSameLayersId()) { - apzc = apzc->GetParent(); - } - return apzc.forget(); -} - } } diff --git a/gfx/layers/apz/src/APZCTreeManager.h b/gfx/layers/apz/src/APZCTreeManager.h index cd4707a23d93..daa569c9aa29 100644 --- a/gfx/layers/apz/src/APZCTreeManager.h +++ b/gfx/layers/apz/src/APZCTreeManager.h @@ -413,9 +413,9 @@ private: const ParentLayerPoint& aHitTestPoint, HitTestResult* aOutHitResult); AsyncPanZoomController* FindRootApzcForLayersId(uint64_t aLayersId) const; + AsyncPanZoomController* FindRootContentApzcForLayersId(uint64_t aLayersId) const; already_AddRefed GetMultitouchTarget(AsyncPanZoomController* aApzc1, AsyncPanZoomController* aApzc2) const; already_AddRefed CommonAncestor(AsyncPanZoomController* aApzc1, AsyncPanZoomController* aApzc2) const; - already_AddRefed RootAPZCForLayersId(AsyncPanZoomController* aApzc) const; already_AddRefed GetTouchInputBlockAPZC(const MultiTouchInput& aEvent, HitTestResult* aOutHitResult); nsEventStatus ProcessTouchInput(MultiTouchInput& aInput, diff --git a/gfx/layers/apz/src/AsyncPanZoomController.cpp b/gfx/layers/apz/src/AsyncPanZoomController.cpp index b8b3db877446..6ac35f994614 100644 --- a/gfx/layers/apz/src/AsyncPanZoomController.cpp +++ b/gfx/layers/apz/src/AsyncPanZoomController.cpp @@ -1287,7 +1287,7 @@ nsEventStatus AsyncPanZoomController::OnScale(const PinchGestureInput& aEvent) { // would have to be adjusted (as e.g. it would no longer be valid to take // the minimum or maximum of the ratios of the widths and heights of the // page rect and the composition bounds). - MOZ_ASSERT(mFrameMetrics.IsRootScrollable()); + MOZ_ASSERT(mFrameMetrics.IsRootContent()); MOZ_ASSERT(mFrameMetrics.GetZoom().AreScalesSame()); float prevSpan = aEvent.mPreviousSpan; @@ -1433,7 +1433,7 @@ AsyncPanZoomController::GetScrollWheelDelta(const ScrollWheelInput& aEvent) cons MOZ_ASSERT_UNREACHABLE("unexpected scroll delta type"); } - if (mFrameMetrics.GetIsRoot() && gfxPrefs::MouseWheelHasRootScrollDeltaOverride()) { + if (mFrameMetrics.IsRootContent() && gfxPrefs::MouseWheelHasRootScrollDeltaOverride()) { // Only apply delta multipliers if we're increasing the delta. double hfactor = double(gfxPrefs::MouseWheelRootHScrollDeltaFactor()) / 100; double vfactor = double(gfxPrefs::MouseWheelRootVScrollDeltaFactor()) / 100; @@ -2996,7 +2996,7 @@ void AsyncPanZoomController::ZoomToRect(CSSRect aRect) { // would have to be adjusted (as e.g. it would no longer be valid to take // the minimum or maximum of the ratios of the widths and heights of the // page rect and the composition bounds). 
- MOZ_ASSERT(mFrameMetrics.IsRootScrollable()); + MOZ_ASSERT(mFrameMetrics.IsRootContent()); MOZ_ASSERT(mFrameMetrics.GetZoom().AreScalesSame()); SetState(ANIMATING_ZOOM); @@ -3190,19 +3190,19 @@ void AsyncPanZoomController::SendAsyncScrollEvent() { return; } - bool isRoot; + bool isRootContent; CSSRect contentRect; CSSSize scrollableSize; { ReentrantMonitorAutoEnter lock(mMonitor); - isRoot = mFrameMetrics.GetIsRoot(); + isRootContent = mFrameMetrics.IsRootContent(); scrollableSize = mFrameMetrics.GetScrollableRect().Size(); contentRect = mFrameMetrics.CalculateCompositedRectInCssPixels(); contentRect.MoveTo(mCurrentAsyncScrollOffset); } - controller->SendAsyncScrollDOMEvent(isRoot, contentRect, scrollableSize); + controller->SendAsyncScrollDOMEvent(isRootContent, contentRect, scrollableSize); } bool AsyncPanZoomController::Matches(const ScrollableLayerGuid& aGuid) diff --git a/gfx/layers/apz/src/AsyncPanZoomController.h b/gfx/layers/apz/src/AsyncPanZoomController.h index afaf3c22a4a1..52ad0f68dbe2 100644 --- a/gfx/layers/apz/src/AsyncPanZoomController.h +++ b/gfx/layers/apz/src/AsyncPanZoomController.h @@ -899,8 +899,7 @@ public: } /* Returns true if there is no APZC higher in the tree with the same - * layers id. Deprecated. New code shouldn't use this. Old code should be - * updated to not use this. + * layers id. */ bool HasNoParentWithSameLayersId() const { return !mParent || (mParent->mLayersId != mLayersId); @@ -911,6 +910,11 @@ public: return mFrameMetrics.IsLayersIdRoot(); } + bool IsRootContent() const { + ReentrantMonitorAutoEnter lock(mMonitor); + return mFrameMetrics.IsRootContent(); + } + private: // This is a raw pointer to avoid introducing a reference cycle between // AsyncPanZoomController and APZCTreeManager. Since these objects don't diff --git a/gfx/layers/apz/util/ChromeProcessController.h b/gfx/layers/apz/util/ChromeProcessController.h index 19e301d1f3e9..500162ac698f 100644 --- a/gfx/layers/apz/util/ChromeProcessController.h +++ b/gfx/layers/apz/util/ChromeProcessController.h @@ -49,7 +49,7 @@ public: virtual void HandleLongTap(const mozilla::CSSPoint& aPoint, Modifiers aModifiers, const ScrollableLayerGuid& aGuid, uint64_t aInputBlockId) override; - virtual void SendAsyncScrollDOMEvent(bool aIsRoot, const mozilla::CSSRect &aContentRect, + virtual void SendAsyncScrollDOMEvent(bool aIsRootContent, const mozilla::CSSRect &aContentRect, const mozilla::CSSSize &aScrollableSize) override {} virtual void NotifyAPZStateChange(const ScrollableLayerGuid& aGuid, APZStateChange aChange, diff --git a/gfx/layers/composite/AsyncCompositionManager.cpp b/gfx/layers/composite/AsyncCompositionManager.cpp index 31786dabccb8..76df089fbc21 100644 --- a/gfx/layers/composite/AsyncCompositionManager.cpp +++ b/gfx/layers/composite/AsyncCompositionManager.cpp @@ -813,11 +813,11 @@ ApplyAsyncTransformToScrollbarForContent(Layer* aScrollbar, const ParentLayerCoord thumbOriginDeltaPL = thumbOriginDelta * effectiveZoom; yTranslation -= thumbOriginDeltaPL; - if (metrics.IsRootScrollable()) { + if (metrics.IsRootContent()) { // Scrollbar for the root are painted at the same resolution as the // content. Since the coordinate space we apply this transform in includes // the resolution, we need to adjust for it as well here. Note that in - // another metrics.IsRootScrollable() hunk below we apply a + // another metrics.IsRootContent() hunk below we apply a // resolution-cancelling transform which ensures the scroll thumb isn't // actually rendered at a larger scale. 
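AsyncPanZoomController::IsRootContent() above takes the APZC's reentrant monitor before reading mFrameMetrics, since the metrics are updated from other threads. A generic sketch of the lock-guarded getter pattern using the standard library (the real code uses Mozilla's ReentrantMonitor, not std::recursive_mutex):

    #include <mutex>

    struct Metrics {
      bool isRootContent = false;
      double zoom = 1.0;
    };

    class Controller {
     public:
      // Read a field under the lock so the read cannot race with a
      // concurrent SetMetrics() on another thread.
      bool IsRootContent() const {
        std::lock_guard<std::recursive_mutex> lock(mMonitor);
        return mMetrics.isRootContent;
      }

      void SetMetrics(const Metrics& aMetrics) {
        std::lock_guard<std::recursive_mutex> lock(mMonitor);
        mMetrics = aMetrics;
      }

     private:
      mutable std::recursive_mutex mMonitor;  // recursive, like ReentrantMonitor
      Metrics mMetrics;
    };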
yTranslation *= metrics.GetPresShellResolution(); @@ -846,7 +846,7 @@ ApplyAsyncTransformToScrollbarForContent(Layer* aScrollbar, const ParentLayerCoord thumbOriginDeltaPL = thumbOriginDelta * effectiveZoom; xTranslation -= thumbOriginDeltaPL; - if (metrics.IsRootScrollable()) { + if (metrics.IsRootContent()) { xTranslation *= metrics.GetPresShellResolution(); } @@ -862,7 +862,7 @@ ApplyAsyncTransformToScrollbarForContent(Layer* aScrollbar, // thumb's size to vary with the zoom (other than its length reflecting the // fraction of the scrollable length that's in view, which is taken care of // above), we apply a transform to cancel out this resolution. - if (metrics.IsRootScrollable()) { + if (metrics.IsRootContent()) { compensation = Matrix4x4::Scaling(metrics.GetPresShellResolution(), metrics.GetPresShellResolution(), diff --git a/gfx/layers/composite/FrameUniformityData.cpp b/gfx/layers/composite/FrameUniformityData.cpp index eb2c3db78927..6027a14706f7 100644 --- a/gfx/layers/composite/FrameUniformityData.cpp +++ b/gfx/layers/composite/FrameUniformityData.cpp @@ -137,7 +137,8 @@ FrameUniformityData::ToJS(JS::MutableHandleValue aOutValue, JSContext* aContext) uintptr_t layerAddr = iter->first; float uniformity = iter->second; - layers.AppendElement(); + // FIXME: Make this infallible after bug 968520 is done. + MOZ_ALWAYS_TRUE(layers.AppendElement(fallible)); dom::FrameUniformity& entry = layers.LastElement(); entry.mLayerAddress.Construct() = layerAddr; diff --git a/gfx/layers/d3d11/TextureD3D11.cpp b/gfx/layers/d3d11/TextureD3D11.cpp index 366215d1ea23..a1baca00bad5 100644 --- a/gfx/layers/d3d11/TextureD3D11.cpp +++ b/gfx/layers/d3d11/TextureD3D11.cpp @@ -851,7 +851,11 @@ DataTextureSourceD3D11::Update(DataSourceSurface* aSurface, } DataSourceSurface::MappedSurface map; - aSurface->Map(DataSourceSurface::MapType::READ, &map); + if (!aSurface->Map(DataSourceSurface::MapType::READ, &map)) { + gfxCriticalError() << "Failed to map surface."; + Reset(); + return false; + } if (aDestRegion) { nsIntRegionRectIterator iter(*aDestRegion); diff --git a/gfx/src/nsRect.h b/gfx/src/nsRect.h index 8d402c749ceb..5bcb7cfc7206 100644 --- a/gfx/src/nsRect.h +++ b/gfx/src/nsRect.h @@ -10,7 +10,6 @@ #include // for FILE #include // for int32_t, int64_t #include // for min/max -#include "nsDebug.h" // for NS_WARNING #include "gfxCore.h" // for NS_GFX #include "mozilla/Likely.h" // for MOZ_UNLIKELY #include "mozilla/gfx/Rect.h" @@ -80,7 +79,6 @@ struct NS_GFX nsRect : result.x = std::min(aRect.x, x); int64_t w = std::max(int64_t(aRect.x) + aRect.width, int64_t(x) + width) - result.x; if (MOZ_UNLIKELY(w > nscoord_MAX)) { - NS_WARNING("Overflowed nscoord_MAX in conversion to nscoord width"); // Clamp huge negative x to nscoord_MIN / 2 and try again. result.x = std::max(result.x, nscoord_MIN / 2); w = std::max(int64_t(aRect.x) + aRect.width, int64_t(x) + width) - result.x; @@ -93,7 +91,6 @@ struct NS_GFX nsRect : result.y = std::min(aRect.y, y); int64_t h = std::max(int64_t(aRect.y) + aRect.height, int64_t(y) + height) - result.y; if (MOZ_UNLIKELY(h > nscoord_MAX)) { - NS_WARNING("Overflowed nscoord_MAX in conversion to nscoord height"); // Clamp huge negative y to nscoord_MIN / 2 and try again. 
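The gfx/src/nsRect.h hunk above (continued just below) only drops the NS_WARNING calls; the clamping it sits inside is still worth spelling out, since the extent math runs in int64_t before being squeezed back into nscoord. A standalone sketch of that saturating-union idea, with hypothetical names and int32_t standing in for nscoord:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    using coord = int32_t;
    const coord kCoordMax = INT32_MAX;
    const coord kCoordMin = INT32_MIN;

    struct Rect { coord x, y, w, h; };

    // Union of two rects whose extents may overflow 32 bits: compute in
    // 64 bits; if the result is too wide, first clamp a huge negative origin
    // to kCoordMin / 2 and retry, then clamp the extent itself.
    static Rect SaturatingUnion(const Rect& a, const Rect& b) {
      Rect r;
      r.x = std::min(a.x, b.x);
      int64_t w = std::max<int64_t>(int64_t(a.x) + a.w, int64_t(b.x) + b.w) - r.x;
      if (w > kCoordMax) {
        r.x = std::max(r.x, kCoordMin / 2);
        w = std::max<int64_t>(int64_t(a.x) + a.w, int64_t(b.x) + b.w) - r.x;
        w = std::min<int64_t>(w, kCoordMax);
      }
      r.w = coord(w);

      r.y = std::min(a.y, b.y);
      int64_t h = std::max<int64_t>(int64_t(a.y) + a.h, int64_t(b.y) + b.h) - r.y;
      if (h > kCoordMax) {
        r.y = std::max(r.y, kCoordMin / 2);
        h = std::max<int64_t>(int64_t(a.y) + a.h, int64_t(b.y) + b.h) - r.y;
        h = std::min<int64_t>(h, kCoordMax);
      }
      r.h = coord(h);
      return r;
    }

    int main() {
      Rect a{kCoordMin, 0, 10, 10};
      Rect b{kCoordMax - 10, 0, 10, 10};
      Rect u = SaturatingUnion(a, b);
      std::printf("x=%d w=%d\n", int(u.x), int(u.w));
    }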
result.y = std::max(result.y, nscoord_MIN / 2); h = std::max(int64_t(aRect.y) + aRect.height, int64_t(y) + height) - result.y; diff --git a/gfx/tests/gtest/TestAsyncPanZoomController.cpp b/gfx/tests/gtest/TestAsyncPanZoomController.cpp index b8e8aae3f351..8051f4cfe5a4 100644 --- a/gfx/tests/gtest/TestAsyncPanZoomController.cpp +++ b/gfx/tests/gtest/TestAsyncPanZoomController.cpp @@ -780,7 +780,7 @@ protected: fm.SetScrollOffset(CSSPoint(300, 300)); fm.SetZoom(CSSToParentLayerScale2D(2.0, 2.0)); // APZC only allows zooming on the root scrollable frame. - fm.SetIsRoot(true); + fm.SetIsRootContent(true); // the visible area of the document in CSS pixels is x=300 y=300 w=50 h=100 return fm; } @@ -921,7 +921,7 @@ TEST_F(APZCBasicTester, Overzoom) { fm.SetScrollableRect(CSSRect(0, 0, 125, 150)); fm.SetScrollOffset(CSSPoint(10, 0)); fm.SetZoom(CSSToParentLayerScale2D(1.0, 1.0)); - fm.SetIsRoot(true); + fm.SetIsRootContent(true); apzc->SetFrameMetrics(fm); MakeApzcZoomable(); diff --git a/gfx/thebes/gfxContext.cpp b/gfx/thebes/gfxContext.cpp index 4c93001b84ce..c17c20c7c24b 100644 --- a/gfx/thebes/gfxContext.cpp +++ b/gfx/thebes/gfxContext.cpp @@ -636,6 +636,40 @@ gfxContext::HasComplexClip() const return false; } +bool +gfxContext::ExportClip(ClipExporter& aExporter) +{ + unsigned int lastReset = 0; + for (int i = mStateStack.Length() - 1; i > 0; i--) { + if (mStateStack[i].clipWasReset) { + lastReset = i; + break; + } + } + + for (unsigned int i = lastReset; i < mStateStack.Length(); i++) { + for (unsigned int c = 0; c < mStateStack[i].pushedClips.Length(); c++) { + AzureState::PushedClip &clip = mStateStack[i].pushedClips[c]; + gfx::Matrix transform = clip.transform; + transform.PostTranslate(-GetDeviceOffset()); + + aExporter.BeginClip(transform); + if (clip.path) { + clip.path->StreamToSink(&aExporter); + } else { + aExporter.MoveTo(clip.rect.TopLeft()); + aExporter.LineTo(clip.rect.TopRight()); + aExporter.LineTo(clip.rect.BottomRight()); + aExporter.LineTo(clip.rect.BottomLeft()); + aExporter.Close(); + } + aExporter.EndClip(); + } + } + + return true; +} + bool gfxContext::ClipContainsRect(const gfxRect& aRect) { diff --git a/gfx/thebes/gfxContext.h b/gfx/thebes/gfxContext.h index d88cc3677171..268a59fd4221 100644 --- a/gfx/thebes/gfxContext.h +++ b/gfx/thebes/gfxContext.h @@ -28,6 +28,8 @@ struct RectCornerRadii; } } +class ClipExporter; + /** * This is the main class for doing actual drawing. It is initialized using * a surface and can be drawn on. It manages various state information like @@ -457,6 +459,11 @@ public: */ bool ClipContainsRect(const gfxRect& aRect); + /** + * Exports the current clip using the provided exporter. + */ + bool ExportClip(ClipExporter& aExporter); + /** * Groups */ @@ -728,4 +735,12 @@ private: mozilla::gfx::Pattern *mPattern; }; +/* This interface should be implemented to handle exporting the clip from a context. + */ +class ClipExporter : public mozilla::gfx::PathSink { +public: + virtual void BeginClip(const mozilla::gfx::Matrix& aMatrix) = 0; + virtual void EndClip() = 0; +}; + #endif /* GFX_CONTEXT_H */ diff --git a/gfx/thebes/gfxDWriteFontList.cpp b/gfx/thebes/gfxDWriteFontList.cpp index 17bd3af2f10f..66e7f0c93d90 100644 --- a/gfx/thebes/gfxDWriteFontList.cpp +++ b/gfx/thebes/gfxDWriteFontList.cpp @@ -311,7 +311,7 @@ gfxDWriteFontFamily::LocalizedName(nsAString &aLocalizedName) return; } - if (!famName.SetLength(length + 1)) { + if (!famName.SetLength(length + 1, fallible)) { // Eeep - running out of memory. Unlikely to end well. 
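gfxContext::ExportClip() above replays the accumulated clip stack into a ClipExporter, emitting either each stored path or the four edges of a rectangular clip between BeginClip/EndClip calls. A hypothetical exporter that just logs what it receives gives a feel for the call sequence; note the trimmed interface below is a stand-in, since the real ClipExporter also inherits mozilla::gfx::PathSink, whose remaining path callbacks are not reproduced here:

    #include <cstdio>

    // Hypothetical, trimmed-down stand-ins for gfx::Point/Matrix and the
    // ClipExporter interface added in gfxContext.h.
    struct Point { float x, y; };
    struct Matrix { float a = 1, b = 0, c = 0, d = 1, tx = 0, ty = 0; };

    class MiniClipExporter {
     public:
      virtual ~MiniClipExporter() = default;
      virtual void BeginClip(const Matrix& aTransform) = 0;
      virtual void MoveTo(const Point& aPoint) = 0;
      virtual void LineTo(const Point& aPoint) = 0;
      virtual void Close() = 0;
      virtual void EndClip() = 0;
    };

    // Example consumer: prints each clip as a sequence of path commands, in
    // the order ExportClip would deliver them for a rectangular clip.
    class LoggingClipExporter final : public MiniClipExporter {
     public:
      void BeginClip(const Matrix& m) override {
        std::printf("begin clip (offset %.1f,%.1f)\n", m.tx, m.ty);
      }
      void MoveTo(const Point& p) override { std::printf("  move %.1f,%.1f\n", p.x, p.y); }
      void LineTo(const Point& p) override { std::printf("  line %.1f,%.1f\n", p.x, p.y); }
      void Close() override { std::printf("  close\n"); }
      void EndClip() override { std::printf("end clip\n"); }
    };

    int main() {
      // Simulate what ExportClip does for a single rectangular clip.
      LoggingClipExporter exporter;
      exporter.BeginClip(Matrix{});
      exporter.MoveTo({0, 0});
      exporter.LineTo({100, 0});
      exporter.LineTo({100, 50});
      exporter.LineTo({0, 50});
      exporter.Close();
      exporter.EndClip();
    }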
return; } @@ -403,7 +403,7 @@ gfxDWriteFontEntry::CopyFontTable(uint32_t aTableTag, NativeEndian::swapToBigEndian(aTableTag), 0, nullptr, 0); if (tableSize != GDI_ERROR) { - if (aBuffer.SetLength(tableSize)) { + if (aBuffer.SetLength(tableSize, fallible)) { ::GetFontData(dc.GetDC(), NativeEndian::swapToBigEndian(aTableTag), 0, aBuffer.Elements(), aBuffer.Length()); @@ -433,7 +433,7 @@ gfxDWriteFontEntry::CopyFontTable(uint32_t aTableTag, return NS_ERROR_FAILURE; } - if (aBuffer.SetLength(len)) { + if (aBuffer.SetLength(len, fallible)) { memcpy(aBuffer.Elements(), tableData, len); rv = NS_OK; } else { @@ -1122,7 +1122,7 @@ gfxDWriteFontList::GetFontsFromCollection(IDWriteFontCollection* aCollection) continue; } - if (!enName.SetLength(length + 1)) { + if (!enName.SetLength(length + 1, fallible)) { // Eeep - running out of memory. Unlikely to end well. continue; } @@ -1171,7 +1171,7 @@ gfxDWriteFontList::GetFontsFromCollection(IDWriteFontCollection* aCollection) continue; } - if (!localizedName.SetLength(nameLen + 1)) { + if (!localizedName.SetLength(nameLen + 1, fallible)) { continue; } @@ -1406,7 +1406,7 @@ static HRESULT GetFamilyName(IDWriteFont *aFont, nsString& aFamilyName) return hr; } - if (!name.SetLength(length + 1)) { + if (!name.SetLength(length + 1, fallible)) { return E_FAIL; } hr = familyNames->GetString(index, name.Elements(), length + 1); @@ -1587,7 +1587,7 @@ DirectWriteFontInfo::LoadFontFamilyData(const nsAString& aFamilyName) nsAutoTArray famName; uint32_t len = aFamilyName.Length(); - famName.SetLength(len + 1); + famName.SetLength(len + 1, fallible); memcpy(famName.Elements(), aFamilyName.BeginReading(), len * sizeof(char16_t)); famName[len] = 0; diff --git a/gfx/thebes/gfxFT2FontList.cpp b/gfx/thebes/gfxFT2FontList.cpp index a8b9bae4562d..e246c478d996 100644 --- a/gfx/thebes/gfxFT2FontList.cpp +++ b/gfx/thebes/gfxFT2FontList.cpp @@ -532,7 +532,7 @@ FT2FontEntry::CopyFontTable(uint32_t aTableTag, return NS_ERROR_FAILURE; } - if (!aBuffer.SetLength(len)) { + if (!aBuffer.SetLength(len, fallible)) { return NS_ERROR_OUT_OF_MEMORY; } uint8_t *buf = aBuffer.Elements(); diff --git a/gfx/thebes/gfxFcPlatformFontList.cpp b/gfx/thebes/gfxFcPlatformFontList.cpp index 7d8af32614d9..4ede0e87a15d 100644 --- a/gfx/thebes/gfxFcPlatformFontList.cpp +++ b/gfx/thebes/gfxFcPlatformFontList.cpp @@ -805,7 +805,7 @@ gfxFontconfigFontEntry::CopyFontTable(uint32_t aTableTag, if (FT_Load_Sfnt_Table(mFTFace, aTableTag, 0, nullptr, &length) != 0) { return NS_ERROR_NOT_AVAILABLE; } - if (!aBuffer.SetLength(length)) { + if (!aBuffer.SetLength(length, fallible)) { return NS_ERROR_OUT_OF_MEMORY; } if (FT_Load_Sfnt_Table(mFTFace, aTableTag, 0, aBuffer.Elements(), &length) != 0) { diff --git a/gfx/thebes/gfxFontconfigFonts.cpp b/gfx/thebes/gfxFontconfigFonts.cpp index 069d3a1cfb9c..3a577021e2a3 100644 --- a/gfx/thebes/gfxFontconfigFonts.cpp +++ b/gfx/thebes/gfxFontconfigFonts.cpp @@ -184,9 +184,11 @@ public: { cairo_font_face_reference(mFontFace); cairo_font_face_set_user_data(mFontFace, &sFontEntryKey, this, nullptr); - mPatterns.AppendElement(); + // mPatterns is an nsAutoTArray with 1 space always available, so the // AppendElement always succeeds. + // FIXME: Make this infallible after bug 968520 is done. 
+ MOZ_ALWAYS_TRUE(mPatterns.AppendElement(fallible)); mPatterns[0] = aFontPattern; FcChar8 *name; @@ -248,7 +250,7 @@ gfxSystemFcFontEntry::CopyFontTable(uint32_t aTableTag, if (FT_Load_Sfnt_Table(mFTFace, aTableTag, 0, nullptr, &length) != 0) { return NS_ERROR_NOT_AVAILABLE; } - if (!aBuffer.SetLength(length)) { + if (!aBuffer.SetLength(length, fallible)) { return NS_ERROR_OUT_OF_MEMORY; } if (FT_Load_Sfnt_Table(mFTFace, aTableTag, 0, aBuffer.Elements(), &length) != 0) { @@ -419,7 +421,7 @@ public: const nsTArray< nsCountedRef >& aPatterns) : gfxUserFcFontEntry(aFontName, aWeight, aStretch, aItalic) { - if (!mPatterns.SetCapacity(aPatterns.Length())) + if (!mPatterns.SetCapacity(aPatterns.Length(), fallible)) return; // OOM for (uint32_t i = 0; i < aPatterns.Length(); ++i) { @@ -429,7 +431,8 @@ public: AdjustPatternToCSS(pattern); - mPatterns.AppendElement(); + // FIXME: Make this infallible after bug 968520 is done. + MOZ_ALWAYS_TRUE(mPatterns.AppendElement(fallible)); mPatterns[i].own(pattern); } mIsLocalUserFont = true; @@ -617,7 +620,8 @@ gfxDownloadedFcFontEntry::InitPattern() AddDownloadedFontEntry(pattern, this); // There is never more than one pattern - mPatterns.AppendElement(); + // FIXME: Make this infallible after bug 968520 is done. + MOZ_ALWAYS_TRUE(mPatterns.AppendElement(fallible)); mPatterns[0].own(pattern); } diff --git a/gfx/thebes/gfxGDIFontList.cpp b/gfx/thebes/gfxGDIFontList.cpp index 7a20e2b0eab1..ac2dc854373e 100644 --- a/gfx/thebes/gfxGDIFontList.cpp +++ b/gfx/thebes/gfxGDIFontList.cpp @@ -245,7 +245,7 @@ GDIFontEntry::CopyFontTable(uint32_t aTableTag, NativeEndian::swapToBigEndian(aTableTag), 0, nullptr, 0); if (tableSize != GDI_ERROR) { - if (aBuffer.SetLength(tableSize)) { + if (aBuffer.SetLength(tableSize, fallible)) { ::GetFontData(dc.GetDC(), NativeEndian::swapToBigEndian(aTableTag), 0, aBuffer.Elements(), tableSize); @@ -976,7 +976,7 @@ int CALLBACK GDIFontInfo::EnumerateFontsForFamily( nameSize = ::GetFontData(hdc, kNAME, 0, nullptr, 0); if (nameSize != GDI_ERROR && nameSize > 0 && - nameData.SetLength(nameSize)) { + nameData.SetLength(nameSize, fallible)) { ::GetFontData(hdc, kNAME, 0, nameData.Elements(), nameSize); // face names @@ -1019,7 +1019,7 @@ int CALLBACK GDIFontInfo::EnumerateFontsForFamily( cmapSize = ::GetFontData(hdc, kCMAP, 0, nullptr, 0); if (cmapSize != GDI_ERROR && cmapSize > 0 && - cmapData.SetLength(cmapSize)) { + cmapData.SetLength(cmapSize, fallible)) { ::GetFontData(hdc, kCMAP, 0, cmapData.Elements(), cmapSize); bool cmapLoaded = false; bool unicodeFont = false, symbolFont = false; diff --git a/gfx/thebes/gfxPlatformMac.cpp b/gfx/thebes/gfxPlatformMac.cpp index cde0459ef6ad..50fe59c3047b 100644 --- a/gfx/thebes/gfxPlatformMac.cpp +++ b/gfx/thebes/gfxPlatformMac.cpp @@ -514,6 +514,7 @@ public: } mPreviousTimestamp = TimeStamp::Now(); + mStartingVsync = true; if (CVDisplayLinkStart(mDisplayLink) != kCVReturnSuccess) { NS_WARNING("Could not activate the display link"); CVDisplayLinkRelease(mDisplayLink); @@ -547,6 +548,7 @@ public: // Normalize the timestamps given to the VsyncDispatchers to the vsync // that just occured, not the vsync that is upcoming. 
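The font-list hunks above switch SetLength/SetCapacity/AppendElement over to their fallible overloads so that an allocation failure surfaces as a false return instead of aborting. A generic sketch of the same control flow with a self-contained buffer type (the fallible_t tag and nsTArray themselves are Mozilla-specific, so treat this as the shape of the pattern, not a drop-in):

    #include <cstdint>
    #include <new>
    #include <vector>

    enum Result { Ok, ErrOutOfMemory, ErrNotAvailable };

    // Minimal stand-in for an array used through the fallible overloads:
    // growth reports failure instead of crashing the process.
    class FallibleBuffer {
     public:
      bool SetLength(size_t aLength) {
        try {
          mData.resize(aLength);
          return true;
        } catch (const std::bad_alloc&) {
          return false;
        }
      }
      uint8_t* Elements() { return mData.data(); }
      size_t Length() const { return mData.size(); }
     private:
      std::vector<uint8_t> mData;
    };

    // Same control flow as the CopyFontTable changes above: query the table
    // size, try to grow the buffer, and translate an allocation failure into
    // an error the caller can handle.
    Result CopyFontTable(size_t tableSize, FallibleBuffer& aBuffer) {
      if (tableSize == 0) {
        return ErrNotAvailable;
      }
      if (!aBuffer.SetLength(tableSize)) {
        return ErrOutOfMemory;   // instead of aborting on an infallible resize
      }
      // ... read tableSize bytes into aBuffer.Elements() ...
      return Ok;
    }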
TimeStamp mPreviousTimestamp; + bool mStartingVsync; private: // Manages the display link render thread @@ -575,12 +577,16 @@ static CVReturn VsyncCallback(CVDisplayLinkRef aDisplayLink, mozilla::TimeStamp nextVsync = mozilla::TimeStamp::FromSystemTime(nextVsyncTimestamp); mozilla::TimeStamp previousVsync = display->mPreviousTimestamp; + bool firstVsync = display->mStartingVsync; + + display->mStartingVsync = false; display->mPreviousTimestamp = nextVsync; mozilla::TimeStamp now = TimeStamp::Now(); if (nextVsync <= previousVsync) { TimeDuration next = nextVsync - now; TimeDuration prev = now - previousVsync; - printf_stderr("Next from now: %f, prev from now: %f\n", next.ToMilliseconds(), prev.ToMilliseconds()); + printf_stderr("Next from now: %f, prev from now: %f, first vsync %d\n", + next.ToMilliseconds(), prev.ToMilliseconds(), firstVsync); MOZ_ASSERT(false, "Next vsync less than previous vsync\n"); } diff --git a/image/imgFrame.cpp b/image/imgFrame.cpp index 4504a7c43f76..2b033e748851 100644 --- a/image/imgFrame.cpp +++ b/image/imgFrame.cpp @@ -484,9 +484,11 @@ imgFrame::Optimize() } DataSourceSurface::MappedSurface mapping; - DebugOnly success = - surf->Map(DataSourceSurface::MapType::WRITE, &mapping); - NS_ASSERTION(success, "Failed to map surface"); + if (!surf->Map(DataSourceSurface::MapType::WRITE, &mapping)) { + gfxCriticalError() << "imgFrame::Optimize failed to map surface"; + return NS_ERROR_FAILURE; + } + RefPtr target = Factory::CreateDrawTargetForData(BackendType::CAIRO, mapping.mData, @@ -910,9 +912,11 @@ imgFrame::Deoptimize() } DataSourceSurface::MappedSurface mapping; - DebugOnly success = - surf->Map(DataSourceSurface::MapType::WRITE, &mapping); - NS_ASSERTION(success, "Failed to map surface"); + if (!surf->Map(DataSourceSurface::MapType::WRITE, &mapping)) { + gfxCriticalError() << "imgFrame::Deoptimize failed to map surface"; + return NS_ERROR_FAILURE; + } + RefPtr target = Factory::CreateDrawTargetForData(BackendType::CAIRO, mapping.mData, diff --git a/ipc/glue/GeckoChildProcessHost.cpp b/ipc/glue/GeckoChildProcessHost.cpp index f37616bbdf9b..00efc418fdc3 100644 --- a/ipc/glue/GeckoChildProcessHost.cpp +++ b/ipc/glue/GeckoChildProcessHost.cpp @@ -636,7 +636,7 @@ GeckoChildProcessHost::PerformAsyncLaunchInternal(std::vector& aExt // been set up by whatever may have launched the browser. 
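The GeckoChildProcessHost.cpp change just below adds a length check before reusing an inherited DYLD_INSERT_LIBRARIES value; with only a null check, a variable that is set but empty would yield a list with a leading ':' and an empty entry. A small sketch of the prepend-if-nonempty pattern (function and parameter names here are illustrative):

    #include <cstdlib>
    #include <cstring>
    #include <string>

    // Build the value for a colon-separated library list, prepending any
    // inherited entries only when the variable is set *and* non-empty.
    std::string BuildInterposeList(const char* aEnvName, const std::string& aOurLib) {
      std::string value;
      const char* prev = std::getenv(aEnvName);
      if (prev && std::strlen(prev) > 0) {
        value.assign(prev);
        value.push_back(':');
      }
      value += aOurLib;   // e.g. the library the parent process wants interposed
      return value;
    }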
const char* prevInterpose = PR_GetEnv("DYLD_INSERT_LIBRARIES"); nsCString interpose; - if (prevInterpose) { + if (prevInterpose && strlen(prevInterpose) > 0) { interpose.Assign(prevInterpose); interpose.Append(':'); } diff --git a/js/public/GCAPI.h b/js/public/GCAPI.h index 3482048d9098..baa2ec7fcc08 100644 --- a/js/public/GCAPI.h +++ b/js/public/GCAPI.h @@ -108,7 +108,8 @@ using mozilla::UniquePtr; D(FULL_GC_TIMER) \ D(SHUTDOWN_CC) \ D(FINISH_LARGE_EVALUATE) \ - D(USER_INACTIVE) + D(USER_INACTIVE) \ + D(XPCONNECT_SHUTDOWN) namespace gcreason { @@ -332,9 +333,10 @@ enum GCProgress { struct JS_PUBLIC_API(GCDescription) { bool isCompartment_; JSGCInvocationKind invocationKind_; + gcreason::Reason reason_; - GCDescription(bool isCompartment, JSGCInvocationKind kind) - : isCompartment_(isCompartment), invocationKind_(kind) {} + GCDescription(bool isCompartment, JSGCInvocationKind kind, gcreason::Reason reason) + : isCompartment_(isCompartment), invocationKind_(kind), reason_(reason) {} char16_t* formatSliceMessage(JSRuntime* rt) const; char16_t* formatSummaryMessage(JSRuntime* rt) const; diff --git a/js/public/SliceBudget.h b/js/public/SliceBudget.h index 316bf9e62c49..8ac0def7ac84 100644 --- a/js/public/SliceBudget.h +++ b/js/public/SliceBudget.h @@ -70,9 +70,9 @@ struct JS_PUBLIC_API(SliceBudget) return checkOverBudget(); } - bool isUnlimited() const { - return deadline == unlimitedDeadline; - } + bool isWorkBudget() const { return deadline == 0; } + bool isTimeBudget() const { return deadline > 0 && !isUnlimited(); } + bool isUnlimited() const { return deadline == unlimitedDeadline; } int describe(char* buffer, size_t maxlen) const; diff --git a/js/src/devtools/rootAnalysis/annotations.js b/js/src/devtools/rootAnalysis/annotations.js index 4b82505db766..e71ed3486eeb 100644 --- a/js/src/devtools/rootAnalysis/annotations.js +++ b/js/src/devtools/rootAnalysis/annotations.js @@ -330,10 +330,6 @@ function isOverridableField(initialCSU, csu, field) if (field == 'GetWindowProxy' || field == 'GetWindowProxyPreserveColor') return false; } - if (initialCSU == 'nsICycleCollectorListener' && field == 'NoteWeakMapEntry') - return false; - if (initialCSU == 'nsICycleCollectorListener' && field == 'NoteEdge') - return false; return true; } diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h index 01d3cc0b0e6a..51013b99e5cb 100644 --- a/js/src/gc/GCRuntime.h +++ b/js/src/gc/GCRuntime.h @@ -771,28 +771,30 @@ class GCRuntime JS::Zone* getCurrentZoneGroup() { return currentZoneGroup; } void setFoundBlackGrayEdges() { foundBlackGrayEdges = true; } - uint64_t gcNumber() { return number; } + uint64_t gcNumber() const { return number; } void incGcNumber() { ++number; } - uint64_t minorGCCount() { return minorGCNumber; } + uint64_t minorGCCount() const { return minorGCNumber; } void incMinorGcNumber() { ++minorGCNumber; } - uint64_t majorGCCount() { return majorGCNumber; } + uint64_t majorGCCount() const { return majorGCNumber; } void incMajorGcNumber() { ++majorGCNumber; } - bool isIncrementalGc() { return isIncremental; } - bool isFullGc() { return isFull; } + int64_t defaultSliceBudget() const { return sliceBudget; } + + bool isIncrementalGc() const { return isIncremental; } + bool isFullGc() const { return isFull; } bool shouldCleanUpEverything() { return cleanUpEverything; } - bool areGrayBitsValid() { return grayBitsValid; } + bool areGrayBitsValid() const { return grayBitsValid; } void setGrayBitsInvalid() { grayBitsValid = false; } bool minorGCRequested() const { return minorGCTriggerReason != 
JS::gcreason::NO_REASON; } bool majorGCRequested() const { return majorGCTriggerReason != JS::gcreason::NO_REASON; } bool isGcNeeded() { return minorGCRequested() || majorGCRequested(); } - bool fullGCForAtomsRequested() { return fullGCForAtomsRequested_; } + bool fullGCForAtomsRequested() const { return fullGCForAtomsRequested_; } double computeHeapGrowthFactor(size_t lastBytes); size_t computeTriggerBytes(double growthFactor, size_t lastBytes); @@ -1161,7 +1163,7 @@ class GCRuntime */ bool interFrameGC; - /* Default budget for incremental GC slice. See SliceBudget in jsgc.h. */ + /* Default budget for incremental GC slice. See js/SliceBudget.h. */ int64_t sliceBudget; /* diff --git a/js/src/gc/Statistics.cpp b/js/src/gc/Statistics.cpp index 7e7acdd183ab..314fac5c7479 100644 --- a/js/src/gc/Statistics.cpp +++ b/js/src/gc/Statistics.cpp @@ -7,6 +7,7 @@ #include "gc/Statistics.h" #include "mozilla/ArrayUtils.h" +#include "mozilla/IntegerRange.h" #include "mozilla/PodOperations.h" #include "mozilla/UniquePtr.h" @@ -27,6 +28,7 @@ using namespace js; using namespace js::gc; using namespace js::gcstats; +using mozilla::MakeRange; using mozilla::PodArrayZero; using mozilla::PodZero; @@ -185,7 +187,7 @@ static ExtraPhaseInfo phaseExtra[PHASE_LIMIT] = { { 0, 0 } }; // Mapping from all nodes with a multi-parented child to a Vector of all // multi-parented children and their descendants. (Single-parented children will // not show up in this list.) -static mozilla::Vector dagDescendants[Statistics::MAX_MULTIPARENT_PHASES + 1]; +static mozilla::Vector dagDescendants[Statistics::NumTimingArrays]; struct AllPhaseIterator { int current; @@ -193,7 +195,7 @@ struct AllPhaseIterator { size_t activeSlot; mozilla::Vector::Range descendants; - explicit AllPhaseIterator(Statistics::PhaseTimeTable table) + explicit AllPhaseIterator(const Statistics::PhaseTimeTable table) : current(0) , baseLevel(0) , activeSlot(PHASE_DAG_NONE) @@ -293,7 +295,7 @@ Join(const FragmentVector& fragments, const char* separator = "") { } static int64_t -SumChildTimes(size_t phaseSlot, Phase phase, Statistics::PhaseTimeTable phaseTimes) +SumChildTimes(size_t phaseSlot, Phase phase, const Statistics::PhaseTimeTable phaseTimes) { // Sum the contributions from single-parented children. 
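Statistics.cpp above now pulls in mozilla/IntegerRange.h and, in the hunks that follow, rewrites index loops as range-for over MakeRange(...). A minimal, hypothetical version of the idea behind that helper (not mozilla::MakeRange itself, which also supports a start value) shows what the range-for expands to:

    #include <cstddef>
    #include <cstdio>

    // Half-open integer range usable in range-for, so a loop like
    //   for (size_t d = 0; d < NumTimingArrays; d++)
    // can be written as
    //   for (auto d : MakeRange(NumTimingArrays))
    template <typename T>
    class IntegerRange {
     public:
      class Iterator {
       public:
        explicit Iterator(T aValue) : mValue(aValue) {}
        T operator*() const { return mValue; }
        Iterator& operator++() { ++mValue; return *this; }
        bool operator!=(const Iterator& aOther) const { return mValue != aOther.mValue; }
       private:
        T mValue;
      };

      explicit IntegerRange(T aEnd) : mEnd(aEnd) {}
      Iterator begin() const { return Iterator(T(0)); }
      Iterator end() const { return Iterator(mEnd); }

     private:
      T mEnd;
    };

    template <typename T>
    IntegerRange<T> MakeRange(T aEnd) { return IntegerRange<T>(aEnd); }

    int main() {
      const size_t NumTimingArrays = 7;
      for (auto d : MakeRange(NumTimingArrays)) {
        std::printf("%zu ", d);
      }
      std::printf("\n");
    }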
int64_t total = 0; @@ -398,7 +400,7 @@ Statistics::formatCompactSummaryMessage() const } UniqueChars -Statistics::formatCompactSlicePhaseTimes(PhaseTimeTable phaseTimes) const +Statistics::formatCompactSlicePhaseTimes(const PhaseTimeTable phaseTimes) const { static const int64_t MaxUnaccountedTimeUS = 100; @@ -524,7 +526,7 @@ Statistics::formatDetailedSliceDescription(unsigned i, const SliceData& slice) } UniqueChars -Statistics::formatDetailedPhaseTimes(PhaseTimeTable phaseTimes) +Statistics::formatDetailedPhaseTimes(const PhaseTimeTable phaseTimes) { static const char* LevelToIndent[] = { "", " ", " ", " " }; static const int64_t MaxUnaccountedChildTimeUS = 50; @@ -711,7 +713,7 @@ FilterJsonKey(const char*const buffer) } UniqueChars -Statistics::formatJsonPhaseTimes(PhaseTimeTable phaseTimes) +Statistics::formatJsonPhaseTimes(const PhaseTimeTable phaseTimes) { FragmentVector fragments; char buffer[128]; @@ -749,7 +751,7 @@ Statistics::Statistics(JSRuntime* rt) PodArrayZero(phaseTotals); PodArrayZero(counts); PodArrayZero(phaseStartTimes); - for (size_t d = 0; d < MAX_MULTIPARENT_PHASES + 1; d++) + for (auto d : MakeRange(NumTimingArrays)) PodArrayZero(phaseTimes[d]); static bool initialized = false; @@ -778,7 +780,7 @@ Statistics::Statistics(JSRuntime* rt) j++; } while (j != PHASE_LIMIT && phases[j].parent != PHASE_MULTI_PARENTS); } - MOZ_ASSERT(dagSlot <= MAX_MULTIPARENT_PHASES); + MOZ_ASSERT(dagSlot <= MaxMultiparentPhases - 1); // Fill in the depth of each node in the tree. Multi-parented nodes // have depth 0. @@ -846,7 +848,7 @@ static int64_t SumPhase(Phase phase, Statistics::PhaseTimeTable times) { int64_t sum = 0; - for (size_t i = 0; i < Statistics::MAX_MULTIPARENT_PHASES + 1; i++) + for (auto i : MakeRange(Statistics::NumTimingArrays)) sum += times[i][phase]; return sum; } @@ -878,7 +880,7 @@ Statistics::beginGC(JSGCInvocationKind kind) void Statistics::endGC() { - for (size_t j = 0; j < MAX_MULTIPARENT_PHASES + 1; j++) + for (auto j : MakeRange(NumTimingArrays)) for (int i = 0; i < PHASE_LIMIT; i++) phaseTotals[j][i] += phaseTimes[j][i]; @@ -913,7 +915,7 @@ Statistics::endGC() // Clear the timers at the end of a GC because we accumulate time in // between GCs for some (which come before PHASE_GC_BEGIN in the list.) PodZero(&phaseStartTimes[PHASE_GC_BEGIN], PHASE_LIMIT - PHASE_GC_BEGIN); - for (size_t d = PHASE_DAG_NONE; d < MAX_MULTIPARENT_PHASES + 1; d++) + for (size_t d = PHASE_DAG_NONE; d < NumTimingArrays; d++) PodZero(&phaseTimes[d][PHASE_GC_BEGIN], PHASE_LIMIT - PHASE_GC_BEGIN); aborted = false; @@ -943,7 +945,7 @@ Statistics::beginSlice(const ZoneGCStats& zoneStats, JSGCInvocationKind gckind, bool wasFullGC = zoneStats.isCollectingAllZones(); if (sliceCallback) (*sliceCallback)(runtime, first ? 
JS::GC_CYCLE_BEGIN : JS::GC_SLICE_BEGIN, - JS::GCDescription(!wasFullGC, gckind)); + JS::GCDescription(!wasFullGC, gckind, reason)); } } @@ -954,8 +956,16 @@ Statistics::endSlice() slices.back().end = PRMJ_Now(); slices.back().endFaults = GetPageFaultCount(); - runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_MS, t(slices.back().end - slices.back().start)); + int64_t sliceTime = slices.back().end - slices.back().start; + runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_MS, t(sliceTime)); runtime->addTelemetry(JS_TELEMETRY_GC_RESET, !!slices.back().resetReason); + + if (slices.back().budget.isTimeBudget()) { + int64_t budget = slices.back().budget.timeBudget.budget; + runtime->addTelemetry(JS_TELEMETRY_GC_BUDGET_MS, t(budget)); + if (budget == runtime->gc.defaultSliceBudget()) + runtime->addTelemetry(JS_TELEMETRY_GC_ANIMATION_MS, t(sliceTime)); + } } bool last = !runtime->gc.isIncrementalGCInProgress(); @@ -967,7 +977,7 @@ Statistics::endSlice() bool wasFullGC = zoneStats.isCollectingAllZones(); if (sliceCallback) (*sliceCallback)(runtime, last ? JS::GC_CYCLE_END : JS::GC_SLICE_END, - JS::GCDescription(!wasFullGC, gckind)); + JS::GCDescription(!wasFullGC, gckind, slices.back().reason)); } /* Do this after the slice callback since it uses these values. */ diff --git a/js/src/gc/Statistics.h b/js/src/gc/Statistics.h index c97d3bf64559..ceb7219af322 100644 --- a/js/src/gc/Statistics.h +++ b/js/src/gc/Statistics.h @@ -8,6 +8,7 @@ #define gc_Statistics_h #include "mozilla/DebugOnly.h" +#include "mozilla/IntegerRange.h" #include "mozilla/PodOperations.h" #include "mozilla/UniquePtr.h" @@ -151,7 +152,11 @@ struct Statistics * the few hundred bytes of savings. If we want to extend things to full * DAGs, this decision should be reconsidered. */ - static const size_t MAX_MULTIPARENT_PHASES = 6; + static const size_t MaxMultiparentPhases = 6; + static const size_t NumTimingArrays = MaxMultiparentPhases + 1; + + /* Create a convenient type for referring to tables of phase times. */ + using PhaseTimeTable = int64_t[NumTimingArrays][PHASE_LIMIT]; explicit Statistics(JSRuntime* rt); ~Statistics(); @@ -211,7 +216,7 @@ struct Statistics resetReason(nullptr), start(start), startFaults(startFaults) { - for (size_t i = 0; i < MAX_MULTIPARENT_PHASES + 1; i++) + for (auto i : mozilla::MakeRange(NumTimingArrays)) mozilla::PodArrayZero(phaseTimes[i]); } @@ -220,7 +225,7 @@ struct Statistics const char* resetReason; int64_t start, end; size_t startFaults, endFaults; - int64_t phaseTimes[MAX_MULTIPARENT_PHASES + 1][PHASE_LIMIT]; + PhaseTimeTable phaseTimes; int64_t duration() const { return end - start; } }; @@ -231,9 +236,6 @@ struct Statistics SliceRange sliceRange() const { return slices.all(); } size_t slicesLength() const { return slices.length(); } - /* Create a convenient typedef for referring tables of phase times. */ - typedef int64_t const (*PhaseTimeTable)[PHASE_LIMIT]; - private: JSRuntime* runtime; @@ -264,10 +266,10 @@ struct Statistics int64_t timedGCTime; /* Total time in a given phase for this GC. */ - int64_t phaseTimes[MAX_MULTIPARENT_PHASES + 1][PHASE_LIMIT]; + PhaseTimeTable phaseTimes; /* Total time in a given phase over all GCs. */ - int64_t phaseTotals[MAX_MULTIPARENT_PHASES + 1][PHASE_LIMIT]; + PhaseTimeTable phaseTotals; /* Number of events of this type for this GC. 
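The new `using PhaseTimeTable = int64_t[NumTimingArrays][PHASE_LIMIT];` alias above replaces the old pointer typedef: as a member it declares the actual two-dimensional array, while as a function parameter it decays to a pointer to rows, so the formatters can accept `const PhaseTimeTable` without copying. A small illustration of that behaviour, using made-up sizes:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    const size_t kNumTimingArrays = 7;   // NumTimingArrays in the real code
    const size_t kPhaseLimit = 4;        // made-up; the real PHASE_LIMIT is larger

    using PhaseTimeTable = int64_t[kNumTimingArrays][kPhaseLimit];

    // As a parameter the alias decays, i.e. this is effectively
    //   int64_t SumPhase(size_t phase, const int64_t (*times)[kPhaseLimit]);
    // so no copy of the table is made at the call site.
    int64_t SumPhase(size_t phase, const PhaseTimeTable times) {
      int64_t sum = 0;
      for (size_t i = 0; i < kNumTimingArrays; ++i) {
        sum += times[i][phase];
      }
      return sum;
    }

    int main() {
      // As a member or local, the alias declares storage for the whole table.
      const PhaseTimeTable table = { {0, 0, 5, 0}, {}, {}, {0, 0, 7, 0} };
      std::printf("%lld\n", (long long)SumPhase(2, table));
    }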
*/ unsigned int counts[STAT_LIMIT]; @@ -312,16 +314,16 @@ struct Statistics void sccDurations(int64_t* total, int64_t* maxPause); void printStats(); - UniqueChars formatCompactSlicePhaseTimes(PhaseTimeTable phaseTimes) const; + UniqueChars formatCompactSlicePhaseTimes(const PhaseTimeTable phaseTimes) const; UniqueChars formatDetailedDescription(); UniqueChars formatDetailedSliceDescription(unsigned i, const SliceData& slice); - UniqueChars formatDetailedPhaseTimes(PhaseTimeTable phaseTimes); + UniqueChars formatDetailedPhaseTimes(const PhaseTimeTable phaseTimes); UniqueChars formatDetailedTotals(); UniqueChars formatJsonDescription(uint64_t timestamp); UniqueChars formatJsonSliceDescription(unsigned i, const SliceData& slice); - UniqueChars formatJsonPhaseTimes(PhaseTimeTable phaseTimes); + UniqueChars formatJsonPhaseTimes(const PhaseTimeTable phaseTimes); double computeMMU(int64_t resolution) const; }; diff --git a/js/src/jit-test/tests/basic/statement-after-return.js b/js/src/jit-test/tests/basic/statement-after-return.js index bfb9bed6ec46..7f3906a845ae 100644 --- a/js/src/jit-test/tests/basic/statement-after-return.js +++ b/js/src/jit-test/tests/basic/statement-after-return.js @@ -2,47 +2,36 @@ load(libdir + "class.js"); -if (options().indexOf("werror") == -1) - options("werror"); - function testWarn(code, lineNumber, columnNumber) { - var caught = false; - try { - eval(code); - } catch (e) { - caught = true; - assertEq(e.constructor, SyntaxError); - assertEq(e.lineNumber, lineNumber); - assertEq(e.columnNumber, columnNumber); - } - assertEq(caught, true, "warning should be caught for " + code); + enableLastWarning(); + eval(code); + var warning = getLastWarning(); + assertEq(warning !== null, true, "warning should be caught for " + code); + assertEq(warning.name, "None"); + assertEq(warning.lineNumber, lineNumber); + assertEq(warning.columnNumber, columnNumber); - caught = false; - try { - Reflect.parse(code); - } catch (e) { - caught = true; - assertEq(e.constructor, SyntaxError); - } - assertEq(caught, true, "warning should be caught for " + code); + clearLastWarning(); + Reflect.parse(code); + warning = getLastWarning(); + assertEq(warning !== null, true, "warning should be caught for " + code); + assertEq(warning.name, "None"); + // Warning generated by Reflect.parse has line/column number for Reflect.parse + // itself, not parsed code. 
+ disableLastWarning(); } function testPass(code) { - var caught = false; - try { - eval(code); - } catch (e) { - caught = true; - } - assertEq(caught, false, "warning should not be caught for " + code); + enableLastWarning(); + eval(code); + var warning = getLastWarning(); + assertEq(warning, null, "warning should not be caught for " + code); - caught = false; - try { - Reflect.parse(code); - } catch (e) { - caught = true; - } - assertEq(caught, false, "warning should not be caught for " + code); + clearLastWarning(); + Reflect.parse(code); + warning = getLastWarning(); + assertEq(warning, null, "warning should not be caught for " + code); + disableLastWarning(); } testPass(` diff --git a/js/src/jit-test/tests/basic/syntax-error-illegal-character.js b/js/src/jit-test/tests/basic/syntax-error-illegal-character.js index 1a8551bafa70..0205bbcdd45f 100644 --- a/js/src/jit-test/tests/basic/syntax-error-illegal-character.js +++ b/js/src/jit-test/tests/basic/syntax-error-illegal-character.js @@ -1,3 +1,5 @@ +load(libdir + "class.js"); + var JSMSG_ILLEGAL_CHARACTER = "illegal character"; var JSMSG_UNTERMINATED_STRING = "unterminated string literal"; @@ -25,7 +27,6 @@ function test_eval(code) { assertEq(caught, true); } - function test(code) { test_reflect(code); test_reflect("'use strict'; " + code); @@ -468,16 +469,18 @@ test_no_fun_no_eval("export const a = 1, b = @"); test_no_fun_no_eval("export const a = 1, b = 2 @"); test_no_fun_no_eval("export const a = 1, b = 2; @"); -test_no_fun_no_eval("export class @"); -test_no_fun_no_eval("export class Foo @"); -test_no_fun_no_eval("export class Foo { @"); -test_no_fun_no_eval("export class Foo { constructor @"); -test_no_fun_no_eval("export class Foo { constructor( @"); -test_no_fun_no_eval("export class Foo { constructor() @"); -test_no_fun_no_eval("export class Foo { constructor() { @"); -test_no_fun_no_eval("export class Foo { constructor() {} @"); -test_no_fun_no_eval("export class Foo { constructor() {} } @"); -test_no_fun_no_eval("export class Foo { constructor() {} }; @"); +if (classesEnabled()) { + test_no_fun_no_eval("export class @"); + test_no_fun_no_eval("export class Foo @"); + test_no_fun_no_eval("export class Foo { @"); + test_no_fun_no_eval("export class Foo { constructor @"); + test_no_fun_no_eval("export class Foo { constructor( @"); + test_no_fun_no_eval("export class Foo { constructor() @"); + test_no_fun_no_eval("export class Foo { constructor() { @"); + test_no_fun_no_eval("export class Foo { constructor() {} @"); + test_no_fun_no_eval("export class Foo { constructor() {} } @"); + test_no_fun_no_eval("export class Foo { constructor() {} }; @"); +} test_no_fun_no_eval("export default @"); test_no_fun_no_eval("export default 1 @"); @@ -496,25 +499,27 @@ test_no_fun_no_eval("export default function foo() { @"); test_no_fun_no_eval("export default function foo() {} @"); test_no_fun_no_eval("export default function foo() {}; @"); -test_no_fun_no_eval("export default class @"); -test_no_fun_no_eval("export default class { @"); -test_no_fun_no_eval("export default class { constructor @"); -test_no_fun_no_eval("export default class { constructor( @"); -test_no_fun_no_eval("export default class { constructor() @"); -test_no_fun_no_eval("export default class { constructor() { @"); -test_no_fun_no_eval("export default class { constructor() {} @"); -test_no_fun_no_eval("export default class { constructor() {} } @"); -test_no_fun_no_eval("export default class { constructor() {} }; @"); +if (classesEnabled()) { + test_no_fun_no_eval("export 
default class @"); + test_no_fun_no_eval("export default class { @"); + test_no_fun_no_eval("export default class { constructor @"); + test_no_fun_no_eval("export default class { constructor( @"); + test_no_fun_no_eval("export default class { constructor() @"); + test_no_fun_no_eval("export default class { constructor() { @"); + test_no_fun_no_eval("export default class { constructor() {} @"); + test_no_fun_no_eval("export default class { constructor() {} } @"); + test_no_fun_no_eval("export default class { constructor() {} }; @"); -test_no_fun_no_eval("export default class Foo @"); -test_no_fun_no_eval("export default class Foo { @"); -test_no_fun_no_eval("export default class Foo { constructor @"); -test_no_fun_no_eval("export default class Foo { constructor( @"); -test_no_fun_no_eval("export default class Foo { constructor() @"); -test_no_fun_no_eval("export default class Foo { constructor() { @"); -test_no_fun_no_eval("export default class Foo { constructor() {} @"); -test_no_fun_no_eval("export default class Foo { constructor() {} } @"); -test_no_fun_no_eval("export default class Foo { constructor() {} }; @"); + test_no_fun_no_eval("export default class Foo @"); + test_no_fun_no_eval("export default class Foo { @"); + test_no_fun_no_eval("export default class Foo { constructor @"); + test_no_fun_no_eval("export default class Foo { constructor( @"); + test_no_fun_no_eval("export default class Foo { constructor() @"); + test_no_fun_no_eval("export default class Foo { constructor() { @"); + test_no_fun_no_eval("export default class Foo { constructor() {} @"); + test_no_fun_no_eval("export default class Foo { constructor() {} } @"); + test_no_fun_no_eval("export default class Foo { constructor() {} }; @"); +} // import diff --git a/js/src/jit-test/tests/collections/Map-constructor-1.js b/js/src/jit-test/tests/collections/Map-constructor-1.js index 44a6be9ee3f8..528f43cc2340 100644 --- a/js/src/jit-test/tests/collections/Map-constructor-1.js +++ b/js/src/jit-test/tests/collections/Map-constructor-1.js @@ -10,8 +10,7 @@ m = new Map(null); assertEq(m.size, 0); // FIXME: bug 1083752 -options("werror"); -assertEq(evaluate("Map()", {catchTermination: true}), "terminated"); +assertWarning(() => Map(), "None"); // assertThrowsInstanceOf(() => Map(), TypeError); // assertThrowsInstanceOf(() => Map(undefined), TypeError); // assertThrowsInstanceOf(() => Map(null), TypeError); diff --git a/js/src/jit-test/tests/collections/Set-constructor-1.js b/js/src/jit-test/tests/collections/Set-constructor-1.js index 35a2dedb5e04..47913f869160 100644 --- a/js/src/jit-test/tests/collections/Set-constructor-1.js +++ b/js/src/jit-test/tests/collections/Set-constructor-1.js @@ -10,8 +10,7 @@ s = new Set(null); assertEq(s.size, 0); // FIXME: bug 1083752 -options("werror"); -assertEq(evaluate("Set()", {catchTermination: true}), "terminated"); +assertWarning(() => Set(), "None"); // assertThrowsInstanceOf(() => Set(), TypeError); // assertThrowsInstanceOf(() => Set(undefined), TypeError); // assertThrowsInstanceOf(() => Set(null), TypeError); diff --git a/js/src/jit-test/tests/collections/WeakMap-constructor-1.js b/js/src/jit-test/tests/collections/WeakMap-constructor-1.js index 826aa1d27416..bfeaa2511a3f 100644 --- a/js/src/jit-test/tests/collections/WeakMap-constructor-1.js +++ b/js/src/jit-test/tests/collections/WeakMap-constructor-1.js @@ -7,8 +7,7 @@ new WeakMap(undefined); new WeakMap(null); // FIXME: bug 1083752 -options("werror"); -assertEq(evaluate("WeakMap()", {catchTermination: true}), "terminated"); 
+assertWarning(() => WeakMap(), "None"); // assertThrowsInstanceOf(() => WeakMap(), TypeError); // assertThrowsInstanceOf(() => WeakMap(undefined), TypeError); // assertThrowsInstanceOf(() => WeakMap(null), TypeError); diff --git a/js/src/jit-test/tests/ion/recover-empty-new-object.js b/js/src/jit-test/tests/ion/recover-empty-new-object.js index 459f11f3b744..a34f0d36fb44 100644 --- a/js/src/jit-test/tests/ion/recover-empty-new-object.js +++ b/js/src/jit-test/tests/ion/recover-empty-new-object.js @@ -1,3 +1,8 @@ +// |jit-test| test-join=--no-unboxed-objects +// +// Unboxed object optimization might not trigger in all cases, thus we ensure +// that Sink optimization is working well independently of the +// object representation. // Ion eager fails the test below because we have not yet created any // template object in baseline before running the content of the top-level @@ -5,6 +10,9 @@ if (getJitCompilerOptions()["ion.warmup.trigger"] <= 20) setJitCompilerOption("ion.warmup.trigger", 20); +// These arguments have to be computed by baseline, and thus captured in a +// resume point. The next function checks that we can remove the computation of +// these arguments. function f(a, b, c, d) { } function topLevel() { diff --git a/js/src/jit-test/tests/ion/recover-iterator-next.js b/js/src/jit-test/tests/ion/recover-iterator-next.js new file mode 100644 index 000000000000..ca51fb168151 --- /dev/null +++ b/js/src/jit-test/tests/ion/recover-iterator-next.js @@ -0,0 +1,39 @@ +// |jit-test| test-join=--no-unboxed-objects +// +// Unboxed object optimization might not trigger in all cases, thus we ensure +// that Scalar Replacement optimization is working well independently of the +// object representation. + +// Ion eager fails the test below because we have not yet created any +// template object in baseline before running the content of the top-level +// function. +if (getJitCompilerOptions()["ion.warmup.trigger"] <= 90) + setJitCompilerOption("ion.warmup.trigger", 90); + +// This test checks that we are able to remove the getprop & setprop with scalar +// replacement, so we should not force inline caches, as this would skip the +// generation of getprop & setprop instructions. +if (getJitCompilerOptions()["ion.forceinlineCaches"]) + setJitCompilerOption("ion.forceinlineCaches", 0); + +var arr = new Array(); +var max = 2000; +for (var i=0; i < max; i++) + arr[i] = i; + +function f() { + var res = 0; + var nextObj; + var itr = arr[Symbol.iterator](); + do { + nextObj = itr.next(); + if (nextObj.done) + break; + res += nextObj.value; + assertRecoveredOnBailout(nextObj, true); + } while (true); + return res; +} + +for (var j = 0; j < 10; j++) + assertEq(f(), max * (max - 1) / 2); diff --git a/js/src/jit-test/tests/ion/recover-objects.js b/js/src/jit-test/tests/ion/recover-objects.js index c2cbf50af4c7..30274f7efa65 100644 --- a/js/src/jit-test/tests/ion/recover-objects.js +++ b/js/src/jit-test/tests/ion/recover-objects.js @@ -1,3 +1,9 @@ +// |jit-test| test-join=--no-unboxed-objects +// +// Unboxed object optimization might not trigger in all cases, thus we ensure +// that Scalar Replacement optimization is working well independently of the +// object representation. + // Ion eager fails the test below because we have not yet created any // template object in baseline before running the content of the top-level // function. 
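// Illustrative sketch, not SpiderMonkey code: what the Scalar Replacement checks
// in the tests above are getting at. When an allocation never escapes, the
// compiler can keep just its used fields in locals and drop the object itself,
// which is roughly what assertRecoveredOnBailout(obj, true) asserts for the
// objects built in these tests. The names below are made up for the illustration.
#include <cstdio>

struct Cell { int v; };

int withAllocation(int i) {
    Cell a{i};            // one short-lived object per call
    Cell b{a.v + 1};
    return b.v;
}

int afterScalarReplacement(int i) {
    int a_v = i;          // the objects are gone; only their used fields remain
    int b_v = a_v + 1;
    return b_v;
}

int main() {
    std::printf("%d %d\n", withAllocation(41), afterScalarReplacement(41));  // 42 42
    return 0;
}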
@@ -10,12 +16,14 @@ if (getJitCompilerOptions()["ion.warmup.trigger"] <= 90) if (getJitCompilerOptions()["ion.forceinlineCaches"]) setJitCompilerOption("ion.forceinlineCaches", 0); +function resumeHere() {} var uceFault = function (i) { if (i > 98) uceFault = function (i) { return true; }; return false; }; + // Without "use script" in the inner function, the arguments might be // obersvable. function inline_notSoEmpty1(a, b, c, d) { @@ -25,11 +33,11 @@ function inline_notSoEmpty1(a, b, c, d) { } var uceFault_notSoEmpty1 = eval(uneval(uceFault).replace('uceFault', 'uceFault_notSoEmpty1')); function notSoEmpty1() { - var a = { v: i, notunboxed: undefined }; - var b = { v: 1 + a.v, notunboxed: undefined }; - var c = { v: 2 + b.v, notunboxed: undefined }; - var d = { v: 3 + c.v, notunboxed: undefined }; - var unused = { v: 4 + d.v, notunboxed: undefined }; + var a = { v: i }; + var b = { v: 1 + a.v }; + var c = { v: 2 + b.v }; + var d = { v: 3 + c.v }; + var unused = { v: 4 + d.v }; var res = inline_notSoEmpty1(a, b, c, d); if (uceFault_notSoEmpty1(i) || uceFault_notSoEmpty1(i)) assertEq(i, res.v); @@ -42,23 +50,23 @@ function notSoEmpty1() { assertRecoveredOnBailout(c, true); assertRecoveredOnBailout(d, true); assertRecoveredOnBailout(unused, true); - // Scalar Replacement is coming after the branch removal made by GVN, and - // the ucefault branch is not taken yet. - assertRecoveredOnBailout(res, false); + // The ucefault branch is not taken yet, and GVN removes it. Scalar + // Replacement thus removes the creation of the object. + assertRecoveredOnBailout(res, true); } // Check that we can recover objects with their content. function inline_notSoEmpty2(a, b, c, d) { "use strict"; - return { v: (a.v + b.v + c.v + d.v - 10) / 4, notunboxed: undefined }; + return { v: (a.v + b.v + c.v + d.v - 10) / 4 }; } var uceFault_notSoEmpty2 = eval(uneval(uceFault).replace('uceFault', 'uceFault_notSoEmpty2')); function notSoEmpty2(i) { - var a = { v: i, notunboxed: undefined }; - var b = { v: 1 + a.v, notunboxed: undefined }; - var c = { v: 2 + b.v, notunboxed: undefined }; - var d = { v: 3 + c.v, notunboxed: undefined }; - var unused = { v: 4 + d.v, notunboxed: undefined }; + var a = { v: i }; + var b = { v: 1 + a.v }; + var c = { v: 2 + b.v }; + var d = { v: 3 + c.v }; + var unused = { v: 4 + d.v }; var res = inline_notSoEmpty2(a, b, c, d); if (uceFault_notSoEmpty2(i) || uceFault_notSoEmpty2(i)) assertEq(i, res.v); @@ -67,22 +75,22 @@ function notSoEmpty2(i) { assertRecoveredOnBailout(c, true); assertRecoveredOnBailout(d, true); assertRecoveredOnBailout(unused, true); - // Scalar Replacement is coming after the branch removal made by GVN, and - // the ucefault branch is not taken yet. - assertRecoveredOnBailout(res, false); + // The ucefault branch is not taken yet, and GVN removes it. Scalar + // Replacement thus removes the creation of the object. + assertRecoveredOnBailout(res, true); } // Check that we can recover objects with their content. var argFault_observeArg = function (i) { if (i > 98) return inline_observeArg.arguments[0]; - return { test : i, notunboxed: undefined }; + return { test : i }; }; function inline_observeArg(obj, i) { return argFault_observeArg(i); } function observeArg(i) { - var obj = { test: i, notunboxed: undefined }; + var obj = { test: i }; var res = inline_observeArg(obj, i); assertEq(res.test, i); assertRecoveredOnBailout(obj, true); @@ -90,7 +98,7 @@ function observeArg(i) { // Check case where one successor can have multiple times the same predecessor. 
function complexPhi(i) { - var obj = { test: i, notunboxed: undefined }; + var obj = { test: i }; switch (i) { // TableSwitch case 0: obj.test = 0; break; case 1: obj.test = 1; break; @@ -109,12 +117,12 @@ function complexPhi(i) { function withinIf(i) { var x = undefined; if (i % 2 == 0) { - let obj = { foo: i, notunboxed: undefined }; + let obj = { foo: i }; x = obj.foo; assertRecoveredOnBailout(obj, true); obj = undefined; } else { - let obj = { bar: i, notunboxed: undefined }; + let obj = { bar: i }; x = obj.bar; assertRecoveredOnBailout(obj, true); obj = undefined; @@ -124,22 +132,20 @@ function withinIf(i) { // Check case where one successor can have multiple times the same predecessor. function unknownLoad(i) { - var obj = { foo: i, notunboxed: undefined }; + var obj = { foo: i }; assertEq(obj.bar, undefined); // Unknown properties are using GetPropertyCache. assertRecoveredOnBailout(obj, false); } // Check with dynamic slots. -function resumeHere() {} function dynamicSlots(i) { var obj = { p0: i + 0, p1: i + 1, p2: i + 2, p3: i + 3, p4: i + 4, p5: i + 5, p6: i + 6, p7: i + 7, p8: i + 8, p9: i + 9, p10: i + 10, p11: i + 11, p12: i + 12, p13: i + 13, p14: i + 14, p15: i + 15, p16: i + 16, p17: i + 17, p18: i + 18, p19: i + 19, p20: i + 20, p21: i + 21, p22: i + 22, p23: i + 23, p24: i + 24, p25: i + 25, p26: i + 26, p27: i + 27, p28: i + 28, p29: i + 29, p30: i + 30, p31: i + 31, p32: i + 32, p33: i + 33, p34: i + 34, p35: i + 35, p36: i + 36, p37: i + 37, p38: i + 38, p39: i + 39, p40: i + 40, - p41: i + 41, p42: i + 42, p43: i + 43, p44: i + 44, p45: i + 45, p46: i + 46, p47: i + 47, p48: i + 48, p49: i + 49, p50: i + 50, - notunboxed: undefined + p41: i + 41, p42: i + 42, p43: i + 43, p44: i + 44, p45: i + 45, p46: i + 46, p47: i + 47, p48: i + 48, p49: i + 49, p50: i + 50 }; // Add a function call to capture a resumepoint at the end of the call or // inside the inlined block, such as the bailout does not rewind to the @@ -154,7 +160,6 @@ function Point(x, y) { this.x = x; this.y = y; - this.notUnboxed = undefined; } function createThisWithTemplate(i) @@ -165,7 +170,6 @@ function createThisWithTemplate(i) assertRecoveredOnBailout(p, true); } - for (var i = 0; i < 100; i++) { notSoEmpty1(i); notSoEmpty2(i); diff --git a/js/src/jit-test/tests/modules/export-declaration.js b/js/src/jit-test/tests/modules/export-declaration.js index 3dc2869b2b30..4036a7488524 100644 --- a/js/src/jit-test/tests/modules/export-declaration.js +++ b/js/src/jit-test/tests/modules/export-declaration.js @@ -1,5 +1,6 @@ load(libdir + "match.js"); load(libdir + "asserts.js"); +load(libdir + "class.js"); var { Pattern, MatchError } = Match; @@ -202,16 +203,18 @@ program([ ) ]).assert(Reflect.parse("export function f() {}")); -program([ - exportDeclaration( - classDeclaration( - ident("Foo") - ), - null, - null, - false - ) -]).assert(Reflect.parse("export class Foo { constructor() {} }")); +if (classesEnabled()) { + program([ + exportDeclaration( + classDeclaration( + ident("Foo") + ), + null, + null, + false + ) + ]).assert(Reflect.parse("export class Foo { constructor() {} }")); +} program([ exportDeclaration( @@ -292,27 +295,29 @@ program([ ) ]).assert(Reflect.parse("export default function foo() {}")); -program([ - exportDeclaration( - classDeclaration( - ident("*default*") - ), - null, - null, - true - ) -]).assert(Reflect.parse("export default class { constructor() {} }")); +if (classesEnabled()) { + program([ + exportDeclaration( + classDeclaration( + ident("*default*") + ), + null, + null, + true + ) + 
]).assert(Reflect.parse("export default class { constructor() {} }")); -program([ - exportDeclaration( - classDeclaration( - ident("Foo") - ), - null, - null, - true - ) -]).assert(Reflect.parse("export default class Foo { constructor() {} }")); + program([ + exportDeclaration( + classDeclaration( + ident("Foo") + ), + null, + null, + true + ) + ]).assert(Reflect.parse("export default class Foo { constructor() {} }")); +} program([ exportDeclaration( diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp index de1e86e9da9b..7815a0cd3f19 100644 --- a/js/src/jit/BaselineBailouts.cpp +++ b/js/src/jit/BaselineBailouts.cpp @@ -372,8 +372,9 @@ struct BaselineStackBuilder MOZ_ASSERT(BaselineFrameReg == FramePointer); priorOffset -= sizeof(void*); return virtualPointerAtStackOffset(priorOffset); -#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS) - // On X64, ARM and MIPS, the frame pointer save location depends on +#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ + defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_MIPS) + // On X64, ARM, ARM64, and MIPS, the frame pointer save location depends on // the caller of the rectifier frame. BufferPointer priorFrame = pointerAtStackOffset(priorOffset); @@ -1569,9 +1570,9 @@ jit::BailoutIonToBaseline(JSContext* cx, JitActivation* activation, JitFrameIter // Do stack check. bool overRecursed = false; - BaselineBailoutInfo* info = builder.info(); + BaselineBailoutInfo *info = builder.info(); uint8_t* newsp = info->incomingStack - (info->copyStackTop - info->copyStackBottom); -#if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR) +#if defined(JS_ARM_SIMULATOR) || defined(JS_ARM64_SIMULATOR) || defined(JS_MIPS_SIMULATOR) if (Simulator::Current()->overRecursed(uintptr_t(newsp))) overRecursed = true; #else diff --git a/js/src/jit/BaselineCompiler.cpp b/js/src/jit/BaselineCompiler.cpp index 14a812acd6f4..c7383d82f63c 100644 --- a/js/src/jit/BaselineCompiler.cpp +++ b/js/src/jit/BaselineCompiler.cpp @@ -345,13 +345,11 @@ BaselineCompiler::emitPrologue() emitProfilerEnterFrame(); masm.push(BaselineFrameReg); - masm.mov(BaselineStackReg, BaselineFrameReg); - - masm.subPtr(Imm32(BaselineFrame::Size()), BaselineStackReg); + masm.moveStackPtrTo(BaselineFrameReg); + masm.subFromStackPtr(Imm32(BaselineFrame::Size())); // Initialize BaselineFrame. For eval scripts, the scope chain - // is passed in R1, so we have to be careful not to clobber - // it. + // is passed in R1, so we have to be careful not to clobber it. // Initialize BaselineFrame::flags. uint32_t flags = 0; @@ -453,7 +451,7 @@ BaselineCompiler::emitEpilogue() return false; #endif - masm.mov(BaselineFrameReg, BaselineStackReg); + masm.moveToStackPtr(BaselineFrameReg); masm.pop(BaselineFrameReg); emitProfilerExitFrame(); @@ -479,7 +477,7 @@ BaselineCompiler::emitOutOfLinePostBarrierSlot() regs.take(objReg); regs.take(BaselineFrameReg); Register scratch = regs.takeAny(); -#if defined(JS_CODEGEN_ARM) +#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) // On ARM, save the link register before calling. It contains the return // address. The |masm.ret()| later will pop this into |pc| to return. masm.push(lr); @@ -527,7 +525,7 @@ BaselineCompiler::emitStackCheck(bool earlyCheck) uint32_t slotsSize = script->nslots() * sizeof(Value); uint32_t tolerance = earlyCheck ? 
slotsSize : 0; - masm.movePtr(BaselineStackReg, R1.scratchReg()); + masm.moveStackPtrTo(R1.scratchReg()); // If this is the early stack check, locals haven't been pushed yet. Adjust the // stack pointer to account for the locals that would be pushed before performing @@ -3710,7 +3708,7 @@ BaselineCompiler::emit_JSOP_RESUME() // Update BaselineFrame frameSize field and create the frame descriptor. masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset), scratch2); - masm.subPtr(BaselineStackReg, scratch2); + masm.subStackPtrFrom(scratch2); masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); masm.makeFrameDescriptor(scratch2, JitFrame_BaselineJS); @@ -3755,8 +3753,8 @@ BaselineCompiler::emit_JSOP_RESUME() // Construct BaselineFrame. masm.push(BaselineFrameReg); - masm.mov(BaselineStackReg, BaselineFrameReg); - masm.subPtr(Imm32(BaselineFrame::Size()), BaselineStackReg); + masm.moveStackPtrTo(BaselineFrameReg); + masm.subFromStackPtr(Imm32(BaselineFrame::Size())); masm.checkStackAlignment(); // Store flags and scope chain. @@ -3823,7 +3821,7 @@ BaselineCompiler::emit_JSOP_RESUME() masm.computeEffectiveAddress(Address(BaselineFrameReg, BaselineFrame::FramePointerOffset), scratch2); masm.movePtr(scratch2, scratch1); - masm.subPtr(BaselineStackReg, scratch2); + masm.subStackPtrFrom(scratch2); masm.store32(scratch2, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); masm.loadBaselineFramePtr(BaselineFrameReg, scratch2); @@ -3838,14 +3836,18 @@ BaselineCompiler::emit_JSOP_RESUME() return false; // Create the frame descriptor. - masm.subPtr(BaselineStackReg, scratch1); + masm.subStackPtrFrom(scratch1); masm.makeFrameDescriptor(scratch1, JitFrame_BaselineJS); // Push the frame descriptor and a dummy return address (it doesn't // matter what we push here, frame iterators will use the frame pc // set in jit::GeneratorThrowOrClose). masm.push(scratch1); + + // On ARM64, the callee will push the return address. +#ifndef JS_CODEGEN_ARM64 masm.push(ImmWord(0)); +#endif masm.jump(code); } @@ -3872,7 +3874,7 @@ BaselineCompiler::emit_JSOP_RESUME() // After the generator returns, we restore the stack pointer, push the // return value and we're done. 
masm.bind(&returnTarget); - masm.computeEffectiveAddress(frame.addressOfStackValue(frame.peek(-1)), BaselineStackReg); + masm.computeEffectiveAddress(frame.addressOfStackValue(frame.peek(-1)), masm.getStackPointer()); frame.popn(2); frame.push(R0); return true; diff --git a/js/src/jit/BaselineCompiler.h b/js/src/jit/BaselineCompiler.h index 492f469602d9..b13a74e4ce5b 100644 --- a/js/src/jit/BaselineCompiler.h +++ b/js/src/jit/BaselineCompiler.h @@ -14,6 +14,8 @@ # include "jit/x64/BaselineCompiler-x64.h" #elif defined(JS_CODEGEN_ARM) # include "jit/arm/BaselineCompiler-arm.h" +#elif defined(JS_CODEGEN_ARM64) +# include "jit/arm64/BaselineCompiler-arm64.h" #elif defined(JS_CODEGEN_MIPS) # include "jit/mips/BaselineCompiler-mips.h" #elif defined(JS_CODEGEN_NONE) diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp index 7cf1f090a9d2..765eed07d6e7 100644 --- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -189,7 +189,7 @@ CodeGenerator::visitValueToInt32(LValueToInt32* lir) Register stringReg; if (input->mightBeType(MIRType_String)) { stringReg = ToRegister(lir->temp()); - OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, (ArgList(), stringReg), + OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(stringReg), StoreFloatRegisterTo(temp)); stringEntry = oolString->entry(); stringRejoin = oolString->rejoin(); @@ -850,7 +850,7 @@ CodeGenerator::visitIntToString(LIntToString* lir) Register input = ToRegister(lir->input()); Register output = ToRegister(lir->output()); - OutOfLineCode* ool = oolCallVM(IntToStringInfo, lir, (ArgList(), input), + OutOfLineCode* ool = oolCallVM(IntToStringInfo, lir, ArgList(input), StoreRegisterTo(output)); emitIntToString(input, output, ool->entry()); @@ -868,7 +868,7 @@ CodeGenerator::visitDoubleToString(LDoubleToString* lir) Register temp = ToRegister(lir->tempInt()); Register output = ToRegister(lir->output()); - OutOfLineCode* ool = oolCallVM(DoubleToStringInfo, lir, (ArgList(), input), + OutOfLineCode* ool = oolCallVM(DoubleToStringInfo, lir, ArgList(input), StoreRegisterTo(output)); // Try double to integer conversion and run integer to string code. 
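// Illustrative sketch, not the real SpiderMonkey types: the oolCallVM call sites
// in this file change from the (ArgList(), a, b) spelling to ArgList(a, b). The
// parenthesized old form is the kind of call an overloaded comma operator makes
// possible, while the new spelling can be written as one variadic constructor.
// ArgSeq below is a hypothetical stand-in used only to contrast the two shapes.
#include <cstdio>

struct ArgSeq {
    int count = 0;
    ArgSeq() = default;
    template <typename... Args>
    explicit ArgSeq(const Args&...) : count(sizeof...(Args)) {}
    template <typename T>
    ArgSeq operator,(const T&) const {       // old-style chaining, for contrast
        ArgSeq next;
        next.count = count + 1;
        return next;
    }
};

int main() {
    ArgSeq oldStyle = (ArgSeq(), 1, 2.5, "x");   // comma-operator collection
    ArgSeq newStyle(1, 2.5, "x");                // variadic constructor
    std::printf("%d %d\n", oldStyle.count, newStyle.count);  // prints "3 3"
    return 0;
}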
@@ -887,7 +887,7 @@ CodeGenerator::visitValueToString(LValueToString* lir) ValueOperand input = ToValue(lir, LValueToString::Input); Register output = ToRegister(lir->output()); - OutOfLineCode* ool = oolCallVM(PrimitiveToStringInfo, lir, (ArgList(), input), + OutOfLineCode* ool = oolCallVM(PrimitiveToStringInfo, lir, ArgList(input), StoreRegisterTo(output)); Label done; @@ -982,7 +982,7 @@ CodeGenerator::visitValueToObjectOrNull(LValueToObjectOrNull* lir) ValueOperand input = ToValue(lir, LValueToObjectOrNull::Input); Register output = ToRegister(lir->output()); - OutOfLineCode* ool = oolCallVM(ToObjectInfo, lir, (ArgList(), input, Imm32(0)), + OutOfLineCode* ool = oolCallVM(ToObjectInfo, lir, ArgList(input, Imm32(0)), StoreRegisterTo(output)); Label done; @@ -1701,7 +1701,7 @@ CodeGenerator::visitLambda(LLambda* lir) Register tempReg = ToRegister(lir->temp()); const LambdaFunctionInfo& info = lir->mir()->info(); - OutOfLineCode* ool = oolCallVM(LambdaInfo, lir, (ArgList(), ImmGCPtr(info.fun), scopeChain), + OutOfLineCode* ool = oolCallVM(LambdaInfo, lir, ArgList(ImmGCPtr(info.fun), scopeChain), StoreRegisterTo(output)); MOZ_ASSERT(!info.singletonType); @@ -2515,7 +2515,7 @@ CodeGenerator::visitConvertElementsToDoubles(LConvertElementsToDoubles* lir) Register elements = ToRegister(lir->elements()); OutOfLineCode* ool = oolCallVM(ConvertElementsToDoublesInfo, lir, - (ArgList(), elements), StoreNothing()); + ArgList(elements), StoreNothing()); Address convertedAddress(elements, ObjectElements::offsetOfFlags()); Imm32 bit(ObjectElements::CONVERT_DOUBLE_ELEMENTS); @@ -2561,7 +2561,7 @@ CodeGenerator::visitMaybeCopyElementsForWrite(LMaybeCopyElementsForWrite* lir) Register temp = ToRegister(lir->temp()); OutOfLineCode* ool = oolCallVM(CopyElementsForWriteInfo, lir, - (ArgList(), object), StoreNothing()); + ArgList(object), StoreNothing()); if (lir->mir()->checkNative()) { masm.loadObjClass(object, temp); @@ -4299,7 +4299,7 @@ CodeGenerator::visitNewArrayCopyOnWrite(LNewArrayCopyOnWrite* lir) // If we have a template object, we can inline call object creation. 
OutOfLineCode* ool = oolCallVM(NewArrayCopyOnWriteInfo, lir, - (ArgList(), ImmGCPtr(templateObject), Imm32(initialHeap)), + ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(objReg)); masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry()); @@ -4322,7 +4322,7 @@ CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) gc::InitialHeap initialHeap = lir->mir()->initialHeap(); OutOfLineCode* ool = oolCallVM(ArrayConstructorOneArgInfo, lir, - (ArgList(), ImmGCPtr(templateObject->group()), lengthReg), + ArgList(ImmGCPtr(templateObject->group()), lengthReg), StoreRegisterTo(objReg)); bool canInline = true; @@ -4559,7 +4559,7 @@ CodeGenerator::visitNewTypedObject(LNewTypedObject* lir) gc::InitialHeap initialHeap = lir->mir()->initialHeap(); OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir, - (ArgList(), ImmGCPtr(templateObject), Imm32(initialHeap)), + ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(object)); masm.createGCObject(object, temp, templateObject, initialHeap, ool->entry()); @@ -4581,7 +4581,7 @@ CodeGenerator::visitSimdBox(LSimdBox* lir) MOZ_ASSERT(lir->safepoint()->liveRegs().has(in), "Save the input register across the oolCallVM"); OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir, - (ArgList(), ImmGCPtr(templateObject), Imm32(initialHeap)), + ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(object)); masm.createGCObject(object, temp, templateObject, initialHeap, ool->entry()); @@ -4705,8 +4705,7 @@ CodeGenerator::visitNewDeclEnvObject(LNewDeclEnvObject* lir) // If we have a template object, we can inline call object creation. OutOfLineCode* ool = oolCallVM(NewDeclEnvObjectInfo, lir, - (ArgList(), ImmGCPtr(info.funMaybeLazy()), - Imm32(gc::DefaultHeap)), + ArgList(ImmGCPtr(info.funMaybeLazy()), Imm32(gc::DefaultHeap)), StoreRegisterTo(objReg)); bool initContents = ShouldInitFixedSlots(lir, templateObj); @@ -4731,9 +4730,9 @@ CodeGenerator::visitNewCallObject(LNewCallObject* lir) JSScript* script = lir->mir()->block()->info().script(); uint32_t lexicalBegin = script->bindings.aliasedBodyLevelLexicalBegin(); OutOfLineCode* ool = oolCallVM(NewCallObjectInfo, lir, - (ArgList(), ImmGCPtr(templateObj->lastProperty()), - ImmGCPtr(templateObj->group()), - Imm32(lexicalBegin)), + ArgList(ImmGCPtr(templateObj->lastProperty()), + ImmGCPtr(templateObj->group()), + Imm32(lexicalBegin)), StoreRegisterTo(objReg)); // Inline call object creation, using the OOL path only for tricky cases. @@ -4759,8 +4758,8 @@ CodeGenerator::visitNewSingletonCallObject(LNewSingletonCallObject* lir) uint32_t lexicalBegin = script->bindings.aliasedBodyLevelLexicalBegin(); OutOfLineCode* ool; ool = oolCallVM(NewSingletonCallObjectInfo, lir, - (ArgList(), ImmGCPtr(templateObj->as().lastProperty()), - Imm32(lexicalBegin)), + ArgList(ImmGCPtr(templateObj->as().lastProperty()), + Imm32(lexicalBegin)), StoreRegisterTo(objReg)); // Objects can only be given singleton types in VM calls. 
We make the call @@ -4782,7 +4781,7 @@ CodeGenerator::visitNewStringObject(LNewStringObject* lir) StringObject* templateObj = lir->mir()->templateObj(); - OutOfLineCode* ool = oolCallVM(NewStringObjectInfo, lir, (ArgList(), input), + OutOfLineCode* ool = oolCallVM(NewStringObjectInfo, lir, ArgList(input), StoreRegisterTo(output)); masm.createGCObject(output, temp, templateObj, gc::DefaultHeap, ool->entry()); @@ -4934,7 +4933,7 @@ CodeGenerator::visitCreateThisWithTemplate(LCreateThisWithTemplate* lir) Register tempReg = ToRegister(lir->temp()); OutOfLineCode* ool = oolCallVM(NewInitObjectWithTemplateInfo, lir, - (ArgList(), ImmGCPtr(templateObject)), + ArgList(ImmGCPtr(templateObject)), StoreRegisterTo(objReg)); // Allocate. If the FreeList is empty, call to VM, which may GC. @@ -5037,7 +5036,7 @@ CodeGenerator::visitComputeThis(LComputeThis* lir) ValueOperand value = ToValue(lir, LComputeThis::ValueIndex); Register output = ToRegister(lir->output()); - OutOfLineCode* ool = oolCallVM(BoxNonStrictThisInfo, lir, (ArgList(), value), + OutOfLineCode* ool = oolCallVM(BoxNonStrictThisInfo, lir, ArgList(value), StoreRegisterTo(output)); masm.branchTestObject(Assembler::NotEqual, value, ool->entry()); @@ -5465,10 +5464,10 @@ CodeGenerator::emitCompareS(LInstruction* lir, JSOp op, Register left, Register OutOfLineCode* ool = nullptr; if (op == JSOP_EQ || op == JSOP_STRICTEQ) { - ool = oolCallVM(StringsEqualInfo, lir, (ArgList(), left, right), StoreRegisterTo(output)); + ool = oolCallVM(StringsEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output)); } else { MOZ_ASSERT(op == JSOP_NE || op == JSOP_STRICTNE); - ool = oolCallVM(StringsNotEqualInfo, lir, (ArgList(), left, right), StoreRegisterTo(output)); + ool = oolCallVM(StringsNotEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output)); } masm.compareStrings(op, left, right, output, ool->entry()); @@ -5819,7 +5818,7 @@ static const VMFunction ConcatStringsInfo = FunctionInfo(Concat void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs, Register output) { - OutOfLineCode* ool = oolCallVM(ConcatStringsInfo, lir, (ArgList(), lhs, rhs), + OutOfLineCode* ool = oolCallVM(ConcatStringsInfo, lir, ArgList(lhs, rhs), StoreRegisterTo(output)); JitCode* stringConcatStub = gen->compartment->jitCompartment()->stringConcatStubNoBarrier(); @@ -6009,8 +6008,8 @@ CodeGenerator::visitSubstr(LSubstr* lir) // can be handled by allocate in ool code and returning to jit code to fill // in all data. 
OutOfLineCode* ool = oolCallVM(SubstringKernelInfo, lir, - (ArgList(), string, begin, length), - StoreRegisterTo(output)); + ArgList(string, begin, length), + StoreRegisterTo(output)); Label* slowPath = ool->entry(); Label* done = ool->rejoin(); @@ -6338,7 +6337,7 @@ CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) Register index = ToRegister(lir->index()); Register output = ToRegister(lir->output()); - OutOfLineCode* ool = oolCallVM(CharCodeAtInfo, lir, (ArgList(), str, index), StoreRegisterTo(output)); + OutOfLineCode* ool = oolCallVM(CharCodeAtInfo, lir, ArgList(str, index), StoreRegisterTo(output)); masm.branchIfRope(str, ool->entry()); masm.loadStringChar(str, index, output); @@ -6355,7 +6354,7 @@ CodeGenerator::visitFromCharCode(LFromCharCode* lir) Register code = ToRegister(lir->code()); Register output = ToRegister(lir->output()); - OutOfLineCode* ool = oolCallVM(StringFromCharCodeInfo, lir, (ArgList(), code), StoreRegisterTo(output)); + OutOfLineCode* ool = oolCallVM(StringFromCharCodeInfo, lir, ArgList(code), StoreRegisterTo(output)); // OOL path if code >= UNIT_STATIC_LIMIT. masm.branch32(Assembler::AboveOrEqual, code, Imm32(StaticStrings::UNIT_STATIC_LIMIT), @@ -6996,7 +6995,7 @@ CodeGenerator::visitConvertUnboxedObjectToNative(LConvertUnboxedObjectToNative* OutOfLineCode* ool = oolCallVM(lir->mir()->group()->unboxedLayoutDontCheckGeneration().isArray() ? ConvertUnboxedArrayObjectToNativeInfo : ConvertUnboxedPlainObjectToNativeInfo, - lir, (ArgList(), object), StoreNothing()); + lir, ArgList(object), StoreNothing()); masm.branchPtr(Assembler::Equal, Address(object, JSObject::offsetOfGroup()), ImmGCPtr(lir->mir()->group()), ool->entry()); @@ -7014,10 +7013,10 @@ CodeGenerator::emitArrayPopShift(LInstruction* lir, const MArrayPopShift* mir, R OutOfLineCode* ool; if (mir->mode() == MArrayPopShift::Pop) { - ool = oolCallVM(ArrayPopDenseInfo, lir, (ArgList(), obj), StoreValueTo(out)); + ool = oolCallVM(ArrayPopDenseInfo, lir, ArgList(obj), StoreValueTo(out)); } else { MOZ_ASSERT(mir->mode() == MArrayPopShift::Shift); - ool = oolCallVM(ArrayShiftDenseInfo, lir, (ArgList(), obj), StoreValueTo(out)); + ool = oolCallVM(ArrayShiftDenseInfo, lir, ArgList(obj), StoreValueTo(out)); } // VM call if a write barrier is necessary. 
@@ -7138,7 +7137,7 @@ void CodeGenerator::emitArrayPush(LInstruction* lir, const MArrayPush* mir, Register obj, ConstantOrRegister value, Register elementsTemp, Register length) { - OutOfLineCode* ool = oolCallVM(ArrayPushDenseInfo, lir, (ArgList(), obj, value), StoreRegisterTo(length)); + OutOfLineCode* ool = oolCallVM(ArrayPushDenseInfo, lir, ArgList(obj, value), StoreRegisterTo(length)); Int32Key key = Int32Key(length); if (mir->unboxedType() == JSVAL_TYPE_MAGIC) { @@ -7328,7 +7327,7 @@ CodeGenerator::visitIteratorStart(LIteratorStart* lir) uint32_t flags = lir->mir()->flags(); OutOfLineCode* ool = oolCallVM(GetIteratorObjectInfo, lir, - (ArgList(), obj, Imm32(flags)), StoreRegisterTo(output)); + ArgList(obj, Imm32(flags)), StoreRegisterTo(output)); const Register temp1 = ToRegister(lir->temp1()); const Register temp2 = ToRegister(lir->temp2()); @@ -7457,7 +7456,7 @@ CodeGenerator::visitIteratorMore(LIteratorMore* lir) const ValueOperand output = ToOutValue(lir); const Register temp = ToRegister(lir->temp()); - OutOfLineCode* ool = oolCallVM(IteratorMoreInfo, lir, (ArgList(), obj), StoreValueTo(output)); + OutOfLineCode* ool = oolCallVM(IteratorMoreInfo, lir, ArgList(obj), StoreValueTo(output)); Register outputScratch = output.scratchReg(); LoadNativeIterator(masm, obj, outputScratch, ool->entry()); @@ -7512,7 +7511,7 @@ CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) const Register temp2 = ToRegister(lir->temp2()); const Register temp3 = ToRegister(lir->temp3()); - OutOfLineCode* ool = oolCallVM(CloseIteratorInfo, lir, (ArgList(), obj), StoreNothing()); + OutOfLineCode* ool = oolCallVM(CloseIteratorInfo, lir, ArgList(obj), StoreNothing()); LoadNativeIterator(masm, obj, temp1, ool->entry()); @@ -8869,11 +8868,10 @@ CodeGenerator::visitToIdV(LToIdV* lir) ValueOperand index = ToValue(lir, LToIdV::Index); OutOfLineCode* ool = oolCallVM(ToIdInfo, lir, - (ArgList(), - ImmGCPtr(current->mir()->info().script()), - ImmPtr(lir->mir()->resumePoint()->pc()), - ToValue(lir, LToIdV::Object), - ToValue(lir, LToIdV::Index)), + ArgList(ImmGCPtr(current->mir()->info().script()), + ImmPtr(lir->mir()->resumePoint()->pc()), + ToValue(lir, LToIdV::Object), + ToValue(lir, LToIdV::Index)), StoreValueTo(out)); Register tag = masm.splitTagForTest(index); @@ -9092,7 +9090,7 @@ CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) Scalar::Type readType = mir->readType(); unsigned numElems = mir->numElems(); - int width = Scalar::byteSize(mir->indexType()); + int width = Scalar::byteSize(mir->storageType()); bool canonicalizeDouble = mir->canonicalizeDoubles(); Label fail; @@ -9179,7 +9177,7 @@ CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) Scalar::Type writeType = mir->writeType(); unsigned numElems = mir->numElems(); - int width = Scalar::byteSize(mir->indexType()); + int width = Scalar::byteSize(mir->storageType()); if (lir->index()->isConstant()) { Address dest(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment()); @@ -9346,7 +9344,7 @@ CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) Label* stringEntry; Label* stringRejoin; if (input->mightBeType(MIRType_String)) { - OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, (ArgList(), output), + OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(output), StoreFloatRegisterTo(tempFloat)); stringEntry = oolString->entry(); stringRejoin = oolString->rejoin(); @@ -9398,7 +9396,7 @@ CodeGenerator::visitInArray(LInArray* lir) MOZ_ASSERT_IF(index < 0, mir->needsNegativeIntCheck()); if 
(mir->needsNegativeIntCheck()) { ool = oolCallVM(OperatorInIInfo, lir, - (ArgList(), Imm32(index), ToRegister(lir->object())), + ArgList(Imm32(index), ToRegister(lir->object())), StoreRegisterTo(output)); failedInitLength = ool->entry(); } @@ -9426,7 +9424,7 @@ CodeGenerator::visitInArray(LInArray* lir) if (mir->needsNegativeIntCheck()) { masm.bind(&negativeIntCheck); ool = oolCallVM(OperatorInIInfo, lir, - (ArgList(), index, ToRegister(lir->object())), + ArgList(index, ToRegister(lir->object())), StoreRegisterTo(output)); masm.branch32(Assembler::LessThan, index, Imm32(0), ool->entry()); @@ -9528,7 +9526,7 @@ CodeGenerator::emitInstanceOf(LInstruction* ins, JSObject* prototypeObject) // register is already correct. OutOfLineCode* ool = oolCallVM(IsDelegateObjectInfo, ins, - (ArgList(), ImmGCPtr(prototypeObject), objReg), + ArgList(ImmGCPtr(prototypeObject), objReg), StoreRegisterTo(output)); // Regenerate the original lhs object for the VM call. @@ -10060,7 +10058,7 @@ CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) { - OutOfLineCode* ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing()); + OutOfLineCode* ool = oolCallVM(InterruptCheckInfo, lir, ArgList(), StoreNothing()); AbsoluteAddress interruptAddr(GetJitContext()->runtime->addressOfInterruptUint32()); masm.branch32(Assembler::NotEqual, interruptAddr, Imm32(0), ool->entry()); @@ -10096,9 +10094,9 @@ CodeGenerator::visitRecompileCheck(LRecompileCheck* ins) Register tmp = ToRegister(ins->scratch()); OutOfLineCode* ool; if (ins->mir()->forceRecompilation()) - ool = oolCallVM(ForcedRecompileFnInfo, ins, (ArgList()), StoreRegisterTo(tmp)); + ool = oolCallVM(ForcedRecompileFnInfo, ins, ArgList(), StoreRegisterTo(tmp)); else - ool = oolCallVM(RecompileFnInfo, ins, (ArgList()), StoreRegisterTo(tmp)); + ool = oolCallVM(RecompileFnInfo, ins, ArgList(), StoreRegisterTo(tmp)); // Check if warm-up counter is high enough. AbsoluteAddress warmUpCount = AbsoluteAddress(ins->mir()->script()->addressOfWarmUpCounter()); diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp index 1c9599881feb..93bdab24501e 100644 --- a/js/src/jit/Ion.cpp +++ b/js/src/jit/Ion.cpp @@ -1350,16 +1350,14 @@ OptimizeMIR(MIRGenerator* mir) return false; } - if (mir->optimizationInfo().scalarReplacementEnabled()) { - AutoTraceLog log(logger, TraceLogger_ScalarReplacement); - if (!ScalarReplacement(mir, graph)) - return false; - gs.spewPass("Scalar Replacement"); - AssertGraphCoherency(graph); + ValueNumberer gvn(mir, graph); + if (!gvn.init()) + return false; - if (mir->shouldCancel("Scalar Replacement")) - return false; - } + size_t doRepeatOptimizations = 0; + repeatOptimizations: + doRepeatOptimizations++; + MOZ_ASSERT(doRepeatOptimizations <= 2); if (!mir->compilingAsmJS()) { AutoTraceLog log(logger, TraceLogger_ApplyTypes); @@ -1395,10 +1393,6 @@ OptimizeMIR(MIRGenerator* mir) return false; } - ValueNumberer gvn(mir, graph); - if (!gvn.init()) - return false; - // Alias analysis is required for LICM and GVN so that we don't move // loads across stores. 
if (mir->optimizationInfo().licmEnabled() || @@ -1454,6 +1448,26 @@ OptimizeMIR(MIRGenerator* mir) } } + if (mir->optimizationInfo().scalarReplacementEnabled() && doRepeatOptimizations <= 1) { + AutoTraceLog log(logger, TraceLogger_ScalarReplacement); + bool success = false; + if (!ScalarReplacement(mir, graph, &success)) + return false; + gs.spewPass("Scalar Replacement"); + AssertGraphCoherency(graph); + + if (mir->shouldCancel("Scalar Replacement")) + return false; + + // We got some success at removing objects allocation and removing the + // loads and stores, unfortunately, this phase is terrible at keeping + // the type consistency, so we re-run the Apply Type phase. As this + // optimization folds loads and stores, it might also introduce new + // opportunities for GVN and LICM, so re-run them as well. + if (success) + goto repeatOptimizations; + } + if (mir->optimizationInfo().rangeAnalysisEnabled()) { AutoTraceLog log(logger, TraceLogger_RangeAnalysis); RangeAnalysis r(mir, graph); @@ -2415,8 +2429,10 @@ jit::CanEnter(JSContext* cx, RunState& state) return Method_CantCompile; } - if (!state.maybeCreateThisForConstructor(cx)) + if (!state.maybeCreateThisForConstructor(cx)) { + cx->recoverFromOutOfMemory(); return Method_Skipped; + } } // If --ion-eager is used, compile with Baseline first, so that we diff --git a/js/src/jit/IonBuilder.cpp b/js/src/jit/IonBuilder.cpp index 068884a70643..f9843525d5b5 100644 --- a/js/src/jit/IonBuilder.cpp +++ b/js/src/jit/IonBuilder.cpp @@ -7628,17 +7628,13 @@ IonBuilder::jsop_getgname(PropertyName* name) if (!getStaticName(obj, name, &emitted) || emitted) return emitted; - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache; - - { + if (!forceInlineCaches()) { TemporaryTypeSet* types = bytecodeTypes(pc); MDefinition* globalObj = constant(ObjectValue(*obj)); if (!getPropTryCommonGetter(&emitted, globalObj, name, types) || emitted) return emitted; } - do_InlineCache: return jsop_getname(name); } @@ -7762,38 +7758,37 @@ IonBuilder::jsop_getelem() obj = maybeUnboxForPropertyAccess(obj); bool emitted = false; - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache; - trackOptimizationAttempt(TrackedStrategy::GetElem_TypedObject); - if (!getElemTryTypedObject(&emitted, obj, index) || emitted) - return emitted; + if (!forceInlineCaches()) { + trackOptimizationAttempt(TrackedStrategy::GetElem_TypedObject); + if (!getElemTryTypedObject(&emitted, obj, index) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::GetElem_Dense); - if (!getElemTryDense(&emitted, obj, index) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::GetElem_Dense); + if (!getElemTryDense(&emitted, obj, index) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::GetElem_TypedStatic); - if (!getElemTryTypedStatic(&emitted, obj, index) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::GetElem_TypedStatic); + if (!getElemTryTypedStatic(&emitted, obj, index) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::GetElem_TypedArray); - if (!getElemTryTypedArray(&emitted, obj, index) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::GetElem_TypedArray); + if (!getElemTryTypedArray(&emitted, obj, index) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::GetElem_String); - if (!getElemTryString(&emitted, obj, index) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::GetElem_String); 
+ if (!getElemTryString(&emitted, obj, index) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::GetElem_Arguments); - if (!getElemTryArguments(&emitted, obj, index) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::GetElem_Arguments); + if (!getElemTryArguments(&emitted, obj, index) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::GetElem_ArgumentsInlined); - if (!getElemTryArgumentsInlined(&emitted, obj, index) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::GetElem_ArgumentsInlined); + if (!getElemTryArgumentsInlined(&emitted, obj, index) || emitted) + return emitted; + } - do_InlineCache: if (script()->argumentsHasVarBinding() && obj->mightBeType(MIRType_MagicOptimizedArguments)) return abort("Type is not definitely lazy arguments."); @@ -8816,34 +8811,30 @@ IonBuilder::jsop_setelem() return resumeAfter(ins); } - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache; + if (!forceInlineCaches()) { + trackOptimizationAttempt(TrackedStrategy::SetElem_TypedObject); + if (!setElemTryTypedObject(&emitted, object, index, value) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::SetElem_TypedObject); - if (!setElemTryTypedObject(&emitted, object, index, value) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::SetElem_TypedStatic); + if (!setElemTryTypedStatic(&emitted, object, index, value) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::SetElem_TypedStatic); - if (!setElemTryTypedStatic(&emitted, object, index, value) || emitted) - return emitted; + trackOptimizationAttempt(TrackedStrategy::SetElem_TypedArray); + if (!setElemTryTypedArray(&emitted, object, index, value) || emitted) + return emitted; - trackOptimizationAttempt(TrackedStrategy::SetElem_TypedArray); - if (!setElemTryTypedArray(&emitted, object, index, value) || emitted) - return emitted; - - { trackOptimizationAttempt(TrackedStrategy::SetElem_Dense); SetElemICInspector icInspect(inspector->setElemICInspector(pc)); bool writeHole = icInspect.sawOOBDenseWrite(); if (!setElemTryDense(&emitted, object, index, value, writeHole) || emitted) return emitted; + + trackOptimizationAttempt(TrackedStrategy::SetElem_Arguments); + if (!setElemTryArguments(&emitted, object, index, value) || emitted) + return emitted; } - trackOptimizationAttempt(TrackedStrategy::SetElem_Arguments); - if (!setElemTryArguments(&emitted, object, index, value) || emitted) - return emitted; - - do_InlineCache: if (script()->argumentsHasVarBinding() && object->mightBeType(MIRType_MagicOptimizedArguments) && info().analysisMode() != Analysis_ArgumentsUsage) @@ -10122,45 +10113,43 @@ IonBuilder::jsop_getprop(PropertyName* name) if (!getPropTryInnerize(&emitted, obj, name, types) || emitted) return emitted; - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache; + if (!forceInlineCaches()) { + // Try to hardcode known constants. + trackOptimizationAttempt(TrackedStrategy::GetProp_Constant); + if (!getPropTryConstant(&emitted, obj, name, types) || emitted) + return emitted; - // Try to hardcode known constants. 
- trackOptimizationAttempt(TrackedStrategy::GetProp_Constant); - if (!getPropTryConstant(&emitted, obj, name, types) || emitted) - return emitted; + // Try to emit SIMD getter loads + trackOptimizationAttempt(TrackedStrategy::GetProp_SimdGetter); + if (!getPropTrySimdGetter(&emitted, obj, name) || emitted) + return emitted; - // Try to emit SIMD getter loads - trackOptimizationAttempt(TrackedStrategy::GetProp_SimdGetter); - if (!getPropTrySimdGetter(&emitted, obj, name) || emitted) - return emitted; + // Try to emit loads from known binary data blocks + trackOptimizationAttempt(TrackedStrategy::GetProp_TypedObject); + if (!getPropTryTypedObject(&emitted, obj, name) || emitted) + return emitted; - // Try to emit loads from known binary data blocks - trackOptimizationAttempt(TrackedStrategy::GetProp_TypedObject); - if (!getPropTryTypedObject(&emitted, obj, name) || emitted) - return emitted; + // Try to emit loads from definite slots. + trackOptimizationAttempt(TrackedStrategy::GetProp_DefiniteSlot); + if (!getPropTryDefiniteSlot(&emitted, obj, name, barrier, types) || emitted) + return emitted; - // Try to emit loads from definite slots. - trackOptimizationAttempt(TrackedStrategy::GetProp_DefiniteSlot); - if (!getPropTryDefiniteSlot(&emitted, obj, name, barrier, types) || emitted) - return emitted; + // Try to emit loads from unboxed objects. + trackOptimizationAttempt(TrackedStrategy::GetProp_Unboxed); + if (!getPropTryUnboxed(&emitted, obj, name, barrier, types) || emitted) + return emitted; - // Try to emit loads from unboxed objects. - trackOptimizationAttempt(TrackedStrategy::GetProp_Unboxed); - if (!getPropTryUnboxed(&emitted, obj, name, barrier, types) || emitted) - return emitted; + // Try to inline a common property getter, or make a call. + trackOptimizationAttempt(TrackedStrategy::GetProp_CommonGetter); + if (!getPropTryCommonGetter(&emitted, obj, name, types) || emitted) + return emitted; - // Try to inline a common property getter, or make a call. - trackOptimizationAttempt(TrackedStrategy::GetProp_CommonGetter); - if (!getPropTryCommonGetter(&emitted, obj, name, types) || emitted) - return emitted; + // Try to emit a monomorphic/polymorphic access based on baseline caches. + trackOptimizationAttempt(TrackedStrategy::GetProp_InlineAccess); + if (!getPropTryInlineAccess(&emitted, obj, name, barrier, types) || emitted) + return emitted; + } - // Try to emit a monomorphic/polymorphic access based on baseline caches. - trackOptimizationAttempt(TrackedStrategy::GetProp_InlineAccess); - if (!getPropTryInlineAccess(&emitted, obj, name, barrier, types) || emitted) - return emitted; - - do_InlineCache: // Try to emit a polymorphic cache. trackOptimizationAttempt(TrackedStrategy::GetProp_InlineCache); if (!getPropTryCache(&emitted, obj, name, barrier, types) || emitted) @@ -10612,43 +10601,45 @@ MInstruction* IonBuilder::loadUnboxedProperty(MDefinition* obj, size_t offset, JSValueType unboxedType, BarrierKind barrier, TemporaryTypeSet* types) { - size_t scaledOffsetConstant = offset / UnboxedTypeSize(unboxedType); - MInstruction* scaledOffset = MConstant::New(alloc(), Int32Value(scaledOffsetConstant)); - current->add(scaledOffset); + // loadUnboxedValue is designed to load any value as if it were contained in + // an array. Thus a property offset is converted to an index, when the + // object is reinterpreted as an array of properties of the same size. 
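// Illustrative worked example of the offset-to-index conversion described in the
// comment above; the numbers are made up, not taken from the patch. If every
// unboxed field of a given type occupies 4 bytes, the property stored at byte
// offset 12 becomes element index 12 / 4 == 3 once the payload is viewed as an
// array of same-sized properties.
#include <cassert>
#include <cstddef>

int main() {
    const std::size_t unboxedTypeSize = 4;   // e.g. a 32-bit field
    const std::size_t offset = 12;           // byte offset of the property
    const std::size_t index = offset / unboxedTypeSize;
    assert(index == 3);
    return 0;
}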
+ size_t index = offset / UnboxedTypeSize(unboxedType); + MInstruction* indexConstant = MConstant::New(alloc(), Int32Value(index)); + current->add(indexConstant); return loadUnboxedValue(obj, UnboxedPlainObject::offsetOfData(), - scaledOffset, unboxedType, barrier, types); + indexConstant, unboxedType, barrier, types); } MInstruction* IonBuilder::loadUnboxedValue(MDefinition* elements, size_t elementsOffset, - MDefinition* scaledOffset, JSValueType unboxedType, + MDefinition* index, JSValueType unboxedType, BarrierKind barrier, TemporaryTypeSet* types) { - MInstruction* load; switch (unboxedType) { case JSVAL_TYPE_BOOLEAN: - load = MLoadUnboxedScalar::New(alloc(), elements, scaledOffset, Scalar::Uint8, + load = MLoadUnboxedScalar::New(alloc(), elements, index, Scalar::Uint8, DoesNotRequireMemoryBarrier, elementsOffset); load->setResultType(MIRType_Boolean); break; case JSVAL_TYPE_INT32: - load = MLoadUnboxedScalar::New(alloc(), elements, scaledOffset, Scalar::Int32, + load = MLoadUnboxedScalar::New(alloc(), elements, index, Scalar::Int32, DoesNotRequireMemoryBarrier, elementsOffset); load->setResultType(MIRType_Int32); break; case JSVAL_TYPE_DOUBLE: - load = MLoadUnboxedScalar::New(alloc(), elements, scaledOffset, Scalar::Float64, + load = MLoadUnboxedScalar::New(alloc(), elements, index, Scalar::Float64, DoesNotRequireMemoryBarrier, elementsOffset, /* canonicalizeDoubles = */ false); load->setResultType(MIRType_Double); break; case JSVAL_TYPE_STRING: - load = MLoadUnboxedString::New(alloc(), elements, scaledOffset, elementsOffset); + load = MLoadUnboxedString::New(alloc(), elements, index, elementsOffset); break; case JSVAL_TYPE_OBJECT: { @@ -10657,7 +10648,7 @@ IonBuilder::loadUnboxedValue(MDefinition* elements, size_t elementsOffset, nullBehavior = MLoadUnboxedObjectOrNull::HandleNull; else nullBehavior = MLoadUnboxedObjectOrNull::NullNotPossible; - load = MLoadUnboxedObjectOrNull::New(alloc(), elements, scaledOffset, nullBehavior, + load = MLoadUnboxedObjectOrNull::New(alloc(), elements, index, nullBehavior, elementsOffset); break; } @@ -11150,31 +11141,29 @@ IonBuilder::getPropTryInnerize(bool* emitted, MDefinition* obj, PropertyName* na if (inner == obj) return true; - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache; + if (!forceInlineCaches()) { + // Note: the Baseline ICs don't know about this optimization, so it's + // possible the global property's HeapTypeSet has not been initialized + // yet. In this case we'll fall back to getPropTryCache for now. - // Note: the Baseline ICs don't know about this optimization, so it's - // possible the global property's HeapTypeSet has not been initialized - // yet. In this case we'll fall back to getPropTryCache for now. + // Note that it's important that we do this _before_ we'd try to + // do the optimizations below on obj normally, since some of those + // optimizations have fallback paths that are slower than the path + // we'd produce here. - // Note that it's important that we do this _before_ we'd try to - // do the optimizations below on obj normally, since some of those - // optimizations have fallback paths that are slower than the path - // we'd produce here. 
+ trackOptimizationAttempt(TrackedStrategy::GetProp_Constant); + if (!getPropTryConstant(emitted, inner, name, types) || *emitted) + return *emitted; - trackOptimizationAttempt(TrackedStrategy::GetProp_Constant); - if (!getPropTryConstant(emitted, inner, name, types) || *emitted) - return *emitted; + trackOptimizationAttempt(TrackedStrategy::GetProp_StaticName); + if (!getStaticName(&script()->global(), name, emitted) || *emitted) + return *emitted; - trackOptimizationAttempt(TrackedStrategy::GetProp_StaticName); - if (!getStaticName(&script()->global(), name, emitted) || *emitted) - return *emitted; + trackOptimizationAttempt(TrackedStrategy::GetProp_CommonGetter); + if (!getPropTryCommonGetter(emitted, inner, name, types) || *emitted) + return *emitted; + } - trackOptimizationAttempt(TrackedStrategy::GetProp_CommonGetter); - if (!getPropTryCommonGetter(emitted, inner, name, types) || *emitted) - return *emitted; - - do_InlineCache: // Passing the inner object to GetProperty IC is safe, see the // needsOuterizedThisObject check in IsCacheableGetPropCallNative. BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), @@ -11208,52 +11197,46 @@ IonBuilder::jsop_setprop(PropertyName* name) return resumeAfter(ins); } - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache_step1; + if (!forceInlineCaches()) { + // Try to inline a common property setter, or make a call. + trackOptimizationAttempt(TrackedStrategy::SetProp_CommonSetter); + if (!setPropTryCommonSetter(&emitted, obj, name, value) || emitted) + return emitted; - // Try to inline a common property setter, or make a call. - trackOptimizationAttempt(TrackedStrategy::SetProp_CommonSetter); - if (!setPropTryCommonSetter(&emitted, obj, name, value) || emitted) - return emitted; + // Try to emit stores to known binary data blocks + trackOptimizationAttempt(TrackedStrategy::SetProp_TypedObject); + if (!setPropTryTypedObject(&emitted, obj, name, value) || emitted) + return emitted; + } - // Try to emit stores to known binary data blocks - trackOptimizationAttempt(TrackedStrategy::SetProp_TypedObject); - if (!setPropTryTypedObject(&emitted, obj, name, value) || emitted) - return emitted; - - do_InlineCache_step1: TemporaryTypeSet* objTypes = obj->resultTypeSet(); bool barrier = PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, &obj, name, &value, /* canModify = */ true); - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache_step2; + if (!forceInlineCaches()) { + // Try to emit stores to unboxed objects. + trackOptimizationAttempt(TrackedStrategy::SetProp_Unboxed); + if (!setPropTryUnboxed(&emitted, obj, name, value, barrier, objTypes) || emitted) + return emitted; + } - // Try to emit stores to unboxed objects. - trackOptimizationAttempt(TrackedStrategy::SetProp_Unboxed); - if (!setPropTryUnboxed(&emitted, obj, name, value, barrier, objTypes) || emitted) - return emitted; - - do_InlineCache_step2: // Add post barrier if needed. The instructions above manage any post // barriers they need directly. if (NeedsPostBarrier(info(), value)) current->add(MPostWriteBarrier::New(alloc(), obj, value)); - if (MOZ_UNLIKELY(js_JitOptions.forceInlineCaches)) - goto do_InlineCache_step3; + if (!forceInlineCaches()) { + // Try to emit store from definite slots. + trackOptimizationAttempt(TrackedStrategy::SetProp_DefiniteSlot); + if (!setPropTryDefiniteSlot(&emitted, obj, name, value, barrier, objTypes) || emitted) + return emitted; - // Try to emit store from definite slots. 
- trackOptimizationAttempt(TrackedStrategy::SetProp_DefiniteSlot); - if (!setPropTryDefiniteSlot(&emitted, obj, name, value, barrier, objTypes) || emitted) - return emitted; + // Try to emit a monomorphic/polymorphic store based on baseline caches. + trackOptimizationAttempt(TrackedStrategy::SetProp_InlineAccess); + if (!setPropTryInlineAccess(&emitted, obj, name, value, barrier, objTypes) || emitted) + return emitted; + } - // Try to emit a monomorphic/polymorphic store based on baseline caches. - trackOptimizationAttempt(TrackedStrategy::SetProp_InlineAccess); - if (!setPropTryInlineAccess(&emitted, obj, name, value, barrier, objTypes) || emitted) - return emitted; - - do_InlineCache_step3: // Emit a polymorphic cache. trackOptimizationAttempt(TrackedStrategy::SetProp_InlineCache); return setPropTryCache(&emitted, obj, name, value, barrier, objTypes); diff --git a/js/src/jit/IonBuilder.h b/js/src/jit/IonBuilder.h index 0aca0fb31eca..46303637523f 100644 --- a/js/src/jit/IonBuilder.h +++ b/js/src/jit/IonBuilder.h @@ -1210,6 +1210,10 @@ class IonBuilder trackInlineSuccessUnchecked(status); } + bool forceInlineCaches() { + return MOZ_UNLIKELY(js_JitOptions.forceInlineCaches); + } + // Out-of-line variants that don't check if optimization tracking is // enabled. void trackTypeInfoUnchecked(JS::TrackedTypeSite site, MIRType mirType, diff --git a/js/src/jit/JitSpewer.cpp b/js/src/jit/JitSpewer.cpp index 08fccf6ad710..4b064bf2be3c 100644 --- a/js/src/jit/JitSpewer.cpp +++ b/js/src/jit/JitSpewer.cpp @@ -97,6 +97,13 @@ static const char * const ChannelNames[] = #undef JITSPEW_CHANNEL }; +static size_t ChannelIndentLevel[] = +{ +#define JITSPEW_CHANNEL(name) 0, + JITSPEW_CHANNEL_LIST(JITSPEW_CHANNEL) +#undef JITSPEW_CHANNEL +}; + static bool FilterContainsLocation(JSScript* function) { @@ -501,6 +508,17 @@ jit::CheckLogging() JitSpewPrinter().init(stderr); } +JitSpewIndent::JitSpewIndent(JitSpewChannel channel) + : channel_(channel) +{ + ChannelIndentLevel[channel]++; +} + +JitSpewIndent::~JitSpewIndent() +{ + ChannelIndentLevel[channel_]--; +} + void jit::JitSpewStartVA(JitSpewChannel channel, const char* fmt, va_list ap) { @@ -508,7 +526,8 @@ jit::JitSpewStartVA(JitSpewChannel channel, const char* fmt, va_list ap) return; JitSpewHeader(channel); - vfprintf(stderr, fmt, ap); + Fprinter& out = JitSpewPrinter(); + out.vprintf(fmt, ap); } void @@ -517,7 +536,8 @@ jit::JitSpewContVA(JitSpewChannel channel, const char* fmt, va_list ap) if (!JitSpewEnabled(channel)) return; - vfprintf(stderr, fmt, ap); + Fprinter& out = JitSpewPrinter(); + out.vprintf(fmt, ap); } void @@ -526,7 +546,8 @@ jit::JitSpewFin(JitSpewChannel channel) if (!JitSpewEnabled(channel)) return; - fprintf(stderr, "\n"); + Fprinter& out = JitSpewPrinter(); + out.put("\n"); } void @@ -581,7 +602,10 @@ jit::JitSpewHeader(JitSpewChannel channel) if (!JitSpewEnabled(channel)) return; - fprintf(stderr, "[%s] ", ChannelNames[channel]); + Fprinter& out = JitSpewPrinter(); + out.printf("[%s] ", ChannelNames[channel]); + for (size_t i = ChannelIndentLevel[channel]; i != 0; i--) + out.put(" "); } bool diff --git a/js/src/jit/JitSpewer.h b/js/src/jit/JitSpewer.h index 261392f06014..b46e3caa70bb 100644 --- a/js/src/jit/JitSpewer.h +++ b/js/src/jit/JitSpewer.h @@ -150,6 +150,16 @@ class AutoSpewEndFunction void CheckLogging(); Fprinter& JitSpewPrinter(); + +class JitSpewIndent +{ + JitSpewChannel channel_; + + public: + explicit JitSpewIndent(JitSpewChannel channel); + ~JitSpewIndent(); +}; + void JitSpew(JitSpewChannel channel, const char* fmt, 
...); void JitSpewStart(JitSpewChannel channel, const char* fmt, ...); void JitSpewCont(JitSpewChannel channel, const char* fmt, ...); @@ -199,6 +209,14 @@ static inline Fprinter& JitSpewPrinter() { MOZ_CRASH("No empty backend for JitSpewPrinter"); } + +class JitSpewIndent +{ + public: + explicit JitSpewIndent(JitSpewChannel channel) {} + ~JitSpewIndent() {} +}; + static inline void JitSpew(JitSpewChannel, const char* fmt, ...) { } static inline void JitSpewStart(JitSpewChannel channel, const char* fmt, ...) diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp index dd3ca8d9b8d2..adbcd2fb945d 100644 --- a/js/src/jit/MIR.cpp +++ b/js/src/jit/MIR.cpp @@ -775,12 +775,12 @@ MConstant::printOpcode(GenericPrinter& out) const out.printf("0x%x", value().toInt32()); break; case MIRType_Double: - out.printf("%f", value().toDouble()); + out.printf("%.16g", value().toDouble()); break; case MIRType_Float32: { float val = value().toDouble(); - out.printf("%f", val); + out.printf("%.16g", val); break; } case MIRType_Object: @@ -1076,7 +1076,7 @@ void MLoadUnboxedScalar::printOpcode(GenericPrinter& out) const { MDefinition::printOpcode(out); - out.printf(" %s", ScalarTypeDescr::typeName(indexType())); + out.printf(" %s", ScalarTypeDescr::typeName(storageType())); } void @@ -3929,20 +3929,75 @@ MCreateThisWithTemplate::canRecoverOnBailout() const return true; } -MObjectState::MObjectState(MDefinition* obj) +bool +OperandIndexMap::init(TempAllocator& alloc, JSObject* templateObject) +{ + const UnboxedLayout& layout = + templateObject->as().layoutDontCheckGeneration(); + + // 0 is used as an error code. + const UnboxedLayout::PropertyVector& properties = layout.properties(); + MOZ_ASSERT(properties.length() < 255); + + // Allocate an array of indexes, where the top of each field correspond to + // the index of the operand in the MObjectState instance. + if (!map.init(alloc, layout.size())) + return false; + + // Reset all indexes to 0, which is an error code. + for (size_t i = 0; i < map.length(); i++) + map[i] = 0; + + // Map the property offsets to the indexes of MObjectState operands. + uint8_t index = 1; + for (size_t i = 0; i < properties.length(); i++, index++) + map[properties[i].offset] = index; + + return true; +} + +MObjectState::MObjectState(MObjectState* state) + : numSlots_(state->numSlots_), + numFixedSlots_(state->numFixedSlots_), + operandIndex_(state->operandIndex_) { // This instruction is only used as a summary for bailout paths. setResultType(MIRType_Object); setRecoveredOnBailout(); - NativeObject* templateObject = nullptr; +} + +MObjectState::MObjectState(JSObject *templateObject, OperandIndexMap* operandIndex) +{ + // This instruction is only used as a summary for bailout paths. 
+ setResultType(MIRType_Object); + setRecoveredOnBailout(); + + if (templateObject->is()) { + NativeObject* nativeObject = &templateObject->as(); + numSlots_ = nativeObject->slotSpan(); + numFixedSlots_ = nativeObject->numFixedSlots(); + } else { + const UnboxedLayout& layout = + templateObject->as().layoutDontCheckGeneration(); + // Same as UnboxedLayout::makeNativeGroup + numSlots_ = layout.properties().length(); + numFixedSlots_ = gc::GetGCKindSlots(layout.getAllocKind()); + } + + operandIndex_ = operandIndex; +} + +JSObject* +MObjectState::templateObjectOf(MDefinition* obj) +{ if (obj->isNewObject()) - templateObject = &obj->toNewObject()->templateObject()->as(); + return obj->toNewObject()->templateObject(); else if (obj->isCreateThisWithTemplate()) - templateObject = &obj->toCreateThisWithTemplate()->templateObject()->as(); + return obj->toCreateThisWithTemplate()->templateObject(); else - templateObject = obj->toNewCallObject()->templateObject(); - numSlots_ = templateObject->slotSpan(); - numFixedSlots_ = templateObject->numFixedSlots(); + return obj->toNewCallObject()->templateObject(); + + return nullptr; } bool @@ -3958,7 +4013,17 @@ MObjectState::init(TempAllocator& alloc, MDefinition* obj) MObjectState* MObjectState::New(TempAllocator& alloc, MDefinition* obj, MDefinition* undefinedVal) { - MObjectState* res = new(alloc) MObjectState(obj); + JSObject* templateObject = templateObjectOf(obj); + MOZ_ASSERT(templateObject, "Unexpected object creation."); + + OperandIndexMap* operandIndex = nullptr; + if (templateObject->is()) { + operandIndex = new(alloc) OperandIndexMap; + if (!operandIndex || !operandIndex->init(alloc, templateObject)) + return nullptr; + } + + MObjectState* res = new(alloc) MObjectState(templateObject, operandIndex); if (!res || !res->init(alloc, obj)) return nullptr; for (size_t i = 0; i < res->numSlots(); i++) @@ -3969,9 +4034,8 @@ MObjectState::New(TempAllocator& alloc, MDefinition* obj, MDefinition* undefined MObjectState* MObjectState::Copy(TempAllocator& alloc, MObjectState* state) { - MDefinition* obj = state->object(); - MObjectState* res = new(alloc) MObjectState(obj); - if (!res || !res->init(alloc, obj)) + MObjectState* res = new(alloc) MObjectState(state); + if (!res || !res->init(alloc, state->object())) return nullptr; for (size_t i = 0; i < res->numSlots(); i++) res->initSlot(i, state->getSlot(i)); diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h index a2cff67290ab..f5040e628e00 100644 --- a/js/src/jit/MIR.h +++ b/js/src/jit/MIR.h @@ -3359,6 +3359,18 @@ class MNewDerivedTypedObject } }; +// This vector is used when the recovered object is kept unboxed. We map the +// offset of each property to the index of the corresponding operands in the +// object state. +struct OperandIndexMap : public TempObject +{ + // The number of properties is limited by scalar replacement. Thus we cannot + // have any large number of properties. + FixedList map; + + bool init(TempAllocator& alloc, JSObject* templateObject); +}; + // Represent the content of all slots of an object. This instruction is not // lowered and is not used to generate code. class MObjectState @@ -3367,9 +3379,15 @@ class MObjectState { private: uint32_t numSlots_; - uint32_t numFixedSlots_; + uint32_t numFixedSlots_; // valid if isUnboxed() == false. + OperandIndexMap* operandIndex_; // valid if isUnboxed() == true. 
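    // Sketch for illustration only (hypothetical offsets and names, not from the
    // patch): for an unboxed layout with an int32 property at byte offset 0 and a
    // pointer-sized property at byte offset 8, OperandIndexMap::init() would leave
    // map[0] == 1, map[8] == 2 and every other entry at the error code 0. Operand 0
    // of the MObjectState still holds the object definition itself, which is why
    // index 0 can double as the "offset not mapped" marker. A store to offset 8
    // then becomes, roughly:
    //
    //     if (state->hasOffset(8))        // operandIndex_->map[8] != 0
    //         state->setOffset(8, value); // replaceOperand(map[8], value)
    //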
- explicit MObjectState(MDefinition* obj); + bool isUnboxed() const { + return operandIndex_ != nullptr; + } + + MObjectState(JSObject *templateObject, OperandIndexMap* operandIndex); + explicit MObjectState(MObjectState* state); bool init(TempAllocator& alloc, MDefinition* obj); @@ -3380,6 +3398,10 @@ class MObjectState public: INSTRUCTION_HEADER(ObjectState) + // Return the template object of any object creation which can be recovered + // on bailout. + static JSObject* templateObjectOf(MDefinition* obj); + static MObjectState* New(TempAllocator& alloc, MDefinition* obj, MDefinition* undefinedVal); static MObjectState* Copy(TempAllocator& alloc, MObjectState* state); @@ -3388,6 +3410,7 @@ class MObjectState } size_t numFixedSlots() const { + MOZ_ASSERT(!isUnboxed()); return numFixedSlots_; } size_t numSlots() const { @@ -3423,6 +3446,18 @@ class MObjectState setSlot(slot + numFixedSlots(), def); } + // Interface reserved for unboxed objects. + bool hasOffset(uint32_t offset) const { + MOZ_ASSERT(isUnboxed()); + return offset < operandIndex_->map.length() && operandIndex_->map[offset] != 0; + } + MDefinition* getOffset(uint32_t offset) const { + return getOperand(operandIndex_->map[offset]); + } + void setOffset(uint32_t offset, MDefinition* def) { + replaceOperand(operandIndex_->map[offset], def); + } + bool writeRecoverData(CompactBufferWriter& writer) const override; bool canRecoverOnBailout() const override { return true; @@ -9323,7 +9358,7 @@ class MLoadUnboxedScalar : public MBinaryInstruction, public SingleObjectPolicy::Data { - Scalar::Type indexType_; + Scalar::Type storageType_; Scalar::Type readType_; unsigned numElems_; // used only for SIMD bool requiresBarrier_; @@ -9331,11 +9366,11 @@ class MLoadUnboxedScalar bool canonicalizeDoubles_; MLoadUnboxedScalar(MDefinition* elements, MDefinition* index, - Scalar::Type indexType, MemoryBarrierRequirement requiresBarrier, + Scalar::Type storageType, MemoryBarrierRequirement requiresBarrier, int32_t offsetAdjustment, bool canonicalizeDoubles) : MBinaryInstruction(elements, index), - indexType_(indexType), - readType_(indexType), + storageType_(storageType), + readType_(storageType), numElems_(1), requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier), offsetAdjustment_(offsetAdjustment), @@ -9348,20 +9383,20 @@ class MLoadUnboxedScalar setMovable(); MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment)); MOZ_ASSERT(index->type() == MIRType_Int32); - MOZ_ASSERT(indexType >= 0 && indexType < Scalar::MaxTypedArrayViewType); + MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType); } public: INSTRUCTION_HEADER(LoadUnboxedScalar) static MLoadUnboxedScalar* New(TempAllocator& alloc, MDefinition* elements, MDefinition* index, - Scalar::Type indexType, + Scalar::Type storageType, MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier, int32_t offsetAdjustment = 0, bool canonicalizeDoubles = true) { - return new(alloc) MLoadUnboxedScalar(elements, index, indexType, + return new(alloc) MLoadUnboxedScalar(elements, index, storageType, requiresBarrier, offsetAdjustment, canonicalizeDoubles); } @@ -9377,8 +9412,8 @@ class MLoadUnboxedScalar return readType_; } - Scalar::Type indexType() const { - return indexType_; + Scalar::Type storageType() const { + return storageType_; } bool fallible() const { // Bailout if the result does not fit in an int32. 
@@ -9413,7 +9448,7 @@ class MLoadUnboxedScalar if (!ins->isLoadUnboxedScalar()) return false; const MLoadUnboxedScalar* other = ins->toLoadUnboxedScalar(); - if (indexType_ != other->indexType_) + if (storageType_ != other->storageType_) return false; if (readType_ != other->readType_) return false; @@ -9430,7 +9465,7 @@ class MLoadUnboxedScalar void computeRange(TempAllocator& alloc) override; - bool canProduceFloat32() const override { return indexType_ == Scalar::Float32; } + bool canProduceFloat32() const override { return storageType_ == Scalar::Float32; } ALLOW_CLONE(MLoadUnboxedScalar) }; @@ -9609,17 +9644,17 @@ class MStoreUnboxedScalar public StoreUnboxedScalarBase, public StoreUnboxedScalarPolicy::Data { - Scalar::Type indexType_; + Scalar::Type storageType_; bool requiresBarrier_; int32_t offsetAdjustment_; unsigned numElems_; // used only for SIMD MStoreUnboxedScalar(MDefinition* elements, MDefinition* index, MDefinition* value, - Scalar::Type indexType, MemoryBarrierRequirement requiresBarrier, + Scalar::Type storageType, MemoryBarrierRequirement requiresBarrier, int32_t offsetAdjustment) : MTernaryInstruction(elements, index, value), - StoreUnboxedScalarBase(indexType), - indexType_(indexType), + StoreUnboxedScalarBase(storageType), + storageType_(storageType), requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier), offsetAdjustment_(offsetAdjustment), numElems_(1) @@ -9630,7 +9665,7 @@ class MStoreUnboxedScalar setMovable(); MOZ_ASSERT(IsValidElementsType(elements, offsetAdjustment)); MOZ_ASSERT(index->type() == MIRType_Int32); - MOZ_ASSERT(indexType >= 0 && indexType < Scalar::MaxTypedArrayViewType); + MOZ_ASSERT(storageType >= 0 && storageType < Scalar::MaxTypedArrayViewType); } public: @@ -9638,12 +9673,12 @@ class MStoreUnboxedScalar static MStoreUnboxedScalar* New(TempAllocator& alloc, MDefinition* elements, MDefinition* index, - MDefinition* value, Scalar::Type indexType, + MDefinition* value, Scalar::Type storageType, MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier, int32_t offsetAdjustment = 0) { - return new(alloc) MStoreUnboxedScalar(elements, index, value, indexType, + return new(alloc) MStoreUnboxedScalar(elements, index, value, storageType, requiresBarrier, offsetAdjustment); } @@ -9655,8 +9690,8 @@ class MStoreUnboxedScalar unsigned numElems() const { return numElems_; } - Scalar::Type indexType() const { - return indexType_; + Scalar::Type storageType() const { + return storageType_; } MDefinition* elements() const { return getOperand(0); diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp index 1c315946fd61..51e441ad7bb4 100644 --- a/js/src/jit/Recover.cpp +++ b/js/src/jit/Recover.cpp @@ -1253,24 +1253,20 @@ MCreateThisWithTemplate::writeRecoverData(CompactBufferWriter& writer) const { MOZ_ASSERT(canRecoverOnBailout()); writer.writeUnsigned(uint32_t(RInstruction::Recover_CreateThisWithTemplate)); - writer.writeByte(bool(initialHeap() == gc::TenuredHeap)); return true; } RCreateThisWithTemplate::RCreateThisWithTemplate(CompactBufferReader& reader) { - tenuredHeap_ = reader.readByte(); } bool RCreateThisWithTemplate::recover(JSContext* cx, SnapshotIterator& iter) const { - RootedPlainObject templateObject(cx, &iter.read().toObject().as()); + RootedObject templateObject(cx, &iter.read().toObject()); // See CodeGenerator::visitCreateThisWithTemplate - gc::AllocKind allocKind = templateObject->asTenured().getAllocKind(); - gc::InitialHeap initialHeap = tenuredHeap_ ? 
gc::TenuredHeap : gc::DefaultHeap; - JSObject* resultObject = NativeObject::copy(cx, allocKind, initialHeap, templateObject); + JSObject* resultObject = NewObjectOperationWithTemplate(cx, templateObject); if (!resultObject) return false; @@ -1373,13 +1369,30 @@ RObjectState::RObjectState(CompactBufferReader& reader) bool RObjectState::recover(JSContext* cx, SnapshotIterator& iter) const { - RootedNativeObject object(cx, &iter.read().toObject().as()); - MOZ_ASSERT(object->slotSpan() == numSlots()); - + RootedObject object(cx, &iter.read().toObject()); RootedValue val(cx); - for (size_t i = 0; i < numSlots(); i++) { - val = iter.read(); - object->setSlot(i, val); + + if (object->is()) { + const UnboxedLayout& layout = object->as().layout(); + + const UnboxedLayout::PropertyVector& properties = layout.properties(); + for (size_t i = 0; i < properties.length(); i++) { + val = iter.read(); + // This is the default placeholder value of MObjectState, when no + // properties are defined yet. + if (val.isUndefined()) + continue; + + MOZ_ALWAYS_TRUE(object->as().setValue(cx, properties[i], val)); + } + } else { + RootedNativeObject nativeObject(cx, &object->as()); + MOZ_ASSERT(nativeObject->slotSpan() == numSlots()); + + for (size_t i = 0; i < numSlots(); i++) { + val = iter.read(); + nativeObject->setSlot(i, val); + } } val.setObject(*object); diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h index 84e82fe63cc2..81473c07dbad 100644 --- a/js/src/jit/Recover.h +++ b/js/src/jit/Recover.h @@ -19,6 +19,43 @@ struct JSContext; namespace js { namespace jit { +// This file contains all recover instructions. +// +// A recover instruction is an equivalent of a MIR instruction which is executed +// before the reconstruction of a baseline frame. Recover instructions are used +// by resume points to fill the value which are not produced by the code +// compiled by IonMonkey. For example, if a value is optimized away by +// IonMonkey, but required by Baseline, then we should have a recover +// instruction to fill the missing baseline frame slot. +// +// Recover instructions are executed either during a bailout, or under a call +// when the stack frame is introspected. If the stack is introspected, then any +// use of recover instruction must lead to an invalidation of the code. +// +// For each MIR instruction where |canRecoverOnBailout| might return true, we +// have a RInstruction of the same name. +// +// Recover instructions are encoded by code generator into a compact buffer +// (RecoverWriter). The MIR instruction method |writeRecoverData| should write a +// tag in the |CompactBufferWriter| which is used by +// |RInstruction::readRecoverData| to dispatch to the right Recover +// instruction. Then |writeRecoverData| writes any local fields which are +// necessary for the execution of the |recover| method. These fields are decoded +// by the Recover instruction constructor which has a |CompactBufferReader| as +// argument. The constructor of the Recover instruction should follow the same +// sequence as the |writeRecoverData| method of the MIR instruction. +// +// Recover instructions are decoded by the |SnapshotIterator| (RecoverReader), +// which is given as argument of the |recover| methods, in order to read the +// operands. The number of operands read should be the same as the result of +// |numOperands|, which corresponds to the number of operands of the MIR +// instruction. Operands should be decoded in the same order as the operands of +// the MIR instruction. 
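//
// (Sketch for illustration only; MFoo, RFoo and Recover_Foo are made-up names,
// not entries of the opcode list below. A hypothetical pair with a single
// operand would follow the protocol described here roughly as:
//
//     bool MFoo::writeRecoverData(CompactBufferWriter& writer) const {
//         MOZ_ASSERT(canRecoverOnBailout());
//         writer.writeUnsigned(uint32_t(RInstruction::Recover_Foo));
//         return true;
//     }
//
//     bool RFoo::recover(JSContext* cx, SnapshotIterator& iter) const {
//         RootedValue operand(cx, iter.read()); // same order as MFoo's operands
//         RootedValue result(cx);
//         // ... recompute what MFoo would have produced from |operand| ...
//         iter.storeInstructionResult(result);
//         return true;
//     }
// )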
+// +// The result of the |recover| method should either be a failure, or a value +// stored on the |SnapshotIterator|, by using the |storeInstructionResult| +// method. + #define RECOVER_OPCODE_LIST(_) \ _(ResumePoint) \ _(BitNot) \ @@ -660,9 +697,6 @@ class RNewDerivedTypedObject final : public RInstruction class RCreateThisWithTemplate final : public RInstruction { - private: - bool tenuredHeap_; - public: RINSTRUCTION_HEADER_(CreateThisWithTemplate) diff --git a/js/src/jit/ScalarReplacement.cpp b/js/src/jit/ScalarReplacement.cpp index 915b9ad6876d..d708e7a028d9 100644 --- a/js/src/jit/ScalarReplacement.cpp +++ b/js/src/jit/ScalarReplacement.cpp @@ -91,34 +91,68 @@ EmulateStateOf::run(MemoryView& view) return true; } +static bool +IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr); + +// Returns False if the lambda is not escaped and if it is optimizable by +// ScalarReplacementOfObject. +static bool +IsLambdaEscaped(MLambda* lambda, JSObject* obj) +{ + JitSpewDef(JitSpew_Escape, "Check lambda\n", lambda); + JitSpewIndent spewIndent(JitSpew_Escape); + + // The scope chain is not escaped if none of the Lambdas which are + // capturing it are escaped. + for (MUseIterator i(lambda->usesBegin()); i != lambda->usesEnd(); i++) { + MNode* consumer = (*i)->consumer(); + if (!consumer->isDefinition()) { + // Cannot optimize if it is observable from fun.arguments or others. + if (!consumer->toResumePoint()->isRecoverableOperand(*i)) { + JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered"); + return true; + } + continue; + } + + MDefinition* def = consumer->toDefinition(); + if (!def->isFunctionEnvironment()) { + JitSpewDef(JitSpew_Escape, "is escaped by\n", def); + return true; + } + + if (IsObjectEscaped(def->toInstruction(), obj)) { + JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); + return true; + } + } + JitSpew(JitSpew_Escape, "Lambda is not escaped"); + return false; +} + // Returns False if the object is not escaped and if it is optimizable by // ScalarReplacementOfObject. // // For the moment, this code is dumb as it only supports objects which are not // changing shape, and which are known by TI at the object creation. static bool -IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr) +IsObjectEscaped(MInstruction* ins, JSObject* objDefault) { MOZ_ASSERT(ins->type() == MIRType_Object); MOZ_ASSERT(ins->isNewObject() || ins->isGuardShape() || ins->isCreateThisWithTemplate() || ins->isNewCallObject() || ins->isFunctionEnvironment()); - JSObject* obj = nullptr; - if (ins->isNewObject()) - obj = ins->toNewObject()->templateObject(); - else if (ins->isCreateThisWithTemplate()) - obj = ins->toCreateThisWithTemplate()->templateObject(); - else if (ins->isNewCallObject()) - obj = ins->toNewCallObject()->templateObject(); - else - obj = objDefault; + JitSpewDef(JitSpew_Escape, "Check object\n", ins); + JitSpewIndent spewIndent(JitSpew_Escape); + JSObject* obj = objDefault; if (!obj) - return true; + obj = MObjectState::templateObjectOf(ins); - // Don't optimize unboxed objects, which aren't handled by MObjectState. - if (obj->is()) + if (!obj) { + JitSpew(JitSpew_Escape, "No template object defined."); return true; + } // Check if the object is escaped. If the object is not the first argument // of either a known Store / Load, then we consider it as escaped. 
This is a @@ -128,7 +162,7 @@ IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr) if (!consumer->isDefinition()) { // Cannot optimize if it is observable from fun.arguments or others. if (!consumer->toResumePoint()->isRecoverableOperand(*i)) { - JitSpewDef(JitSpew_Escape, "Observable object cannot be recovered\n", ins); + JitSpew(JitSpew_Escape, "Observable object cannot be recovered"); return true; } continue; @@ -142,10 +176,28 @@ IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr) if (def->indexOf(*i) == 0) break; - JitSpewDef(JitSpew_Escape, "Object ", ins); - JitSpewDef(JitSpew_Escape, " is escaped by\n", def); + JitSpewDef(JitSpew_Escape, "is escaped by\n", def); return true; + case MDefinition::Op_LoadUnboxedScalar: + case MDefinition::Op_StoreUnboxedScalar: + case MDefinition::Op_LoadUnboxedObjectOrNull: + case MDefinition::Op_StoreUnboxedObjectOrNull: + case MDefinition::Op_LoadUnboxedString: + case MDefinition::Op_StoreUnboxedString: + // Not escaped if it is the first argument. + if (def->indexOf(*i) != 0) { + JitSpewDef(JitSpew_Escape, "is escaped by\n", def); + return true; + } + + if (!def->getOperand(1)->isConstant()) { + JitSpewDef(JitSpew_Escape, "is addressed with unknown index\n", def); + return true; + } + + break; + case MDefinition::Op_PostWriteBarrier: break; @@ -169,38 +221,22 @@ IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr) MGuardShape* guard = def->toGuardShape(); MOZ_ASSERT(!ins->isGuardShape()); if (obj->as().lastProperty() != guard->shape()) { - JitSpewDef(JitSpew_Escape, "Object ", ins); - JitSpewDef(JitSpew_Escape, " has a non-matching guard shape\n", guard); + JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard); return true; } - if (IsObjectEscaped(def->toInstruction(), obj)) + if (IsObjectEscaped(def->toInstruction(), obj)) { + JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def); return true; + } break; } case MDefinition::Op_Lambda: { MLambda* lambda = def->toLambda(); - // The scope chain is not escaped if none of the Lambdas which are - // capturing it are escaped. - for (MUseIterator i(lambda->usesBegin()); i != lambda->usesEnd(); i++) { - MNode* consumer = (*i)->consumer(); - if (!consumer->isDefinition()) { - // Cannot optimize if it is observable from fun.arguments or others. 
- if (!consumer->toResumePoint()->isRecoverableOperand(*i)) { - JitSpewDef(JitSpew_Escape, "Observable object cannot be recovered\n", ins); - return true; - } - continue; - } - - MDefinition* def = consumer->toDefinition(); - if (!def->isFunctionEnvironment() || IsObjectEscaped(def->toInstruction(), obj)) { - JitSpewDef(JitSpew_Escape, "Object ", ins); - JitSpewDef(JitSpew_Escape, " is escaped through a lambda by\n", def); - return true; - } + if (IsLambdaEscaped(lambda, obj)) { + JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", lambda); + return true; } - break; } @@ -210,13 +246,12 @@ IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr) break; default: - JitSpewDef(JitSpew_Escape, "Object ", ins); - JitSpewDef(JitSpew_Escape, " is escaped by\n", def); + JitSpewDef(JitSpew_Escape, "is escaped by\n", def); return true; } } - JitSpewDef(JitSpew_Escape, "Object is not escaped\n", ins); + JitSpew(JitSpew_Escape, "Object is not escaped"); return false; } @@ -262,6 +297,16 @@ class ObjectMemoryView : public MDefinitionVisitorDefaultNoop void visitGuardShape(MGuardShape* ins); void visitFunctionEnvironment(MFunctionEnvironment* ins); void visitLambda(MLambda* ins); + void visitStoreUnboxedScalar(MStoreUnboxedScalar* ins); + void visitLoadUnboxedScalar(MLoadUnboxedScalar* ins); + void visitStoreUnboxedObjectOrNull(MStoreUnboxedObjectOrNull* ins); + void visitLoadUnboxedObjectOrNull(MLoadUnboxedObjectOrNull* ins); + void visitStoreUnboxedString(MStoreUnboxedString* ins); + void visitLoadUnboxedString(MLoadUnboxedString* ins); + + private: + void storeOffset(MInstruction* ins, size_t offset, MDefinition* value); + void loadOffset(MInstruction* ins, size_t offset); }; const char* ObjectMemoryView::phaseName = "Scalar Replacement of Object"; @@ -574,6 +619,116 @@ ObjectMemoryView::visitLambda(MLambda* ins) ins->setIncompleteObject(); } +static size_t +GetOffsetOf(MDefinition* index, size_t width, int32_t baseOffset) +{ + int32_t idx = index->toConstant()->value().toInt32(); + MOZ_ASSERT(idx >= 0); + MOZ_ASSERT(baseOffset >= 0 && size_t(baseOffset) >= UnboxedPlainObject::offsetOfData()); + return idx * width + baseOffset - UnboxedPlainObject::offsetOfData(); +} + +static size_t +GetOffsetOf(MDefinition* index, Scalar::Type type, int32_t baseOffset) +{ + return GetOffsetOf(index, Scalar::byteSize(type), baseOffset); +} + +void +ObjectMemoryView::storeOffset(MInstruction* ins, size_t offset, MDefinition* value) +{ + // Clone the state and update the slot value. + MOZ_ASSERT(state_->hasOffset(offset)); + state_ = BlockState::Copy(alloc_, state_); + state_->setOffset(offset, value); + ins->block()->insertBefore(ins, state_); + + // Remove original instruction. + ins->block()->discard(ins); +} + +void +ObjectMemoryView::loadOffset(MInstruction* ins, size_t offset) +{ + // Replace load by the slot value. + MOZ_ASSERT(state_->hasOffset(offset)); + ins->replaceAllUsesWith(state_->getOffset(offset)); + + // Remove original instruction. + ins->block()->discard(ins); +} + +void +ObjectMemoryView::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins) +{ + // Skip stores made on other objects. + if (ins->elements() != obj_) + return; + + size_t offset = GetOffsetOf(ins->index(), ins->storageType(), ins->offsetAdjustment()); + storeOffset(ins, offset, ins->value()); +} + +void +ObjectMemoryView::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins) +{ + // Skip loads made on other objects. + if (ins->elements() != obj_) + return; + + // Replace load by the slot value. 
+ size_t offset = GetOffsetOf(ins->index(), ins->storageType(), ins->offsetAdjustment()); + loadOffset(ins, offset); +} + +void +ObjectMemoryView::visitStoreUnboxedObjectOrNull(MStoreUnboxedObjectOrNull* ins) +{ + // Skip stores made on other objects. + if (ins->elements() != obj_) + return; + + // Clone the state and update the slot value. + size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment()); + storeOffset(ins, offset, ins->value()); +} + +void +ObjectMemoryView::visitLoadUnboxedObjectOrNull(MLoadUnboxedObjectOrNull* ins) +{ + // Skip loads made on other objects. + if (ins->elements() != obj_) + return; + + // Replace load by the slot value. + size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment()); + loadOffset(ins, offset); +} + +void +ObjectMemoryView::visitStoreUnboxedString(MStoreUnboxedString* ins) +{ + // Skip stores made on other objects. + if (ins->elements() != obj_) + return; + + // Clone the state and update the slot value. + size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment()); + storeOffset(ins, offset, ins->value()); +} + +void +ObjectMemoryView::visitLoadUnboxedString(MLoadUnboxedString* ins) +{ + // Skip loads made on other objects. + if (ins->elements() != obj_) + return; + + // Replace load by the slot value. + size_t offset = GetOffsetOf(ins->index(), sizeof(uintptr_t), ins->offsetAdjustment()); + loadOffset(ins, offset); +} + static bool IndexOf(MDefinition* ins, int32_t* res) { @@ -593,6 +748,105 @@ IndexOf(MDefinition* ins, int32_t* res) return true; } +// Returns False if the elements is not escaped and if it is optimizable by +// ScalarReplacementOfArray. +static bool +IsElementEscaped(MElements* def, uint32_t arraySize) +{ + JitSpewDef(JitSpew_Escape, "Check elements\n", def); + JitSpewIndent spewIndent(JitSpew_Escape); + + for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) { + // The MIRType_Elements cannot be captured in a resume point as + // it does not represent a value allocation. + MDefinition* access = (*i)->consumer()->toDefinition(); + + switch (access->op()) { + case MDefinition::Op_LoadElement: { + MOZ_ASSERT(access->toLoadElement()->elements() == def); + + // If we need hole checks, then the array cannot be escaped + // as the array might refer to the prototype chain to look + // for properties, thus it might do additional side-effects + // which are not reflected by the alias set, is we are + // bailing on holes. + if (access->toLoadElement()->needsHoleCheck()) { + JitSpewDef(JitSpew_Escape, + "has a load element with a hole check\n", access); + return true; + } + + // If the index is not a constant then this index can alias + // all others. We do not handle this case. + int32_t index; + if (!IndexOf(access, &index)) { + JitSpewDef(JitSpew_Escape, + "has a load element with a non-trivial index\n", access); + return true; + } + if (index < 0 || arraySize <= uint32_t(index)) { + JitSpewDef(JitSpew_Escape, + "has a load element with an out-of-bound index\n", access); + return true; + } + break; + } + + case MDefinition::Op_StoreElement: { + MOZ_ASSERT(access->toStoreElement()->elements() == def); + + // If we need hole checks, then the array cannot be escaped + // as the array might refer to the prototype chain to look + // for properties, thus it might do additional side-effects + // which are not reflected by the alias set, is we are + // bailing on holes. 
+ if (access->toStoreElement()->needsHoleCheck()) { + JitSpewDef(JitSpew_Escape, + "has a store element with a hole check\n", access); + return true; + } + + // If the index is not a constant then this index can alias + // all others. We do not handle this case. + int32_t index; + if (!IndexOf(access, &index)) { + JitSpewDef(JitSpew_Escape, "has a store element with a non-trivial index\n", access); + return true; + } + if (index < 0 || arraySize <= uint32_t(index)) { + JitSpewDef(JitSpew_Escape, "has a store element with an out-of-bound index\n", access); + return true; + } + + // We are not yet encoding magic hole constants in resume points. + if (access->toStoreElement()->value()->type() == MIRType_MagicHole) { + JitSpewDef(JitSpew_Escape, "has a store element with an magic-hole constant\n", access); + return true; + } + break; + } + + case MDefinition::Op_SetInitializedLength: + MOZ_ASSERT(access->toSetInitializedLength()->elements() == def); + break; + + case MDefinition::Op_InitializedLength: + MOZ_ASSERT(access->toInitializedLength()->elements() == def); + break; + + case MDefinition::Op_ArrayLength: + MOZ_ASSERT(access->toArrayLength()->elements() == def); + break; + + default: + JitSpewDef(JitSpew_Escape, "is escaped by\n", access); + return true; + } + } + JitSpew(JitSpew_Escape, "Elements is not escaped"); + return false; +} + // Returns False if the array is not escaped and if it is optimizable by // ScalarReplacementOfArray. // @@ -605,12 +859,22 @@ IsArrayEscaped(MInstruction* ins) MOZ_ASSERT(ins->isNewArray()); uint32_t count = ins->toNewArray()->count(); + JitSpewDef(JitSpew_Escape, "Check array\n", ins); + JitSpewIndent spewIndent(JitSpew_Escape); + JSObject* obj = ins->toNewArray()->templateObject(); - if (!obj || obj->is()) + if (!obj) { + JitSpew(JitSpew_Escape, "No template object defined."); return true; + } + + if (obj->is()) { + JitSpew(JitSpew_Escape, "Template object is an unboxed plain object."); + return true; + } if (count >= 16) { - JitSpewDef(JitSpew_Escape, "Array has too many elements\n", ins); + JitSpew(JitSpew_Escape, "Array has too many elements"); return true; } @@ -622,7 +886,7 @@ IsArrayEscaped(MInstruction* ins) if (!consumer->isDefinition()) { // Cannot optimize if it is observable from fun.arguments or others. if (!consumer->toResumePoint()->isRecoverableOperand(*i)) { - JitSpewDef(JitSpew_Escape, "Observable array cannot be recovered\n", ins); + JitSpew(JitSpew_Escape, "Observable array cannot be recovered"); return true; } continue; @@ -631,101 +895,11 @@ IsArrayEscaped(MInstruction* ins) MDefinition* def = consumer->toDefinition(); switch (def->op()) { case MDefinition::Op_Elements: { - MOZ_ASSERT(def->toElements()->object() == ins); - for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) { - // The MIRType_Elements cannot be captured in a resume point as - // it does not represent a value allocation. - MDefinition* access = (*i)->consumer()->toDefinition(); - - switch (access->op()) { - case MDefinition::Op_LoadElement: { - MOZ_ASSERT(access->toLoadElement()->elements() == def); - - // If we need hole checks, then the array cannot be escaped - // as the array might refer to the prototype chain to look - // for properties, thus it might do additional side-effects - // which are not reflected by the alias set, is we are - // bailing on holes. 
- if (access->toLoadElement()->needsHoleCheck()) { - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, - " has a load element with a hole check\n", access); - return true; - } - - // If the index is not a constant then this index can alias - // all others. We do not handle this case. - int32_t index; - if (!IndexOf(access, &index)) { - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, - " has a load element with a non-trivial index\n", access); - return true; - } - if (index < 0 || count <= uint32_t(index)) { - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, - " has a load element with an out-of-bound index\n", access); - return true; - } - break; - } - - case MDefinition::Op_StoreElement: { - MOZ_ASSERT(access->toStoreElement()->elements() == def); - - // If we need hole checks, then the array cannot be escaped - // as the array might refer to the prototype chain to look - // for properties, thus it might do additional side-effects - // which are not reflected by the alias set, is we are - // bailing on holes. - if (access->toStoreElement()->needsHoleCheck()) { - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, - " has a store element with a hole check\n", access); - return true; - } - - // If the index is not a constant then this index can alias - // all others. We do not handle this case. - int32_t index; - if (!IndexOf(access, &index)) { - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, " has a store element with a non-trivial index\n", access); - return true; - } - if (index < 0 || count <= uint32_t(index)) { - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, " has a store element with an out-of-bound index\n", access); - return true; - } - - // We are not yet encoding magic hole constants in resume points. 
- if (access->toStoreElement()->value()->type() == MIRType_MagicHole) { - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, " has a store element with an magic-hole constant\n", access); - return true; - } - break; - } - - case MDefinition::Op_SetInitializedLength: - MOZ_ASSERT(access->toSetInitializedLength()->elements() == def); - break; - - case MDefinition::Op_InitializedLength: - MOZ_ASSERT(access->toInitializedLength()->elements() == def); - break; - - case MDefinition::Op_ArrayLength: - MOZ_ASSERT(access->toArrayLength()->elements() == def); - break; - - default: - JitSpewDef(JitSpew_Escape, "Array's element ", ins); - JitSpewDef(JitSpew_Escape, " is escaped by\n", def); - return true; - } + MElements *elem = def->toElements(); + MOZ_ASSERT(elem->object() == ins); + if (IsElementEscaped(elem, count)) { + JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem); + return true; } break; @@ -737,13 +911,12 @@ IsArrayEscaped(MInstruction* ins) break; default: - JitSpewDef(JitSpew_Escape, "Array ", ins); - JitSpewDef(JitSpew_Escape, " is escaped by\n", def); + JitSpewDef(JitSpew_Escape, "is escaped by\n", def); return true; } } - JitSpewDef(JitSpew_Escape, "Array is not escaped\n", ins); + JitSpew(JitSpew_Escape, "Array is not escaped"); return false; } @@ -1067,11 +1240,12 @@ ArrayMemoryView::visitArrayLength(MArrayLength* ins) } bool -ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) +ScalarReplacement(MIRGenerator* mir, MIRGraph& graph, bool* success) { EmulateStateOf replaceObject(mir, graph); EmulateStateOf replaceArray(mir, graph); bool addedPhi = false; + *success = false; for (ReversePostorderIterator block = graph.rpoBegin(); block != graph.rpoEnd(); block++) { if (mir->shouldCancel("Scalar Replacement (main loop)")) @@ -1085,6 +1259,7 @@ ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) if (!replaceObject.run(view)) return false; view.assertSuccess(); + *success = true; addedPhi = true; continue; } @@ -1094,6 +1269,7 @@ ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) if (!replaceArray.run(view)) return false; view.assertSuccess(); + *success = true; addedPhi = true; continue; } diff --git a/js/src/jit/ScalarReplacement.h b/js/src/jit/ScalarReplacement.h index 836367f0f0f3..1869471a3147 100644 --- a/js/src/jit/ScalarReplacement.h +++ b/js/src/jit/ScalarReplacement.h @@ -15,7 +15,7 @@ class MIRGenerator; class MIRGraph; bool -ScalarReplacement(MIRGenerator* mir, MIRGraph& graph); +ScalarReplacement(MIRGenerator* mir, MIRGraph& graph, bool* success); } // namespace jit } // namespace js diff --git a/js/src/jit/SharedICHelpers.h b/js/src/jit/SharedICHelpers.h index df81577dc6d1..beaad3c07476 100644 --- a/js/src/jit/SharedICHelpers.h +++ b/js/src/jit/SharedICHelpers.h @@ -13,6 +13,8 @@ # include "jit/x64/SharedICHelpers-x64.h" #elif defined(JS_CODEGEN_ARM) # include "jit/arm/SharedICHelpers-arm.h" +#elif defined(JS_CODEGEN_ARM64) +# include "jit/arm64/SharedICHelpers-arm64.h" #elif defined(JS_CODEGEN_MIPS) # include "jit/mips/SharedICHelpers-mips.h" #elif defined(JS_CODEGEN_NONE) diff --git a/js/src/jit/SharedICRegisters.h b/js/src/jit/SharedICRegisters.h index df52efddaea3..efa2029e2b30 100644 --- a/js/src/jit/SharedICRegisters.h +++ b/js/src/jit/SharedICRegisters.h @@ -13,6 +13,8 @@ # include "jit/x64/SharedICRegisters-x64.h" #elif defined(JS_CODEGEN_ARM) # include "jit/arm/SharedICRegisters-arm.h" +#elif defined(JS_CODEGEN_ARM64) +# include "jit/arm64/SharedICRegisters-arm64.h" #elif defined(JS_CODEGEN_MIPS) # include 
"jit/mips/SharedICRegisters-mips.h" #elif defined(JS_CODEGEN_NONE) diff --git a/js/src/jit/VMFunctions.h b/js/src/jit/VMFunctions.h index 74bb7ab615e8..0fee01d4fd77 100644 --- a/js/src/jit/VMFunctions.h +++ b/js/src/jit/VMFunctions.h @@ -7,6 +7,8 @@ #ifndef jit_VMFunctions_h #define jit_VMFunctions_h +#include "mozilla/Attributes.h" + #include "jspubtd.h" #include "jit/CompileInfo.h" @@ -440,176 +442,114 @@ template <> struct MatchContext { static const bool valid = true; }; -#define FOR_EACH_ARGS_1(Macro, Sep, Last) Macro(1) Last(1) -#define FOR_EACH_ARGS_2(Macro, Sep, Last) FOR_EACH_ARGS_1(Macro, Sep, Sep) Macro(2) Last(2) -#define FOR_EACH_ARGS_3(Macro, Sep, Last) FOR_EACH_ARGS_2(Macro, Sep, Sep) Macro(3) Last(3) -#define FOR_EACH_ARGS_4(Macro, Sep, Last) FOR_EACH_ARGS_3(Macro, Sep, Sep) Macro(4) Last(4) -#define FOR_EACH_ARGS_5(Macro, Sep, Last) FOR_EACH_ARGS_4(Macro, Sep, Sep) Macro(5) Last(5) -#define FOR_EACH_ARGS_6(Macro, Sep, Last) FOR_EACH_ARGS_5(Macro, Sep, Sep) Macro(6) Last(6) -#define FOR_EACH_ARGS_7(Macro, Sep, Last) FOR_EACH_ARGS_6(Macro, Sep, Sep) Macro(7) Last(7) +// Extract the last element of a list of types. +template +struct LastArg; -#define COMPUTE_INDEX(NbArg) NbArg -#define COMPUTE_OUTPARAM_RESULT(NbArg) OutParamToDataType::result -#define COMPUTE_OUTPARAM_ROOT(NbArg) OutParamToRootType::result -#define COMPUTE_ARG_PROP(NbArg) (TypeToArgProperties::result << (2 * (NbArg - 1))) -#define COMPUTE_ARG_ROOT(NbArg) (uint64_t(TypeToRootType::result) << (3 * (NbArg - 1))) -#define COMPUTE_ARG_FLOAT(NbArg) (TypeToPassInFloatReg::result) << (NbArg - 1) -#define SEP_OR(_) | -#define NOTHING(_) - -#define FUNCTION_INFO_STRUCT_BODY(ForEachNb) \ - static inline DataType returnType() { \ - return TypeToDataType::result; \ - } \ - static inline DataType outParam() { \ - return ForEachNb(NOTHING, NOTHING, COMPUTE_OUTPARAM_RESULT); \ - } \ - static inline RootType outParamRootType() { \ - return ForEachNb(NOTHING, NOTHING, COMPUTE_OUTPARAM_ROOT); \ - } \ - static inline size_t NbArgs() { \ - return ForEachNb(NOTHING, NOTHING, COMPUTE_INDEX); \ - } \ - static inline size_t explicitArgs() { \ - return NbArgs() - (outParam() != Type_Void ? 
1 : 0); \ - } \ - static inline uint32_t argumentProperties() { \ - return ForEachNb(COMPUTE_ARG_PROP, SEP_OR, NOTHING); \ - } \ - static inline uint32_t argumentPassedInFloatRegs() { \ - return ForEachNb(COMPUTE_ARG_FLOAT, SEP_OR, NOTHING); \ - } \ - static inline uint64_t argumentRootTypes() { \ - return ForEachNb(COMPUTE_ARG_ROOT, SEP_OR, NOTHING); \ - } \ - explicit FunctionInfo(pf fun, MaybeTailCall expectTailCall, \ - PopValues extraValuesToPop = PopValues(0)) \ - : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), explicitArgs(), \ - argumentProperties(), argumentPassedInFloatRegs(), \ - argumentRootTypes(), outParam(), outParamRootType(), \ - returnType(), extraValuesToPop.numValues, expectTailCall) \ - { \ - static_assert(MatchContext::valid, "Invalid cx type in VMFunction"); \ - } \ - explicit FunctionInfo(pf fun, PopValues extraValuesToPop = PopValues(0)) \ - : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), explicitArgs(), \ - argumentProperties(), argumentPassedInFloatRegs(), \ - argumentRootTypes(), outParam(), outParamRootType(), \ - returnType(), extraValuesToPop.numValues, NonTailCall) \ - { \ - static_assert(MatchContext::valid, "Invalid cx type in VMFunction"); \ - } - -template -struct FunctionInfo { +template <> +struct LastArg<> +{ + typedef void Type; + static MOZ_CONSTEXPR_VAR size_t nbArgs = 0; }; -// VMFunction wrapper with no explicit arguments. -template -struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context); +template +struct LastArg +{ + typedef HeadType Type; + static MOZ_CONSTEXPR_VAR size_t nbArgs = 1; +}; - static inline DataType returnType() { +template +struct LastArg +{ + typedef typename LastArg::Type Type; + static MOZ_CONSTEXPR_VAR size_t nbArgs = LastArg::nbArgs + 1; +}; + +// Construct a bit mask from a list of types. The mask is constructed as an OR +// of the mask produced for each argument. The result of each argument is +// shifted by its index, such that the result of the first argument is on the +// low bits of the mask, and the result of the last argument in part of the +// high bits of the mask. +template class Each, typename ResultType, size_t Shift, + typename... Args> +struct BitMask; + +template class Each, typename ResultType, size_t Shift> +struct BitMask +{ + static MOZ_CONSTEXPR_VAR ResultType result = ResultType(); +}; + +template class Each, typename ResultType, size_t Shift, + typename HeadType, typename... TailTypes> +struct BitMask +{ + static_assert(ResultType(Each::result) < (1 << Shift), + "not enough bits reserved by the shift for individual results"); + static_assert(LastArg::nbArgs < (8 * sizeof(ResultType) / Shift), + "not enough bits in the result type to store all bit masks"); + + static MOZ_CONSTEXPR_VAR ResultType result = + ResultType(Each::result) | + (BitMask::result << Shift); +}; + +// Extract VMFunction properties based on the signature of the function. The +// properties are used to generate the logic for calling the VM function, and +// also for marking the stack during GCs. 
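// Sketch for illustration only, before the FunctionInfo specialization below
// consumes these helpers: the signature is made up, but HandleObject,
// MutableHandleValue and TypeToArgProperties are the existing types/traits used
// elsewhere in this header. For bool (*)(JSContext*, HandleObject, MutableHandleValue):

static_assert(LastArg<HandleObject, MutableHandleValue>::nbArgs == 2,
              "two argument types after the context");

// For the same pack, argumentProperties() reduces to the OR that the removed
// FOR_EACH_ARGS_* / COMPUTE_ARG_PROP macros spelled out by hand, i.e.
//
//     BitMask<TypeToArgProperties, uint32_t, 2, HandleObject, MutableHandleValue>::result
//         == (TypeToArgProperties<HandleObject>::result << 0) |
//            (TypeToArgProperties<MutableHandleValue>::result << 2)
//
// with the first argument landing in the low bits, as described above.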
+template +struct FunctionInfo; + +template +struct FunctionInfo : public VMFunction +{ + typedef R (*pf)(Context, Args...); + + static DataType returnType() { return TypeToDataType::result; } - static inline DataType outParam() { - return Type_Void; + static DataType outParam() { + return OutParamToDataType::Type>::result; } - static inline RootType outParamRootType() { - return RootNone; + static RootType outParamRootType() { + return OutParamToRootType::Type>::result; } - static inline size_t explicitArgs() { - return 0; + static size_t NbArgs() { + return LastArg::nbArgs; } - static inline uint32_t argumentProperties() { - return 0; + static size_t explicitArgs() { + return NbArgs() - (outParam() != Type_Void ? 1 : 0); } - static inline uint32_t argumentPassedInFloatRegs() { - return 0; + static uint32_t argumentProperties() { + return BitMask::result; } - static inline uint64_t argumentRootTypes() { - return 0; + static uint32_t argumentPassedInFloatRegs() { + return BitMask::result; } - explicit FunctionInfo(pf fun) - : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), explicitArgs(), - argumentProperties(), argumentPassedInFloatRegs(), - argumentRootTypes(), outParam(), outParamRootType(), - returnType(), 0, NonTailCall) + static uint64_t argumentRootTypes() { + return BitMask::result; + } + explicit FunctionInfo(pf fun, PopValues extraValuesToPop = PopValues(0)) + : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), explicitArgs(), + argumentProperties(), argumentPassedInFloatRegs(), + argumentRootTypes(), outParam(), outParamRootType(), + returnType(), extraValuesToPop.numValues, NonTailCall) { static_assert(MatchContext::valid, "Invalid cx type in VMFunction"); } - explicit FunctionInfo(pf fun, MaybeTailCall expectTailCall) - : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), explicitArgs(), - argumentProperties(), argumentPassedInFloatRegs(), - argumentRootTypes(), outParam(), outParamRootType(), - returnType(), expectTailCall) + explicit FunctionInfo(pf fun, MaybeTailCall expectTailCall, + PopValues extraValuesToPop = PopValues(0)) + : VMFunction(JS_FUNC_TO_DATA_PTR(void*, fun), explicitArgs(), + argumentProperties(), argumentPassedInFloatRegs(), + argumentRootTypes(), outParam(), outParamRootType(), + returnType(), extraValuesToPop.numValues, expectTailCall) { static_assert(MatchContext::valid, "Invalid cx type in VMFunction"); } }; -// Specialize the class for each number of argument used by VMFunction. -// Keep it verbose unless you find a readable macro for it. 
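// Sketch for illustration only: call sites do not change with the variadic
// FunctionInfo; only the fixed-arity specializations removed below become
// redundant. A hypothetical VM wrapper (DoSomething and DoSomethingFn are
// made-up names) is still declared the usual way:
//
//     bool DoSomething(JSContext* cx, HandleObject obj, MutableHandleValue rval);
//     typedef bool (*DoSomethingFn)(JSContext*, HandleObject, MutableHandleValue);
//     static const VMFunction DoSomethingInfo = FunctionInfo<DoSomethingFn>(DoSomething);
//
// The FunctionInfo<Fn>(fn) shape is the one existing callers already use.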
-template -struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context, A1); - FUNCTION_INFO_STRUCT_BODY(FOR_EACH_ARGS_1) -}; - -template -struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context, A1, A2); - FUNCTION_INFO_STRUCT_BODY(FOR_EACH_ARGS_2) -}; - -template -struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context, A1, A2, A3); - FUNCTION_INFO_STRUCT_BODY(FOR_EACH_ARGS_3) -}; - -template -struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context, A1, A2, A3, A4); - FUNCTION_INFO_STRUCT_BODY(FOR_EACH_ARGS_4) -}; - -template - struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context, A1, A2, A3, A4, A5); - FUNCTION_INFO_STRUCT_BODY(FOR_EACH_ARGS_5) -}; - -template - struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context, A1, A2, A3, A4, A5, A6); - FUNCTION_INFO_STRUCT_BODY(FOR_EACH_ARGS_6) -}; - -template - struct FunctionInfo : public VMFunction { - typedef R (*pf)(Context, A1, A2, A3, A4, A5, A6, A7); - FUNCTION_INFO_STRUCT_BODY(FOR_EACH_ARGS_7) -}; - -#undef FUNCTION_INFO_STRUCT_BODY - -#undef FOR_EACH_ARGS_7 -#undef FOR_EACH_ARGS_6 -#undef FOR_EACH_ARGS_5 -#undef FOR_EACH_ARGS_4 -#undef FOR_EACH_ARGS_3 -#undef FOR_EACH_ARGS_2 -#undef FOR_EACH_ARGS_1 - -#undef COMPUTE_INDEX -#undef COMPUTE_OUTPARAM_RESULT -#undef COMPUTE_OUTPARAM_ROOT -#undef COMPUTE_ARG_PROP -#undef COMPUTE_ARG_FLOAT -#undef SEP_OR -#undef NOTHING - class AutoDetectInvalidation { JSContext* cx_; diff --git a/js/src/jit/arm/Simulator-arm.cpp b/js/src/jit/arm/Simulator-arm.cpp index 1f2ffdf0493f..e26c5b005e7b 100644 --- a/js/src/jit/arm/Simulator-arm.cpp +++ b/js/src/jit/arm/Simulator-arm.cpp @@ -692,7 +692,7 @@ ArmDebugger::debug() i < 8 && (i % 2) == 0) { dvalue = getRegisterPairDoubleValue(i); - printf(" (%f)\n", dvalue); + printf(" (%.16g)\n", dvalue); } else { printf("\n"); } @@ -700,7 +700,7 @@ ArmDebugger::debug() for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) { dvalue = getVFPDoubleRegisterValue(i); uint64_t as_words = mozilla::BitwiseCast(dvalue); - printf("%3s: %f 0x%08x %08x\n", + printf("%3s: %.16g 0x%08x %08x\n", FloatRegister::FromCode(i).name(), dvalue, static_cast(as_words >> 32), @@ -711,7 +711,7 @@ ArmDebugger::debug() printf("%s: 0x%08x %d \n", arg1, value, value); } else if (getVFPDoubleValue(arg1, &dvalue)) { uint64_t as_words = mozilla::BitwiseCast(dvalue); - printf("%s: %f 0x%08x %08x\n", + printf("%s: %.16g 0x%08x %08x\n", arg1, dvalue, static_cast(as_words >> 32), diff --git a/js/src/jit/arm64/Architecture-arm64.cpp b/js/src/jit/arm64/Architecture-arm64.cpp new file mode 100644 index 000000000000..a5e62fb61c84 --- /dev/null +++ b/js/src/jit/arm64/Architecture-arm64.cpp @@ -0,0 +1,75 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/arm64/Architecture-arm64.h" + +#include + +#include "jit/RegisterSets.h" + +namespace js { +namespace jit { + +Registers::Code +Registers::FromName(const char* name) +{ + // Check for some register aliases first. 
+ if (strcmp(name, "ip0") == 0) + return ip0; + if (strcmp(name, "ip1") == 0) + return ip1; + if (strcmp(name, "fp") == 0) + return fp; + + for (uint32_t i = 0; i < Total; i++) { + if (strcmp(GetName(Code(i)), name) == 0) + return Code(i); + } + + return invalid_reg; +} + +FloatRegisters::Code +FloatRegisters::FromName(const char* name) +{ + for (size_t i = 0; i < Total; i++) { + if (strcmp(GetName(Code(i)), name) == 0) + return Code(i); + } + + return invalid_fpreg; +} + +FloatRegisterSet +FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) +{ + LiveFloatRegisterSet ret; + for (FloatRegisterIterator iter(s); iter.more(); ++iter) + ret.addUnchecked(FromCode((*iter).encoding())); + return ret.set(); +} + +uint32_t +FloatRegister::GetSizeInBytes(const FloatRegisterSet& s) +{ + return s.size() * sizeof(double); +} + +uint32_t +FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) +{ + return s.size() * sizeof(double); +} + +uint32_t +FloatRegister::getRegisterDumpOffsetInBytes() +{ + // Although registers are 128-bits wide, only the first 64 need saving per ABI. + return encoding() * sizeof(double); +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/Architecture-arm64.h b/js/src/jit/arm64/Architecture-arm64.h new file mode 100644 index 000000000000..d46da4c02f33 --- /dev/null +++ b/js/src/jit/arm64/Architecture-arm64.h @@ -0,0 +1,462 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_Architecture_arm64_h +#define jit_arm64_Architecture_arm64_h + +#include "mozilla/Assertions.h" +#include "mozilla/MathAlgorithms.h" + +#include "js/Utility.h" + +namespace js { +namespace jit { + +// AArch64 has 32 64-bit integer registers, x0 though x31. +// x31 is special and functions as both the stack pointer and a zero register. +// The bottom 32 bits of each of the X registers is accessible as w0 through w31. +// The program counter is no longer accessible as a register. +// SIMD and scalar floating-point registers share a register bank. +// 32 bit float registers are s0 through s31. +// 64 bit double registers are d0 through d31. +// 128 bit SIMD registers are v0 through v31. +// e.g., s0 is the bottom 32 bits of d0, which is the bottom 64 bits of v0. + +// AArch64 Calling Convention: +// x0 - x7: arguments and return value +// x8: indirect result (struct) location +// x9 - x15: temporary registers +// x16 - x17: intra-call-use registers (PLT, linker) +// x18: platform specific use (TLS) +// x19 - x28: callee-saved registers +// x29: frame pointer +// x30: link register + +// AArch64 Calling Convention for Floats: +// d0 - d7: arguments and return value +// d8 - d15: callee-saved registers +// Bits 64:128 are not saved for v8-v15. +// d16 - d31: temporary registers + +// AArch64 does not have soft float. + +class Registers { + public: + enum RegisterID { + w0 = 0, x0 = 0, + w1 = 1, x1 = 1, + w2 = 2, x2 = 2, + w3 = 3, x3 = 3, + w4 = 4, x4 = 4, + w5 = 5, x5 = 5, + w6 = 6, x6 = 6, + w7 = 7, x7 = 7, + w8 = 8, x8 = 8, + w9 = 9, x9 = 9, + w10 = 10, x10 = 10, + w11 = 11, x11 = 11, + w12 = 12, x12 = 12, + w13 = 13, x13 = 13, + w14 = 14, x14 = 14, + w15 = 15, x15 = 15, + w16 = 16, x16 = 16, ip0 = 16, // MacroAssembler scratch register 1. 
+ w17 = 17, x17 = 17, ip1 = 17, // MacroAssembler scratch register 2. + w18 = 18, x18 = 18, tls = 18, // Platform-specific use (TLS). + w19 = 19, x19 = 19, + w20 = 20, x20 = 20, + w21 = 21, x21 = 21, + w22 = 22, x22 = 22, + w23 = 23, x23 = 23, + w24 = 24, x24 = 24, + w25 = 25, x25 = 25, + w26 = 26, x26 = 26, + w27 = 27, x27 = 27, + w28 = 28, x28 = 28, + w29 = 29, x29 = 29, fp = 29, + w30 = 30, x30 = 30, lr = 30, + w31 = 31, x31 = 31, wzr = 31, xzr = 31, sp = 31, // Special: both stack pointer and a zero register. + invalid_reg + }; + typedef uint8_t Code; + typedef uint32_t Encoding; + typedef uint32_t SetType; + + union RegisterContent { + uintptr_t r; + }; + + static uint32_t SetSize(SetType x) { + static_assert(sizeof(SetType) == 4, "SetType must be 32 bits"); + return mozilla::CountPopulation32(x); + } + static uint32_t FirstBit(SetType x) { + return mozilla::CountTrailingZeroes32(x); + } + static uint32_t LastBit(SetType x) { + return 31 - mozilla::CountLeadingZeroes32(x); + } + + static const char* GetName(Code code) { + static const char* const Names[] = + { "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", + "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", + "lr", "sp", "invalid" }; + return Names[code]; + } + static const char* GetName(uint32_t i) { + MOZ_ASSERT(i < Total); + return GetName(Code(i)); + } + + static Code FromName(const char* name); + + // If SP is used as the base register for a memory load or store, then the value + // of the stack pointer prior to adding any offset must be quadword (16 byte) aligned, + // or else a stack aligment exception will be generated. + static const Code StackPointer = sp; + + static const Code Invalid = invalid_reg; + + static const uint32_t Total = 32; + static const uint32_t TotalPhys = 32; + static const uint32_t Allocatable = 27; // No named special-function registers. + + static const SetType AllMask = 0xFFFFFFFF; + + static const SetType ArgRegMask = + (1 << Registers::x0) | (1 << Registers::x1) | + (1 << Registers::x2) | (1 << Registers::x3) | + (1 << Registers::x4) | (1 << Registers::x5) | + (1 << Registers::x6) | (1 << Registers::x7) | + (1 << Registers::x8); + + static const SetType VolatileMask = + (1 << Registers::x0) | (1 << Registers::x1) | + (1 << Registers::x2) | (1 << Registers::x3) | + (1 << Registers::x4) | (1 << Registers::x5) | + (1 << Registers::x6) | (1 << Registers::x7) | + (1 << Registers::x8) | (1 << Registers::x9) | + (1 << Registers::x10) | (1 << Registers::x11) | + (1 << Registers::x11) | (1 << Registers::x12) | + (1 << Registers::x13) | (1 << Registers::x14) | + (1 << Registers::x14) | (1 << Registers::x15) | + (1 << Registers::x16) | (1 << Registers::x17) | + (1 << Registers::x18); + + static const SetType NonVolatileMask = + (1 << Registers::x19) | (1 << Registers::x20) | + (1 << Registers::x21) | (1 << Registers::x22) | + (1 << Registers::x23) | (1 << Registers::x24) | + (1 << Registers::x25) | (1 << Registers::x26) | + (1 << Registers::x27) | (1 << Registers::x28) | + (1 << Registers::x29) | (1 << Registers::x30); + + static const SetType SingleByteRegs = VolatileMask | NonVolatileMask; + + static const SetType NonAllocatableMask = + (1 << Registers::x28) | // PseudoStackPointer. + (1 << Registers::ip0) | // First scratch register. + (1 << Registers::ip1) | // Second scratch register. 
+ (1 << Registers::tls) | + (1 << Registers::lr) | + (1 << Registers::sp); + + // Registers that can be allocated without being saved, generally. + static const SetType TempMask = VolatileMask & ~NonAllocatableMask; + + static const SetType WrapperMask = VolatileMask; + + // Registers returned from a JS -> JS call. + static const SetType JSCallMask = (1 << Registers::x2); + + // Registers returned from a JS -> C call. + static const SetType CallMask = (1 << Registers::x0); + + static const SetType AllocatableMask = AllMask & ~NonAllocatableMask; +}; + +// Smallest integer type that can hold a register bitmask. +typedef uint32_t PackedRegisterMask; + +template +class TypedRegisterSet; + +class FloatRegisters +{ + public: + enum FPRegisterID { + s0 = 0, d0 = 0, v0 = 0, + s1 = 1, d1 = 1, v1 = 1, + s2 = 2, d2 = 2, v2 = 2, + s3 = 3, d3 = 3, v3 = 3, + s4 = 4, d4 = 4, v4 = 4, + s5 = 5, d5 = 5, v5 = 5, + s6 = 6, d6 = 6, v6 = 6, + s7 = 7, d7 = 7, v7 = 7, + s8 = 8, d8 = 8, v8 = 8, + s9 = 9, d9 = 9, v9 = 9, + s10 = 10, d10 = 10, v10 = 10, + s11 = 11, d11 = 11, v11 = 11, + s12 = 12, d12 = 12, v12 = 12, + s13 = 13, d13 = 13, v13 = 13, + s14 = 14, d14 = 14, v14 = 14, + s15 = 15, d15 = 15, v15 = 15, + s16 = 16, d16 = 16, v16 = 16, + s17 = 17, d17 = 17, v17 = 17, + s18 = 18, d18 = 18, v18 = 18, + s19 = 19, d19 = 19, v19 = 19, + s20 = 20, d20 = 20, v20 = 20, + s21 = 21, d21 = 21, v21 = 21, + s22 = 22, d22 = 22, v22 = 22, + s23 = 23, d23 = 23, v23 = 23, + s24 = 24, d24 = 24, v24 = 24, + s25 = 25, d25 = 25, v25 = 25, + s26 = 26, d26 = 26, v26 = 26, + s27 = 27, d27 = 27, v27 = 27, + s28 = 28, d28 = 28, v28 = 28, + s29 = 29, d29 = 29, v29 = 29, + s30 = 30, d30 = 30, v30 = 30, + s31 = 31, d31 = 31, v31 = 31, // Scratch register. + invalid_fpreg + }; + typedef uint8_t Code; + typedef FPRegisterID Encoding; + typedef uint64_t SetType; + + static const char* GetName(Code code) { + static const char* const Names[] = + { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", + "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", + "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", + "d30", "d31", "invalid" }; + return Names[code]; + } + + static const char* GetName(uint32_t i) { + MOZ_ASSERT(i < TotalPhys); + return GetName(Code(i)); + } + + static Code FromName(const char* name); + + static const Code Invalid = invalid_fpreg; + + static const uint32_t Total = 64; + static const uint32_t TotalPhys = 32; + static const SetType AllMask = 0xFFFFFFFFFFFFFFFFULL; + static const SetType AllPhysMask = 0xFFFFFFFFULL; + static const SetType SpreadCoefficient = 0x100000001ULL; + + static const uint32_t Allocatable = 31; // Without d31, the scratch register. + + // d31 is the ScratchFloatReg. 
+ static const SetType NonVolatileMask = + SetType((1 << FloatRegisters::d8) | (1 << FloatRegisters::d9) | + (1 << FloatRegisters::d10) | (1 << FloatRegisters::d11) | + (1 << FloatRegisters::d12) | (1 << FloatRegisters::d13) | + (1 << FloatRegisters::d14) | (1 << FloatRegisters::d15) | + (1 << FloatRegisters::d16) | (1 << FloatRegisters::d17) | + (1 << FloatRegisters::d18) | (1 << FloatRegisters::d19) | + (1 << FloatRegisters::d20) | (1 << FloatRegisters::d21) | + (1 << FloatRegisters::d22) | (1 << FloatRegisters::d23) | + (1 << FloatRegisters::d24) | (1 << FloatRegisters::d25) | + (1 << FloatRegisters::d26) | (1 << FloatRegisters::d27) | + (1 << FloatRegisters::d28) | (1 << FloatRegisters::d29) | + (1 << FloatRegisters::d30)) * SpreadCoefficient; + + static const SetType VolatileMask = AllMask & ~NonVolatileMask; + static const SetType AllDoubleMask = AllMask; + + static const SetType WrapperMask = VolatileMask; + + // d31 is the ScratchFloatReg. + static const SetType NonAllocatableMask = (SetType(1) << FloatRegisters::d31) * SpreadCoefficient; + + // Registers that can be allocated without being saved, generally. + static const SetType TempMask = VolatileMask & ~NonAllocatableMask; + + static const SetType AllocatableMask = AllMask & ~NonAllocatableMask; + union RegisterContent { + float s; + double d; + }; + enum Kind { + Double, + Single + }; +}; + +// In bytes: slots needed for potential memory->memory move spills. +// +8 for cycles +// +8 for gpr spills +// +8 for double spills +static const uint32_t ION_FRAME_SLACK_SIZE = 24; + +static const uint32_t ShadowStackSpace = 0; + +static const uint32_t ABIStackAlignment = 16; +static const uint32_t CodeAlignment = 16; +static const bool StackKeptAligned = false; + +// Although sp is only usable if 16-byte alignment is kept, +// the Pseudo-StackPointer enables use of 8-byte alignment. 
+static const uint32_t StackAlignment = 8; +static const uint32_t NativeFrameSize = 8; + +struct FloatRegister +{ + typedef FloatRegisters Codes; + typedef Codes::Code Code; + typedef Codes::Encoding Encoding; + typedef Codes::SetType SetType; + + union RegisterContent { + float s; + double d; + }; + + constexpr FloatRegister(uint32_t code, FloatRegisters::Kind k) + : code_(FloatRegisters::Code(code & 31)), + k_(k) + { } + + constexpr FloatRegister(uint32_t code) + : code_(FloatRegisters::Code(code & 31)), + k_(FloatRegisters::Kind(code >> 5)) + { } + + constexpr FloatRegister() + : code_(FloatRegisters::Code(-1)), + k_(FloatRegisters::Double) + { } + + static uint32_t SetSize(SetType x) { + static_assert(sizeof(SetType) == 8, "SetType must be 64 bits"); + x |= x >> FloatRegisters::TotalPhys; + x &= FloatRegisters::AllPhysMask; + return mozilla::CountPopulation32(x); + } + + static FloatRegister FromCode(uint32_t i) { + MOZ_ASSERT(i < FloatRegisters::Total); + FloatRegister r(i); + return r; + } + Code code() const { + MOZ_ASSERT((uint32_t)code_ < FloatRegisters::Total); + return Code(code_ | (k_ << 5)); + } + Encoding encoding() const { + return Encoding(code_); + } + + const char* name() const { + return FloatRegisters::GetName(code()); + } + bool volatile_() const { + return !!((SetType(1) << code()) & FloatRegisters::VolatileMask); + } + bool operator!=(FloatRegister other) const { + return other.code_ != code_ || other.k_ != k_; + } + bool operator==(FloatRegister other) const { + return other.code_ == code_ && other.k_ == k_; + } + bool aliases(FloatRegister other) const { + return other.code_ == code_; + } + uint32_t numAliased() const { + return 2; + } + static FloatRegisters::Kind otherkind(FloatRegisters::Kind k) { + if (k == FloatRegisters::Double) + return FloatRegisters::Single; + return FloatRegisters::Double; + } + void aliased(uint32_t aliasIdx, FloatRegister* ret) { + if (aliasIdx == 0) + *ret = *this; + else + *ret = FloatRegister(code_, otherkind(k_)); + } + // This function mostly exists for the ARM backend. It is to ensure that two + // floating point registers' types are equivalent. e.g. S0 is not equivalent + // to D16, since S0 holds a float32, and D16 holds a Double. + // Since all floating point registers on x86 and x64 are equivalent, it is + // reasonable for this function to do the same. + bool equiv(FloatRegister other) const { + return k_ == other.k_; + } + MOZ_CONSTEXPR uint32_t size() const { + return k_ == FloatRegisters::Double ? 
sizeof(double) : sizeof(float); + } + uint32_t numAlignedAliased() { + return numAliased(); + } + void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) { + MOZ_ASSERT(aliasIdx == 0); + aliased(aliasIdx, ret); + } + SetType alignedOrDominatedAliasedSet() const { + return Codes::SpreadCoefficient << code_; + } + + bool isSingle() const { + return k_ == FloatRegisters::Single; + } + bool isDouble() const { + return k_ == FloatRegisters::Double; + } + bool isInt32x4() const { + return false; + } + bool isFloat32x4() const { + return false; + } + + static uint32_t FirstBit(SetType x) { + JS_STATIC_ASSERT(sizeof(SetType) == 8); + return mozilla::CountTrailingZeroes64(x); + } + static uint32_t LastBit(SetType x) { + JS_STATIC_ASSERT(sizeof(SetType) == 8); + return 63 - mozilla::CountLeadingZeroes64(x); + } + + static TypedRegisterSet ReduceSetForPush(const TypedRegisterSet& s); + static uint32_t GetSizeInBytes(const TypedRegisterSet& s); + static uint32_t GetPushSizeInBytes(const TypedRegisterSet& s); + uint32_t getRegisterDumpOffsetInBytes(); + + public: + Code code_ : 8; + FloatRegisters::Kind k_ : 1; +}; + +// ARM/D32 has double registers that cannot be treated as float32. +// Luckily, ARMv8 doesn't have the same misfortune. +inline bool +hasUnaliasedDouble() +{ + return false; +} + +// ARM prior to ARMv8 also has doubles that alias multiple floats. +// Again, ARMv8 is in the clear. +inline bool +hasMultiAlias() +{ + return false; +} + +static const size_t AsmJSCheckedImmediateRange = 0; +static const size_t AsmJSImmediateRange = 0; + +} // namespace jit +} // namespace js + +#endif // jit_arm64_Architecture_arm64_h diff --git a/js/src/jit/arm64/Assembler-arm64.cpp b/js/src/jit/arm64/Assembler-arm64.cpp new file mode 100644 index 000000000000..e0c7a9b8082e --- /dev/null +++ b/js/src/jit/arm64/Assembler-arm64.cpp @@ -0,0 +1,626 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/arm64/Assembler-arm64.h" + +#include "mozilla/DebugOnly.h" +#include "mozilla/MathAlgorithms.h" + +#include "jscompartment.h" +#include "jsutil.h" + +#include "gc/Marking.h" + +#include "jit/arm64/MacroAssembler-arm64.h" +#include "jit/ExecutableAllocator.h" +#include "jit/JitCompartment.h" + +using namespace js; +using namespace js::jit; + +using mozilla::CountLeadingZeroes32; +using mozilla::DebugOnly; + +// Note this is used for inter-AsmJS calls and may pass arguments and results +// in floating point registers even if the system ABI does not. + +ABIArg +ABIArgGenerator::next(MIRType type) +{ + switch (type) { + case MIRType_Int32: + case MIRType_Pointer: + if (intRegIndex_ == NumIntArgRegs) { + current_ = ABIArg(stackOffset_); + stackOffset_ += sizeof(uintptr_t); + break; + } + current_ = ABIArg(Register::FromCode(intRegIndex_)); + intRegIndex_++; + break; + + case MIRType_Float32: + case MIRType_Double: + if (floatRegIndex_ == NumFloatArgRegs) { + current_ = ABIArg(stackOffset_); + stackOffset_ += sizeof(double); + break; + } + current_ = ABIArg(FloatRegister(floatRegIndex_, + type == MIRType_Double ? 
FloatRegisters::Double + : FloatRegisters::Single)); + floatRegIndex_++; + break; + + default: + MOZ_CRASH("Unexpected argument type"); + } + return current_; +} + +const Register ABIArgGenerator::NonArgReturnReg0 = r8; +const Register ABIArgGenerator::NonArgReturnReg1 = r9; +const Register ABIArgGenerator::NonVolatileReg = r1; +const Register ABIArgGenerator::NonArg_VolatileReg = r13; +const Register ABIArgGenerator::NonReturn_VolatileReg0 = r2; +const Register ABIArgGenerator::NonReturn_VolatileReg1 = r3; + +namespace js { +namespace jit { + +void +Assembler::finish() +{ + armbuffer_.flushPool(); + + // The extended jump table is part of the code buffer. + ExtendedJumpTable_ = emitExtendedJumpTable(); + Assembler::FinalizeCode(); + + // The jump relocation table starts with a fixed-width integer pointing + // to the start of the extended jump table. + if (tmpJumpRelocations_.length()) + jumpRelocations_.writeFixedUint32_t(toFinalOffset(ExtendedJumpTable_)); + + for (unsigned int i = 0; i < tmpJumpRelocations_.length(); i++) { + JumpRelocation& reloc = tmpJumpRelocations_[i]; + + // Each entry in the relocations table is an (offset, extendedTableIndex) pair. + jumpRelocations_.writeUnsigned(toFinalOffset(reloc.jump)); + jumpRelocations_.writeUnsigned(reloc.extendedTableIndex); + } + + for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++) + dataRelocations_.writeUnsigned(toFinalOffset(tmpDataRelocations_[i])); + + for (unsigned int i = 0; i < tmpPreBarriers_.length(); i++) + preBarriers_.writeUnsigned(toFinalOffset(tmpPreBarriers_[i])); +} + +BufferOffset +Assembler::emitExtendedJumpTable() +{ + if (!pendingJumps_.length() || oom()) + return BufferOffset(); + + armbuffer_.flushPool(); + armbuffer_.align(SizeOfJumpTableEntry); + + BufferOffset tableOffset = armbuffer_.nextOffset(); + + for (size_t i = 0; i < pendingJumps_.length(); i++) { + // Each JumpTableEntry is of the form: + // LDR ip0 [PC, 8] + // BR ip0 + // [Patchable 8-byte constant low bits] + // [Patchable 8-byte constant high bits] + DebugOnly preOffset = size_t(armbuffer_.nextOffset().getOffset()); + + ldr(vixl::ip0, ptrdiff_t(8 / vixl::kInstructionSize)); + br(vixl::ip0); + + DebugOnly prePointer = size_t(armbuffer_.nextOffset().getOffset()); + MOZ_ASSERT(prePointer - preOffset == OffsetOfJumpTableEntryPointer); + + brk(0x0); + brk(0x0); + + DebugOnly postOffset = size_t(armbuffer_.nextOffset().getOffset()); + + MOZ_ASSERT(postOffset - preOffset == SizeOfJumpTableEntry); + } + + return tableOffset; +} + +void +Assembler::executableCopy(uint8_t* buffer) +{ + // Copy the code and all constant pools into the output buffer. + armbuffer_.executableCopy(buffer); + + // Patch any relative jumps that target code outside the buffer. + // The extended jump table may be used for distant jumps. + for (size_t i = 0; i < pendingJumps_.length(); i++) { + RelativePatch& rp = pendingJumps_[i]; + + if (!rp.target) { + // The patch target is nullptr for jumps that have been linked to + // a label within the same code block, but may be repatched later + // to jump to a different code block. 
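+            // Nothing more to do for these: the in-buffer branch was already
+            // resolved when its label was bound.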
+ continue; + } + + Instruction* target = (Instruction*)rp.target; + Instruction* branch = (Instruction*)(buffer + toFinalOffset(rp.offset)); + JumpTableEntry* extendedJumpTable = + reinterpret_cast(buffer + toFinalOffset(ExtendedJumpTable_)); + if (branch->BranchType() != vixl::UnknownBranchType) { + if (branch->IsTargetReachable(target)) { + branch->SetImmPCOffsetTarget(target); + } else { + JumpTableEntry* entry = &extendedJumpTable[i]; + branch->SetImmPCOffsetTarget(entry->getLdr()); + entry->data = target; + } + } else { + // Currently a two-instruction call, it should be possible to optimize this + // into a single instruction call + nop in some instances, but this will work. + } + } +} + +BufferOffset +Assembler::immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, ARMBuffer::PoolEntry* pe) +{ + uint32_t inst = op | Rt(dest); + const size_t numInst = 1; + const unsigned sizeOfPoolEntryInBytes = 4; + const unsigned numPoolEntries = sizeof(value) / sizeOfPoolEntryInBytes; + return armbuffer_.allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value, pe); +} + +BufferOffset +Assembler::immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe) +{ + return immPool(dest, (uint8_t*)&value, vixl::LDR_x_lit, pe); +} + +BufferOffset +Assembler::immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c) +{ + MOZ_CRASH("immPool64Branch"); +} + +BufferOffset +Assembler::fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op) +{ + uint32_t inst = op | Rt(dest); + const size_t numInst = 1; + const unsigned sizeOfPoolEntryInBits = 32; + const unsigned numPoolEntries = dest.size() / sizeOfPoolEntryInBits; + return armbuffer_.allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value); +} + +BufferOffset +Assembler::fImmPool64(ARMFPRegister dest, double value) +{ + return fImmPool(dest, (uint8_t*)&value, vixl::LDR_d_lit); +} +BufferOffset +Assembler::fImmPool32(ARMFPRegister dest, float value) +{ + return fImmPool(dest, (uint8_t*)&value, vixl::LDR_s_lit); +} + +void +Assembler::bind(Label* label, BufferOffset targetOffset) +{ + // Nothing has seen the label yet: just mark the location. + if (!label->used()) { + label->bind(targetOffset.getOffset()); + return; + } + + // Get the most recent instruction that used the label, as stored in the label. + // This instruction is the head of an implicit linked list of label uses. + uint32_t branchOffset = label->offset(); + + while ((int32_t)branchOffset != LabelBase::INVALID_OFFSET) { + Instruction* link = getInstructionAt(BufferOffset(branchOffset)); + + // Before overwriting the offset in this instruction, get the offset of + // the next link in the implicit branch list. + uint32_t nextLinkOffset = uint32_t(link->ImmPCRawOffset()); + if (nextLinkOffset != uint32_t(LabelBase::INVALID_OFFSET)) + nextLinkOffset += branchOffset; + // Linking against the actual (Instruction*) would be invalid, + // since that Instruction could be anywhere in memory. + // Instead, just link against the correct relative offset, assuming + // no constant pools, which will be taken into consideration + // during finalization. + ptrdiff_t relativeByteOffset = targetOffset.getOffset() - branchOffset; + Instruction* target = (Instruction*)(((uint8_t*)link) + relativeByteOffset); + + // Write a new relative offset into the instruction. + link->SetImmPCOffsetTarget(target); + branchOffset = nextLinkOffset; + } + + // Bind the label, so that future uses may encode the offset immediately. 
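+    // The loop above rewrote every branch on the label's implicit use chain,
+    // which is threaded through the branches' own immediate-offset fields.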
+ label->bind(targetOffset.getOffset()); +} + +void +Assembler::bind(RepatchLabel* label) +{ + // Nothing has seen the label yet: just mark the location. + if (!label->used()) { + label->bind(nextOffset().getOffset()); + return; + } + int branchOffset = label->offset(); + Instruction* inst = getInstructionAt(BufferOffset(branchOffset)); + inst->SetImmPCOffsetTarget(inst + nextOffset().getOffset() - branchOffset); +} + +void +Assembler::trace(JSTracer* trc) +{ + for (size_t i = 0; i < pendingJumps_.length(); i++) { + RelativePatch& rp = pendingJumps_[i]; + if (rp.kind == Relocation::JITCODE) { + JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target); + TraceManuallyBarrieredEdge(trc, &code, "masmrel32"); + MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target)); + } + } + + // TODO: Trace. +#if 0 + if (tmpDataRelocations_.length()) + ::TraceDataRelocations(trc, &armbuffer_, &tmpDataRelocations_); +#endif +} + +void +Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc) +{ + // Only JITCODE relocations are patchable at runtime. + MOZ_ASSERT(reloc == Relocation::JITCODE); + + // Each relocation requires an entry in the extended jump table. + tmpJumpRelocations_.append(JumpRelocation(src, pendingJumps_.length())); +} + +void +Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc) +{ + MOZ_ASSERT(target.value != nullptr); + + if (reloc == Relocation::JITCODE) + addJumpRelocation(src, reloc); + + // This jump is not patchable at runtime. Extended jump table entry requirements + // cannot be known until finalization, so to be safe, give each jump and entry. + // This also causes GC tracing of the target. + enoughMemory_ &= pendingJumps_.append(RelativePatch(src, target.value, reloc)); +} + +size_t +Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc) +{ + MOZ_CRASH("TODO: This is currently unused (and untested)"); + if (reloc == Relocation::JITCODE) + addJumpRelocation(src, reloc); + + size_t extendedTableIndex = pendingJumps_.length(); + enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc)); + return extendedTableIndex; +} + +void +PatchJump(CodeLocationJump& jump_, CodeLocationLabel label) +{ + MOZ_CRASH("PatchJump"); +} + +void +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, + PatchedImmPtr expected) +{ + Instruction* i = (Instruction*)label.raw(); + void** pValue = i->LiteralAddress(); + MOZ_ASSERT(*pValue == expected.value); + *pValue = newValue.value; +} + +void +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expected) +{ + PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expected.value)); +} + +void +Assembler::ToggleToJmp(CodeLocationLabel inst_) +{ + Instruction* i = (Instruction*)inst_.raw(); + MOZ_ASSERT(i->IsAddSubImmediate()); + + // Refer to instruction layout in ToggleToCmp(). + int imm19 = (int)i->Bits(23, 5); + MOZ_ASSERT(vixl::is_int19(imm19)); + + b(i, imm19, Always); +} + +void +Assembler::ToggleToCmp(CodeLocationLabel inst_) +{ + Instruction* i = (Instruction*)inst_.raw(); + MOZ_ASSERT(i->IsCondB()); + + int imm19 = i->ImmCondBranch(); + // bit 23 is reserved, and the simulator throws an assertion when this happens + // It'll be messy to decode, but we can steal bit 30 or bit 31. + MOZ_ASSERT(vixl::is_int18(imm19)); + + // 31 - 64-bit if set, 32-bit if unset. (OK!) + // 30 - sub if set, add if unset. (OK!) + // 29 - SetFlagsBit. Must be set. + // 22:23 - ShiftAddSub. (OK!) 
+ // 10:21 - ImmAddSub. (OK!) + // 5:9 - First source register (Rn). (OK!) + // 0:4 - Destination Register. Must be xzr. + + // From the above, there is a safe 19-bit contiguous region from 5:23. + Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB | Flags(vixl::SetFlags) | + Rd(vixl::xzr) | (imm19 << vixl::Rn_offset)); +} + +void +Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) +{ + Instruction* first = (Instruction*)inst_.raw(); + Instruction* load; + Instruction* call; + + if (first->InstructionBits() == 0x9100039f) { + load = (Instruction*)NextInstruction(first); + call = NextInstruction(load); + } else { + load = first; + call = NextInstruction(first); + } + + if (call->IsBLR() == enabled) + return; + + if (call->IsBLR()) { + // if the second instruction is blr(), then wehave: + // ldr x17, [pc, offset] + // blr x17 + // we want to transform this to: + // adr xzr, [pc, offset] + // nop + int32_t offset = load->ImmLLiteral(); + adr(load, xzr, int32_t(offset)); + nop(call); + } else { + // we have adr xzr, [pc, offset] + // nop + // transform this to + // ldr x17, [pc, offset] + // blr x17 + + int32_t offset = (int)load->ImmPCRawOffset(); + MOZ_ASSERT(vixl::is_int19(offset)); + ldr(load, ScratchReg2_64, int32_t(offset)); + blr(call, ScratchReg2_64); + } +} + +class RelocationIterator +{ + CompactBufferReader reader_; + uint32_t tableStart_; + uint32_t offset_; + uint32_t extOffset_; + + public: + explicit RelocationIterator(CompactBufferReader& reader) + : reader_(reader) + { + // The first uint32_t stores the extended table offset. + tableStart_ = reader_.readFixedUint32_t(); + } + + bool read() { + if (!reader_.more()) + return false; + offset_ = reader_.readUnsigned(); + extOffset_ = reader_.readUnsigned(); + return true; + } + + uint32_t offset() const { + return offset_; + } + uint32_t extendedOffset() const { + return extOffset_; + } +}; + +static JitCode* +CodeFromJump(JitCode* code, uint8_t* jump) +{ + Instruction* branch = (Instruction*)jump; + uint8_t* target; + // If this is a toggled branch, and is currently off, then we have some 'splainin + if (branch->BranchType() == vixl::UnknownBranchType) + target = (uint8_t*)branch->Literal64(); + else + target = (uint8_t*)branch->ImmPCOffsetTarget(); + + // If the jump is within the code buffer, it uses the extended jump table. + if (target >= code->raw() && target < code->raw() + code->instructionsSize()) { + MOZ_ASSERT(target + Assembler::SizeOfJumpTableEntry <= code->raw() + code->instructionsSize()); + + uint8_t** patchablePtr = (uint8_t**)(target + Assembler::OffsetOfJumpTableEntryPointer); + target = *patchablePtr; + } + + return JitCode::FromExecutable(target); +} + +void +Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader) +{ + RelocationIterator iter(reader); + while (iter.read()) { + JitCode* child = CodeFromJump(code, code->raw() + iter.offset()); + TraceManuallyBarrieredEdge(trc, &child, "rel32"); + MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset())); + } +} + +static void +TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader) +{ + while (reader.more()) { + size_t offset = reader.readUnsigned(); + Instruction* load = (Instruction*)&buffer[offset]; + + // The only valid traceable operation is a 64-bit load to an ARMRegister. + // Refer to movePatchablePtr() for generation. 
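+        // LiteralAddress() below resolves the PC-relative load to its
+        // constant-pool slot, so the traced value is updated in place in the pool.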
+ MOZ_ASSERT(load->Mask(vixl::LoadLiteralMask) == vixl::LDR_x_lit); + + uintptr_t* literalAddr = load->LiteralAddress(); + uintptr_t literal = *literalAddr; + + // All pointers on AArch64 will have the top bits cleared. + // If those bits are not cleared, this must be a Value. + if (literal >> JSVAL_TAG_SHIFT) { + jsval_layout layout; + layout.asBits = literal; + Value v = IMPL_TO_JSVAL(layout); + TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value"); + *literalAddr = JSVAL_TO_IMPL(v).asBits; + + // TODO: When we can, flush caches here if a pointer was moved. + continue; + } + + // No barriers needed since the pointers are constants. + TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast(literalAddr), + "ion-masm-ptr"); + + // TODO: Flush caches at end? + } +} + +void +Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader) +{ + ::TraceDataRelocations(trc, code->raw(), reader); +} + +void +Assembler::FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader, + const ObjectVector& nurseryObjects) +{ + + MOZ_ASSERT(!nurseryObjects.empty()); + + uint8_t* buffer = code->raw(); + bool hasNurseryPointers = false; + + while (reader.more()) { + size_t offset = reader.readUnsigned(); + Instruction* ins = (Instruction*)&buffer[offset]; + + uintptr_t* literalAddr = ins->LiteralAddress(); + uintptr_t literal = *literalAddr; + + if (literal >> JSVAL_TAG_SHIFT) + continue; // This is a Value. + + if (!(literal & 0x1)) + continue; + + uint32_t index = literal >> 1; + JSObject* obj = nurseryObjects[index]; + *literalAddr = uintptr_t(obj); + + // Either all objects are still in the nursery, or all objects are tenured. + MOZ_ASSERT_IF(hasNurseryPointers, IsInsideNursery(obj)); + + if (!hasNurseryPointers && IsInsideNursery(obj)) + hasNurseryPointers = true; + } + + if (hasNurseryPointers) + cx->runtime()->gc.storeBuffer.putWholeCellFromMainThread(code); +} + +int32_t +Assembler::ExtractCodeLabelOffset(uint8_t* code) +{ + return *(int32_t*)code; +} + +void +Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm) +{ + MOZ_CRASH("PatchInstructionImmediate()"); +} + +void +Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst) +{ + int32_t mask = ~(heapSize - 1); + unsigned n, imm_s, imm_r; + if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r)) + MOZ_CRASH("Could not encode immediate!?"); + + inst->SetImmR(imm_r); + inst->SetImmS(imm_s); + inst->SetBitN(n); +} + +void +Assembler::retarget(Label* label, Label* target) +{ + if (label->used()) { + if (target->bound()) { + bind(label, BufferOffset(target)); + } else if (target->used()) { + // The target is not bound but used. Prepend label's branch list + // onto target's. + BufferOffset labelBranchOffset(label); + BufferOffset next; + + // Find the head of the use chain for label. + while (nextLink(labelBranchOffset, &next)) + labelBranchOffset = next; + + // Then patch the head of label's use chain to the tail of target's + // use chain, prepending the entire use chain of target. + Instruction* branch = getInstructionAt(labelBranchOffset); + target->use(label->offset()); + branch->SetImmPCOffsetTarget(branch - labelBranchOffset.getOffset()); + } else { + // The target is unbound and unused. We can just take the head of + // the list hanging off of label, and dump that into target. 
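+            // use() returns the previous head of target's use chain, which must
+            // be empty (INVALID_OFFSET) here; the assertion below checks that.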
+ DebugOnly prev = target->use(label->offset()); + MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET); + } + } + label->reset(); +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/Assembler-arm64.h b/js/src/jit/arm64/Assembler-arm64.h new file mode 100644 index 000000000000..62d166397782 --- /dev/null +++ b/js/src/jit/arm64/Assembler-arm64.h @@ -0,0 +1,587 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef A64_ASSEMBLER_A64_H_ +#define A64_ASSEMBLER_A64_H_ + +#include "jit/arm64/vixl/Assembler-vixl.h" + +#include "jit/JitCompartment.h" + +namespace js { +namespace jit { + +// VIXL imports. +typedef vixl::Register ARMRegister; +typedef vixl::FPRegister ARMFPRegister; +using vixl::ARMBuffer; +using vixl::Instruction; + +static const uint32_t AlignmentAtPrologue = 0; +static const uint32_t AlignmentMidPrologue = 8; +static const Scale ScalePointer = TimesEight; +static const uint32_t AlignmentAtAsmJSPrologue = sizeof(void*); + +// The MacroAssembler uses scratch registers extensively and unexpectedly. +// For safety, scratch registers should always be acquired using +// vixl::UseScratchRegisterScope. +static constexpr Register ScratchReg = { Registers::ip0 }; +static constexpr ARMRegister ScratchReg64 = { ScratchReg, 64 }; + +static constexpr Register ScratchReg2 = { Registers::ip1 }; +static constexpr ARMRegister ScratchReg2_64 = { ScratchReg2, 64 }; + +static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::d31 }; +static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::d0 }; + +static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::s0 , FloatRegisters::Single }; +static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::s31 , FloatRegisters::Single }; + +static constexpr Register InvalidReg = { Registers::invalid_reg }; +static constexpr FloatRegister InvalidFloatReg = { FloatRegisters::invalid_fpreg }; + +static constexpr FloatRegister ReturnInt32x4Reg = InvalidFloatReg; +static constexpr FloatRegister ReturnFloat32x4Reg = InvalidFloatReg; + +static constexpr Register OsrFrameReg = { Registers::x3 }; +static constexpr Register ArgumentsRectifierReg = { Registers::x8 }; +static constexpr Register CallTempReg0 = { Registers::x9 }; +static constexpr Register CallTempReg1 = { Registers::x10 }; +static constexpr Register CallTempReg2 = { Registers::x11 }; +static constexpr Register CallTempReg3 = { Registers::x12 }; +static constexpr Register CallTempReg4 = { Registers::x13 }; +static constexpr Register CallTempReg5 = { Registers::x14 }; + +static constexpr Register PreBarrierReg = { Registers::x1 }; + +static constexpr Register ReturnReg = { Registers::x0 }; +static constexpr Register JSReturnReg = { Registers::x2 }; +static constexpr Register FramePointer = { Registers::fp }; +static constexpr Register ZeroRegister = { Registers::sp }; +static constexpr ARMRegister ZeroRegister64 = { Registers::sp, 64 }; +static constexpr ARMRegister ZeroRegister32 = { Registers::sp, 32 }; + +static constexpr FloatRegister ReturnFloatReg = { FloatRegisters::d0 }; +static constexpr FloatRegister ScratchFloatReg = { FloatRegisters::d31 }; + +static constexpr FloatRegister ReturnSimdReg = InvalidFloatReg; +static constexpr FloatRegister ScratchSimdReg = 
InvalidFloatReg; + +// StackPointer is intentionally undefined on ARM64 to prevent misuse: +// using sp as a base register is only valid if sp % 16 == 0. +static constexpr Register RealStackPointer = { Registers::sp }; +// TODO: We're not quite there yet. +static constexpr Register StackPointer = { Registers::sp }; + +static constexpr Register PseudoStackPointer = { Registers::x28 }; +static constexpr ARMRegister PseudoStackPointer64 = { Registers::x28, 64 }; +static constexpr ARMRegister PseudoStackPointer32 = { Registers::x28, 32 }; + +// StackPointer for use by irregexp. +static constexpr Register RegExpStackPointer = PseudoStackPointer; + +static constexpr Register IntArgReg0 = { Registers::x0 }; +static constexpr Register IntArgReg1 = { Registers::x1 }; +static constexpr Register IntArgReg2 = { Registers::x2 }; +static constexpr Register IntArgReg3 = { Registers::x3 }; +static constexpr Register IntArgReg4 = { Registers::x4 }; +static constexpr Register IntArgReg5 = { Registers::x5 }; +static constexpr Register IntArgReg6 = { Registers::x6 }; +static constexpr Register IntArgReg7 = { Registers::x7 }; +static constexpr Register GlobalReg = { Registers::x20 }; +static constexpr Register HeapReg = { Registers::x21 }; +static constexpr Register HeapLenReg = { Registers::x22 }; + +// Define unsized Registers. +#define DEFINE_UNSIZED_REGISTERS(N) \ +static constexpr Register r##N = { Registers::x##N }; +REGISTER_CODE_LIST(DEFINE_UNSIZED_REGISTERS) +#undef DEFINE_UNSIZED_REGISTERS +static constexpr Register ip0 = { Registers::x16 }; +static constexpr Register ip1 = { Registers::x16 }; +static constexpr Register fp = { Registers::x30 }; +static constexpr Register lr = { Registers::x30 }; +static constexpr Register rzr = { Registers::xzr }; + +// Import VIXL registers into the js::jit namespace. +#define IMPORT_VIXL_REGISTERS(N) \ +static constexpr ARMRegister w##N = vixl::w##N; \ +static constexpr ARMRegister x##N = vixl::x##N; +REGISTER_CODE_LIST(IMPORT_VIXL_REGISTERS) +#undef IMPORT_VIXL_REGISTERS +static constexpr ARMRegister wzr = vixl::wzr; +static constexpr ARMRegister xzr = vixl::xzr; +static constexpr ARMRegister wsp = vixl::wsp; +static constexpr ARMRegister sp = vixl::sp; + +// Import VIXL VRegisters into the js::jit namespace. +#define IMPORT_VIXL_VREGISTERS(N) \ +static constexpr ARMFPRegister s##N = vixl::s##N; \ +static constexpr ARMFPRegister d##N = vixl::d##N; +REGISTER_CODE_LIST(IMPORT_VIXL_VREGISTERS) +#undef IMPORT_VIXL_VREGISTERS + +static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg); + +// Registers used in the GenerateFFIIonExit Enable Activation block. +static constexpr Register AsmJSIonExitRegCallee = r8; +static constexpr Register AsmJSIonExitRegE0 = r0; +static constexpr Register AsmJSIonExitRegE1 = r1; +static constexpr Register AsmJSIonExitRegE2 = r2; +static constexpr Register AsmJSIonExitRegE3 = r3; + +// Registers used in the GenerateFFIIonExit Disable Activation block. +// None of these may be the second scratch register. +static constexpr Register AsmJSIonExitRegReturnData = r2; +static constexpr Register AsmJSIonExitRegReturnType = r3; +static constexpr Register AsmJSIonExitRegD0 = r0; +static constexpr Register AsmJSIonExitRegD1 = r1; +static constexpr Register AsmJSIonExitRegD2 = r4; + +static constexpr Register JSReturnReg_Type = r3; +static constexpr Register JSReturnReg_Data = r2; + +static constexpr FloatRegister NANReg = { FloatRegisters::d14 }; +// N.B. 
r8 isn't listed as an aapcs temp register, but we can use it as such because we never +// use return-structs. +static constexpr Register CallTempNonArgRegs[] = { r8, r9, r10, r11, r12, r13, r14, r15 }; +static const uint32_t NumCallTempNonArgRegs = + mozilla::ArrayLength(CallTempNonArgRegs); + +static constexpr uint32_t JitStackAlignment = 16; + +static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value); +static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1, + "Stack alignment should be a non-zero multiple of sizeof(Value)"); + +// This boolean indicates whether we support SIMD instructions flavoured for +// this architecture or not. Rather than a method in the LIRGenerator, it is +// here such that it is accessible from the entire codebase. Once full support +// for SIMD is reached on all tier-1 platforms, this constant can be deleted. +static constexpr bool SupportsSimd = false; +static constexpr uint32_t SimdMemoryAlignment = 16; + +static_assert(CodeAlignment % SimdMemoryAlignment == 0, + "Code alignment should be larger than any of the alignments which are used for " + "the constant sections of the code buffer. Thus it should be larger than the " + "alignment for SIMD constants."); + +static const uint32_t AsmJSStackAlignment = SimdMemoryAlignment; +static const int32_t AsmJSGlobalRegBias = 1024; + +class Assembler : public vixl::Assembler +{ + public: + Assembler() + : vixl::Assembler() + { } + + typedef vixl::Condition Condition; + + void finish(); + void trace(JSTracer* trc); + + // Emit the jump table, returning the BufferOffset to the first entry in the table. + BufferOffset emitExtendedJumpTable(); + BufferOffset ExtendedJumpTable_; + void executableCopy(uint8_t* buffer); + + BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, + ARMBuffer::PoolEntry* pe = nullptr); + BufferOffset immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe = nullptr); + BufferOffset immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, vixl::Condition c); + BufferOffset fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op); + BufferOffset fImmPool64(ARMFPRegister dest, double value); + BufferOffset fImmPool32(ARMFPRegister dest, float value); + + void bind(Label* label) { bind(label, nextOffset()); } + void bind(Label* label, BufferOffset boff); + void bind(RepatchLabel* label); + + bool oom() const { + return AssemblerShared::oom() || + armbuffer_.oom() || + jumpRelocations_.oom() || + dataRelocations_.oom() || + preBarriers_.oom(); + } + + void copyJumpRelocationTable(uint8_t* dest) const { + if (jumpRelocations_.length()) + memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length()); + } + void copyDataRelocationTable(uint8_t* dest) const { + if (dataRelocations_.length()) + memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length()); + } + void copyPreBarrierTable(uint8_t* dest) const { + if (preBarriers_.length()) + memcpy(dest, preBarriers_.buffer(), preBarriers_.length()); + } + + size_t jumpRelocationTableBytes() const { + return jumpRelocations_.length(); + } + size_t dataRelocationTableBytes() const { + return dataRelocations_.length(); + } + size_t preBarrierTableBytes() const { + return preBarriers_.length(); + } + size_t bytesNeeded() const { + return SizeOfCodeGenerated() + + jumpRelocationTableBytes() + + dataRelocationTableBytes() + + preBarrierTableBytes(); + } + + BufferOffset nextOffset() const { + return armbuffer_.nextOffset(); + } + + void 
addCodeLabel(CodeLabel label) { + propagateOOM(codeLabels_.append(label)); + } + size_t numCodeLabels() const { + return codeLabels_.length(); + } + CodeLabel codeLabel(size_t i) { + return codeLabels_[i]; + } + void processCodeLabels(uint8_t* rawCode) { + for (size_t i = 0; i < codeLabels_.length(); i++) { + CodeLabel label = codeLabels_[i]; + Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset())); + } + } + + void Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address) { + uint32_t off = actualOffset(label->offset()); + *reinterpret_cast(rawCode + off) = address; + } + bool nextLink(BufferOffset cur, BufferOffset* next) { + Instruction* link = getInstructionAt(cur); + uint32_t nextLinkOffset = uint32_t(link->ImmPCRawOffset()); + if (nextLinkOffset == uint32_t(LabelBase::INVALID_OFFSET)) + return false; + *next = BufferOffset(nextLinkOffset + cur.getOffset()); + return true; + } + void retarget(Label* cur, Label* next); + + // The buffer is about to be linked. Ensure any constant pools or + // excess bookkeeping has been flushed to the instruction stream. + void flush() { + armbuffer_.flushPool(); + } + + int actualOffset(int curOffset) { + return curOffset + armbuffer_.poolSizeBefore(curOffset); + } + int actualIndex(int curOffset) { + ARMBuffer::PoolEntry pe(curOffset); + return armbuffer_.poolEntryOffset(pe); + } + int labelOffsetToPatchOffset(int labelOff) { + return actualOffset(labelOff); + } + static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index) { + return code->raw() + index; + } + void setPrinter(Sprinter* sp) { + } + + static bool SupportsFloatingPoint() { return true; } + static bool SupportsSimd() { return js::jit::SupportsSimd; } + + // Tracks a jump that is patchable after finalization. + void addJumpRelocation(BufferOffset src, Relocation::Kind reloc); + + protected: + // Add a jump whose target is unknown until finalization. + // The jump may not be patched at runtime. + void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind); + + // Add a jump whose target is unknown until finalization, and may change + // thereafter. The jump is patchable at runtime. + size_t addPatchableJump(BufferOffset src, Relocation::Kind kind); + + public: + static uint32_t PatchWrite_NearCallSize() { + return 4; + } + + static uint32_t NopSize() { + return 4; + } + + static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) { + Instruction* dest = (Instruction*)start.raw(); + //printf("patching %p with call to %p\n", start.raw(), toCall.raw()); + bl(dest, ((Instruction*)toCall.raw() - dest)>>2); + + } + static void PatchDataWithValueCheck(CodeLocationLabel label, + PatchedImmPtr newValue, + PatchedImmPtr expected); + + static void PatchDataWithValueCheck(CodeLocationLabel label, + ImmPtr newValue, + ImmPtr expected); + + static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) { + // Raw is going to be the return address. + uint32_t* raw = (uint32_t*)label.raw(); + // Overwrite the 4 bytes before the return address, which will end up being + // the call instruction. 
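+        // raw is a uint32_t*, so raw - 1 addresses exactly the 4-byte
+        // instruction preceding the return address.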
+ *(raw - 1) = imm.value; + } + static uint32_t AlignDoubleArg(uint32_t offset) { + MOZ_CRASH("AlignDoubleArg()"); + } + static Instruction* NextInstruction(Instruction* instruction, uint32_t* count = nullptr) { + if (count != nullptr) + *count += 4; + Instruction* cur = instruction; + Instruction* next = cur + 4; + // Artificial pool guards can only be B (rather than BR) + if (next->IsUncondB()) { + uint32_t* snd = (uint32_t*)(instruction + 8); + // test both the upper 16 bits, but also bit 15, which should be unset + // for an artificial branch guard. + if ((*snd & 0xffff8000) == 0xffff0000) { + // that was a guard before a pool, step over the pool. + int poolSize = (*snd & 0x7fff); + return (Instruction*)(snd + poolSize); + } + } else if (cur->IsBR() || cur->IsUncondB()) { + // natural pool guards can be anything + // but they need to have bit 15 set. + if ((next->InstructionBits() & 0xffff0000) == 0xffff0000) { + int poolSize = (next->InstructionBits() & 0x7fff); + Instruction* ret = (next + (poolSize << 2)); + return ret; + } + } + return (instruction + 4); + + } + static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr) { + return (uint8_t*)NextInstruction((Instruction*)instruction, count); + } + static uintptr_t GetPointer(uint8_t* ptr) { + Instruction* i = reinterpret_cast(ptr); + uint64_t ret = i->Literal64(); + return ret; + } + + // Toggle a jmp or cmp emitted by toggledJump(). + static void ToggleToJmp(CodeLocationLabel inst_); + static void ToggleToCmp(CodeLocationLabel inst_); + static void ToggleCall(CodeLocationLabel inst_, bool enabled); + + static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); + static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); + + static int32_t ExtractCodeLabelOffset(uint8_t* code); + static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm); + + static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader, + const ObjectVector& nurseryObjects); + + // Convert a BufferOffset to a final byte offset from the start of the code buffer. + size_t toFinalOffset(BufferOffset offset) { + return size_t(offset.getOffset() + armbuffer_.poolSizeBefore(offset.getOffset())); + } + + public: + // A Jump table entry is 2 instructions, with 8 bytes of raw data + static const size_t SizeOfJumpTableEntry = 16; + + struct JumpTableEntry + { + uint32_t ldr; + uint32_t br; + void* data; + + Instruction* getLdr() { + return reinterpret_cast(&ldr); + } + }; + + // Offset of the patchable target for the given entry. + static const size_t OffsetOfJumpTableEntryPointer = 8; + + public: + static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst); + + void writeCodePointer(AbsoluteLabel* absoluteLabel) { + MOZ_ASSERT(!absoluteLabel->bound()); + uintptr_t x = LabelBase::INVALID_OFFSET; + BufferOffset off = EmitData(&x, sizeof(uintptr_t)); + + // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list + // of uses of an AbsoluteLabel through the assembly. ARM only uses labels + // for the case statements of switch jump tables. Thus, for simplicity, we + // simply treat the AbsoluteLabel as a label and bind it to the offset of + // the jump table entry that needs to be patched. 
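+        // The actual pointer is written into that slot at finalization, when
+        // processCodeLabels() calls Bind() on the raw code buffer.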
+ LabelBase* label = absoluteLabel; + label->bind(off.getOffset()); + } + + void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, + const Disassembler::HeapAccess& heapAccess) + { + MOZ_CRASH("verifyHeapAccessDisassembly"); + } + + protected: + // Because jumps may be relocated to a target inaccessible by a short jump, + // each relocatable jump must have a unique entry in the extended jump table. + // Valid relocatable targets are of type Relocation::JITCODE. + struct JumpRelocation + { + BufferOffset jump; // Offset to the short jump, from the start of the code buffer. + uint32_t extendedTableIndex; // Unique index within the extended jump table. + + JumpRelocation(BufferOffset jump, uint32_t extendedTableIndex) + : jump(jump), extendedTableIndex(extendedTableIndex) + { } + }; + + // Because ARM and A64 use a code buffer that allows for constant pool insertion, + // the actual offset of each jump cannot be known until finalization. + // These vectors store the WIP offsets. + js::Vector tmpDataRelocations_; + js::Vector tmpPreBarriers_; + js::Vector tmpJumpRelocations_; + + // Structure for fixing up pc-relative loads/jumps when the machine + // code gets moved (executable copy, gc, etc.). + struct RelativePatch + { + BufferOffset offset; + void* target; + Relocation::Kind kind; + + RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind) + : offset(offset), target(target), kind(kind) + { } + }; + + js::Vector codeLabels_; + + // List of jumps for which the target is either unknown until finalization, + // or cannot be known due to GC. Each entry here requires a unique entry + // in the extended jump table, and is patched at finalization. + js::Vector pendingJumps_; + + // Final output formatters. + CompactBufferWriter jumpRelocations_; + CompactBufferWriter dataRelocations_; + CompactBufferWriter preBarriers_; +}; + +static const uint32_t NumIntArgRegs = 8; +static const uint32_t NumFloatArgRegs = 8; + +class ABIArgGenerator +{ + public: + ABIArgGenerator() + : intRegIndex_(0), + floatRegIndex_(0), + stackOffset_(0), + current_() + { } + + ABIArg next(MIRType argType); + ABIArg& current() { return current_; } + uint32_t stackBytesConsumedSoFar() const { return stackOffset_; } + + public: + static const Register NonArgReturnReg0; + static const Register NonArgReturnReg1; + static const Register NonVolatileReg; + static const Register NonArg_VolatileReg; + static const Register NonReturn_VolatileReg0; + static const Register NonReturn_VolatileReg1; + + protected: + unsigned intRegIndex_; + unsigned floatRegIndex_; + uint32_t stackOffset_; + ABIArg current_; +}; + +static inline bool +GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out) +{ + if (usedIntArgs >= NumIntArgRegs) + return false; + *out = Register::FromCode(usedIntArgs); + return true; +} + +static inline bool +GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister* out) +{ + if (usedFloatArgs >= NumFloatArgRegs) + return false; + *out = FloatRegister::FromCode(usedFloatArgs); + return true; +} + +// Get a register in which we plan to put a quantity that will be used as an +// integer argument. This differs from GetIntArgReg in that if we have no more +// actual argument registers to use we will fall back on using whatever +// CallTempReg* don't overlap the argument registers, and only fail once those +// run out too. 
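+// For example, once all 8 integer argument registers are taken,
+// GetTempRegForIntArg(8, 0, &out) below hands out CallTempNonArgRegs[0] (r8).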
+static inline bool +GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out) +{ + if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) + return true; + // Unfortunately, we have to assume things about the point at which + // GetIntArgReg returns false, because we need to know how many registers it + // can allocate. + usedIntArgs -= NumIntArgRegs; + if (usedIntArgs >= NumCallTempNonArgRegs) + return false; + *out = CallTempNonArgRegs[usedIntArgs]; + return true; + +} + +void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label); + +static inline void +PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target) +{ + PatchJump(jump_, label); +} + +// Forbids pool generation during a specified interval. Not nestable. +class AutoForbidPools +{ + Assembler* asm_; + + public: + AutoForbidPools(Assembler* asm_, size_t maxInst) + : asm_(asm_) + { + asm_->enterNoPool(maxInst); + } + + ~AutoForbidPools() { + asm_->leaveNoPool(); + } +}; + +} // namespace jit +} // namespace js + +#endif // A64_ASSEMBLER_A64_H_ diff --git a/js/src/jit/arm64/AtomicOperations-arm64.h b/js/src/jit/arm64/AtomicOperations-arm64.h new file mode 100644 index 000000000000..3742674fd646 --- /dev/null +++ b/js/src/jit/arm64/AtomicOperations-arm64.h @@ -0,0 +1,104 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* For documentation, see jit/AtomicOperations.h */ + +#ifndef jit_arm64_AtomicOperations_arm64_h +#define jit_arm64_AtomicOperations_arm64_h + +#include "jit/arm64/Architecture-arm64.h" +#include "jit/AtomicOperations.h" + +inline bool +js::jit::AtomicOperations::isLockfree8() +{ + MOZ_CRASH("isLockfree8()"); +} + +inline void +js::jit::AtomicOperations::fenceSeqCst() +{ + MOZ_CRASH("fenceSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::loadSeqCst(T* addr) +{ + MOZ_CRASH("loadSeqCst()"); +} + +template +inline void +js::jit::AtomicOperations::storeSeqCst(T* addr, T val) +{ + MOZ_CRASH("storeSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) +{ + MOZ_CRASH("exchangeSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) +{ + MOZ_CRASH("compareExchangeSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) +{ + MOZ_CRASH("fetchAddSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) +{ + MOZ_CRASH("fetchSubSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) +{ + MOZ_CRASH("fetchAndSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) +{ + MOZ_CRASH("fetchOrSeqCst()"); +} + +template +inline T +js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) +{ + MOZ_CRASH("fetchXorSeqCst()"); +} + +template +inline void +js::jit::RegionLock::acquire(void* addr) +{ + MOZ_CRASH("acquire()"); +} + +template +inline void +js::jit::RegionLock::release(void* addr) +{ + MOZ_CRASH("release()"); +} + +#endif // jit_arm64_AtomicOperations_arm64_h diff --git a/js/src/jit/arm64/BaselineCompiler-arm64.h b/js/src/jit/arm64/BaselineCompiler-arm64.h new file mode 100644 index 000000000000..946099ff15c0 --- /dev/null 
+++ b/js/src/jit/arm64/BaselineCompiler-arm64.h @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_BaselineCompiler_arm64_h +#define jit_arm64_BaselineCompiler_arm64_h + +#include "jit/shared/BaselineCompiler-shared.h" + +namespace js { +namespace jit { + +class BaselineCompilerARM64 : public BaselineCompilerShared +{ + protected: + BaselineCompilerARM64(JSContext* cx, TempAllocator& alloc, JSScript* script) + : BaselineCompilerShared(cx, alloc, script) + { } +}; + +typedef BaselineCompilerARM64 BaselineCompilerSpecific; + +} // namespace jit +} // namespace js + +#endif /* jit_arm64_BaselineCompiler_arm64_h */ diff --git a/js/src/jit/arm64/BaselineIC-arm64.cpp b/js/src/jit/arm64/BaselineIC-arm64.cpp new file mode 100644 index 000000000000..168936964548 --- /dev/null +++ b/js/src/jit/arm64/BaselineIC-arm64.cpp @@ -0,0 +1,269 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/SharedIC.h" +#include "jit/SharedICHelpers.h" + +#ifdef JS_ARM64_SIMULATOR +#include "jit/arm64/Assembler-arm64.h" +#include "jit/arm64/BaselineCompiler-arm64.h" +#include "jit/arm64/vixl/Debugger-vixl.h" +#endif + + +using namespace js; +using namespace js::jit; + +namespace js { +namespace jit { + +// ICCompare_Int32 + +bool +ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + // Guard that R0 is an integer and R1 is an integer. + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + masm.branchTestInt32(Assembler::NotEqual, R1, &failure); + + // Compare payload regs of R0 and R1. + Assembler::Condition cond = JSOpToCondition(op, /* signed = */true); + masm.cmp32(R0.valueReg(), R1.valueReg()); + masm.Cset(ARMRegister(R0.valueReg(), 32), cond); + + // Result is implicitly boxed already. + masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0); + EmitReturnFromIC(masm); + + // Failure case - jump to next stub. + masm.bind(&failure); + EmitStubGuardFailure(masm); + + return true; +} + +bool +ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label failure, isNaN; + masm.ensureDouble(R0, FloatReg0, &failure); + masm.ensureDouble(R1, FloatReg1, &failure); + + Register dest = R0.valueReg(); + + Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op); + Assembler::Condition cond = Assembler::ConditionFromDoubleCondition(doubleCond); + + masm.compareDouble(doubleCond, FloatReg0, FloatReg1); + masm.Cset(ARMRegister(dest, 32), cond); + + masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0); + EmitReturnFromIC(masm); + + // Failure case - jump to next stub. + masm.bind(&failure); + EmitStubGuardFailure(masm); + return true; +} + +// ICBinaryArith_Int32 + +bool +ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + // Guard that R0 is an integer and R1 is an integer. + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + masm.branchTestInt32(Assembler::NotEqual, R1, &failure); + + // Add R0 and R1. 
Don't need to explicitly unbox, just use R2. + Register Rscratch = R2_; + ARMRegister Wscratch = ARMRegister(Rscratch, 32); +#ifdef MERGE + // DIV and MOD need an extra non-volatile ValueOperand to hold R0. + AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2)); + savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs); +#endif + // get some more ARM-y names for the registers + ARMRegister W0(R0_, 32); + ARMRegister X0(R0_, 64); + ARMRegister W1(R1_, 32); + ARMRegister X1(R1_, 64); + ARMRegister WTemp(ExtractTemp0, 32); + ARMRegister XTemp(ExtractTemp0, 64); + Label maybeNegZero, revertRegister; + switch(op_) { + case JSOP_ADD: + masm.Adds(WTemp, W0, Operand(W1)); + + // Just jump to failure on overflow. R0 and R1 are preserved, so we can + // just jump to the next stub. + masm.j(Assembler::Overflow, &failure); + + // Box the result and return. We know R0 already contains the + // integer tag, so we just need to move the payload into place. + masm.movePayload(ExtractTemp0, R0_); + break; + + case JSOP_SUB: + masm.Subs(WTemp, W0, Operand(W1)); + masm.j(Assembler::Overflow, &failure); + masm.movePayload(ExtractTemp0, R0_); + break; + + case JSOP_MUL: + masm.mul32(R0.valueReg(), R1.valueReg(), Rscratch, &failure, &maybeNegZero); + masm.movePayload(Rscratch, R0_); + break; + + case JSOP_DIV: + case JSOP_MOD: { + + // Check for INT_MIN / -1, it results in a double. + Label check2; + masm.Cmp(W0, Operand(INT_MIN)); + masm.B(&check2, Assembler::NotEqual); + masm.Cmp(W1, Operand(-1)); + masm.j(Assembler::Equal, &failure); + masm.bind(&check2); + Label no_fail; + // Check for both division by zero and 0 / X with X < 0 (results in -0). + masm.Cmp(W1, Operand(0)); + // If x > 0, then it can't be bad. + masm.B(&no_fail, Assembler::GreaterThan); + // if x == 0, then ignore any comparison, and force + // it to fail, if x < 0 (the only other case) + // then do the comparison, and fail if y == 0 + masm.Ccmp(W0, Operand(0), vixl::ZFlag, Assembler::NotEqual); + masm.B(&failure, Assembler::Equal); + masm.bind(&no_fail); + masm.Sdiv(Wscratch, W0, W1); + // Start calculating the remainder, x - (x / y) * y. + masm.mul(WTemp, W1, Wscratch); + if (op_ == JSOP_DIV) { + // Result is a double if the remainder != 0, which happens + // when (x/y)*y != x. + masm.branch32(Assembler::NotEqual, R0.valueReg(), ExtractTemp0, &revertRegister); + masm.movePayload(Rscratch, R0_); + } else { + // Calculate the actual mod. Set the condition code, so we can see if it is non-zero. + masm.Subs(WTemp, W0, WTemp); + + // If X % Y == 0 and X < 0, the result is -0. + masm.Ccmp(W0, Operand(0), vixl::NoFlag, Assembler::Equal); + masm.branch(Assembler::LessThan, &revertRegister); + masm.movePayload(ExtractTemp0, R0_); + } + break; + } + // ORR, EOR, AND can trivially be coerced int + // working without affecting the tag of the dest.. + case JSOP_BITOR: + masm.Orr(X0, X0, Operand(X1)); + break; + case JSOP_BITXOR: + masm.Eor(X0, X0, Operand(W1, vixl::UXTW)); + break; + case JSOP_BITAND: + masm.And(X0, X0, Operand(X1)); + break; + // LSH, RSH and URSH can not. + case JSOP_LSH: + // ARM will happily try to shift by more than 0x1f. 
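+        // That is fine here: the AArch64 variable-shift instructions take the
+        // count modulo the register size (32 for W registers), which matches
+        // the & 0x1f masking that JS shift semantics require.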
+ masm.Lsl(Wscratch, W0, W1); + masm.movePayload(Rscratch, R0.valueReg()); + break; + case JSOP_RSH: + masm.Asr(Wscratch, W0, W1); + masm.movePayload(Rscratch, R0.valueReg()); + break; + case JSOP_URSH: + masm.Lsr(Wscratch, W0, W1); + if (allowDouble_) { + Label toUint; + // Testing for negative is equivalent to testing bit 31 + masm.Tbnz(Wscratch, 31, &toUint); + // Move result and box for return. + masm.movePayload(Rscratch, R0_); + EmitReturnFromIC(masm); + + masm.bind(&toUint); + masm.convertUInt32ToDouble(Rscratch, ScratchDoubleReg); + masm.boxDouble(ScratchDoubleReg, R0); + } else { + // Testing for negative is equivalent to testing bit 31 + masm.Tbnz(Wscratch, 31, &failure); + // Move result for return. + masm.movePayload(Rscratch, R0_); + } + break; + default: + MOZ_CRASH("Unhandled op for BinaryArith_Int32."); + } + + EmitReturnFromIC(masm); + + switch (op_) { + case JSOP_MUL: + masm.bind(&maybeNegZero); + + // Result is -0 if exactly one of lhs or rhs is negative. + masm.Cmn(W0, W1); + masm.j(Assembler::Signed, &failure); + + // Result is +0, so use the zero register. + masm.movePayload(rzr, R0_); + EmitReturnFromIC(masm); + break; + case JSOP_DIV: + case JSOP_MOD: + masm.bind(&revertRegister); + break; + default: + break; + } + + // Failure case - jump to next stub. + masm.bind(&failure); + EmitStubGuardFailure(masm); + + return true; +} + +bool +ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + + switch (op) { + case JSOP_BITNOT: + masm.Mvn(ARMRegister(R1.valueReg(), 32), ARMRegister(R0.valueReg(), 32)); + masm.movePayload(R1.valueReg(), R0.valueReg()); + break; + case JSOP_NEG: + // Guard against 0 and MIN_INT, both result in a double. + masm.branchTest32(Assembler::Zero, R0.valueReg(), Imm32(0x7fffffff), &failure); + + // Compile -x as 0 - x. + masm.Sub(ARMRegister(R1.valueReg(), 32), wzr, ARMRegister(R0.valueReg(), 32)); + masm.movePayload(R1.valueReg(), R0.valueReg()); + break; + default: + MOZ_CRASH("Unexpected op"); + } + + EmitReturnFromIC(masm); + + masm.bind(&failure); + EmitStubGuardFailure(masm); + return true; + +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/MacroAssembler-arm64.cpp b/js/src/jit/arm64/MacroAssembler-arm64.cpp new file mode 100644 index 000000000000..7b1d7b494e7e --- /dev/null +++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp @@ -0,0 +1,688 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/arm64/MacroAssembler-arm64.h" + +// TODO #include "jit/arm64/MoveEmitter-arm64.h" +#include "jit/arm64/SharedICRegisters-arm64.h" +#include "jit/Bailouts.h" +#include "jit/BaselineFrame.h" +#include "jit/MacroAssembler.h" + +namespace js { +namespace jit { + +void +MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) +{ + ARMRegister dest(output, 32); + Fcvtns(dest, ARMFPRegister(input, 64)); + + { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + + Mov(scratch32, Operand(0xff)); + Cmp(dest, scratch32); + Csel(dest, dest, scratch32, LessThan); + } + + Cmp(dest, Operand(0)); + Csel(dest, wzr, dest, LessThan); +} + +void +MacroAssemblerCompat::buildFakeExitFrame(Register scratch, uint32_t* offset) +{ + mozilla::DebugOnly initialDepth = framePushed(); + uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); + + asMasm().Push(Imm32(descriptor)); // descriptor_ + + enterNoPool(3); + Label fakeCallsite; + Adr(ARMRegister(scratch, 64), &fakeCallsite); + asMasm().Push(scratch); + bind(&fakeCallsite); + uint32_t pseudoReturnOffset = currentOffset(); + leaveNoPool(); + + MOZ_ASSERT(framePushed() == initialDepth + ExitFrameLayout::Size()); + + *offset = pseudoReturnOffset; +} + +void +MacroAssemblerCompat::callWithExitFrame(JitCode* target) +{ + uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); + asMasm().Push(Imm32(descriptor)); + call(target); +} + +void +MacroAssembler::alignFrameForICArguments(MacroAssembler::AfterICSaveLive& aic) +{ + // Exists for MIPS compatibility. +} + +void +MacroAssembler::restoreFrameAlignmentForICArguments(MacroAssembler::AfterICSaveLive& aic) +{ + // Exists for MIPS compatibility. +} + +js::jit::MacroAssembler& +MacroAssemblerCompat::asMasm() +{ + return *static_cast(this); +} + +const js::jit::MacroAssembler& +MacroAssemblerCompat::asMasm() const +{ + return *static_cast(this); +} + +vixl::MacroAssembler& +MacroAssemblerCompat::asVIXL() +{ + return *static_cast(this); +} + +const vixl::MacroAssembler& +MacroAssemblerCompat::asVIXL() const +{ + return *static_cast(this); +} + +BufferOffset +MacroAssemblerCompat::movePatchablePtr(ImmPtr ptr, Register dest) +{ + const size_t numInst = 1; // Inserting one load instruction. + const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes. + uint8_t* literalAddr = (uint8_t*)(&ptr.value); // TODO: Should be const. + + // Scratch space for generating the load instruction. + // + // allocEntry() will use InsertIndexIntoTag() to store a temporary + // index to the corresponding PoolEntry in the instruction itself. + // + // That index will be fixed up later when finishPool() + // walks over all marked loads and calls PatchConstantPoolLoad(). + uint32_t instructionScratch = 0; + + // Emit the instruction mask in the scratch space. + // The offset doesn't matter: it will be fixed up later. + vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0); + + // Add the entry to the pool, fix up the LDR imm19 offset, + // and add the completed instruction to the buffer. + return armbuffer_.allocEntry(numInst, numPoolEntries, + (uint8_t*)&instructionScratch, literalAddr); +} + +BufferOffset +MacroAssemblerCompat::movePatchablePtr(ImmWord ptr, Register dest) +{ + const size_t numInst = 1; // Inserting one load instruction. + const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes. 
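+    // Two pool entries are needed here because the 64-bit immediate occupies
+    // two 4-byte slots.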
+ uint8_t* literalAddr = (uint8_t*)(&ptr.value); + + // Scratch space for generating the load instruction. + // + // allocEntry() will use InsertIndexIntoTag() to store a temporary + // index to the corresponding PoolEntry in the instruction itself. + // + // That index will be fixed up later when finishPool() + // walks over all marked loads and calls PatchConstantPoolLoad(). + uint32_t instructionScratch = 0; + + // Emit the instruction mask in the scratch space. + // The offset doesn't matter: it will be fixed up later. + vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0); + + // Add the entry to the pool, fix up the LDR imm19 offset, + // and add the completed instruction to the buffer. + return armbuffer_.allocEntry(numInst, numPoolEntries, + (uint8_t*)&instructionScratch, literalAddr); +} + +void +MacroAssemblerCompat::handleFailureWithHandlerTail(void* handler) +{ + // Reserve space for exception information. + int64_t size = (sizeof(ResumeFromException) + 7) & ~7; + Sub(GetStackPointer64(), GetStackPointer64(), Operand(size)); + if (!GetStackPointer64().Is(sp)) + Mov(sp, GetStackPointer64()); + + Mov(x0, GetStackPointer64()); + + // Call the handler. + setupUnalignedABICall(1, r1); + passABIArg(r0); + callWithABI(handler); + + Label entryFrame; + Label catch_; + Label finally; + Label return_; + Label bailout; + + MOZ_ASSERT(GetStackPointer64().Is(x28)); // Lets the code below be a little cleaner. + + loadPtr(Address(r28, offsetof(ResumeFromException, kind)), r0); + branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame); + branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_); + branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally); + branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_); + branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout); + + breakpoint(); // Invalid kind. + + // No exception handler. Load the error value, load the new stack pointer, + // and return from the entry frame. + bind(&entryFrame); + moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand); + loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28); + retn(Imm32(1 * sizeof(void*))); // Pop from stack and return. + + // If we found a catch handler, this must be a baseline frame. Restore state + // and jump to the catch block. + bind(&catch_); + loadPtr(Address(r28, offsetof(ResumeFromException, target)), r0); + loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg); + loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28); + syncStackPtr(); + Br(x0); + + // If we found a finally block, this must be a baseline frame. + // Push two values expected by JSOP_RETSUB: BooleanValue(true) + // and the exception. + bind(&finally); + ARMRegister exception = x1; + Ldr(exception, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, exception))); + Ldr(x0, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target))); + Ldr(ARMRegister(BaselineFrameReg, 64), + MemOperand(GetStackPointer64(), offsetof(ResumeFromException, framePointer))); + Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), offsetof(ResumeFromException, stackPointer))); + syncStackPtr(); + pushValue(BooleanValue(true)); + push(exception); + Br(x0); + + // Only used in debug mode. Return BaselineFrame->returnValue() to the caller. 
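+    // (This is the RESUME_FORCED_RETURN case, e.g. when the debugger forces an
+    // early return from the frame.)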
+ bind(&return_); + loadPtr(Address(r28, offsetof(ResumeFromException, framePointer)), BaselineFrameReg); + loadPtr(Address(r28, offsetof(ResumeFromException, stackPointer)), r28); + loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()), + JSReturnOperand); + movePtr(BaselineFrameReg, r28); + vixl::MacroAssembler::Pop(ARMRegister(BaselineFrameReg, 64), vixl::lr); + syncStackPtr(); + vixl::MacroAssembler::Ret(vixl::lr); + + // If we are bailing out to baseline to handle an exception, + // jump to the bailout tail stub. + bind(&bailout); + Ldr(x2, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, bailoutInfo))); + Ldr(x1, MemOperand(GetStackPointer64(), offsetof(ResumeFromException, target))); + Mov(x0, BAILOUT_RETURN_OK); + Br(x1); +} + +void +MacroAssemblerCompat::setupABICall(uint32_t args) +{ + MOZ_ASSERT(!inCall_); + inCall_ = true; + + args_ = args; + usedOutParam_ = false; + passedIntArgs_ = 0; + passedFloatArgs_ = 0; + passedArgTypes_ = 0; + stackForCall_ = ShadowStackSpace; +} + +void +MacroAssemblerCompat::setupUnalignedABICall(uint32_t args, Register scratch) +{ + setupABICall(args); + dynamicAlignment_ = true; + + int64_t alignment = ~(int64_t(ABIStackAlignment) - 1); + ARMRegister scratch64(scratch, 64); + + // Always save LR -- Baseline ICs assume that LR isn't modified. + push(lr); + + // Unhandled for sp -- needs slightly different logic. + MOZ_ASSERT(!GetStackPointer64().Is(sp)); + + // Remember the stack address on entry. + Mov(scratch64, GetStackPointer64()); + + // Make alignment, including the effective push of the previous sp. + Sub(GetStackPointer64(), GetStackPointer64(), Operand(8)); + And(GetStackPointer64(), GetStackPointer64(), Operand(alignment)); + + // If the PseudoStackPointer is used, sp must be <= psp before a write is valid. + syncStackPtr(); + + // Store previous sp to the top of the stack, aligned. 
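+    // Roughly, the stack now looks like (higher addresses first):
+    //   [ saved lr ][ padding ][ pre-alignment stack pointer ]  <- aligned PSP
+    // callWithABIPost() reloads the saved stack pointer from [PSP] to undo this.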
+ Str(scratch64, MemOperand(GetStackPointer64(), 0)); +} + +void +MacroAssemblerCompat::passABIArg(const MoveOperand& from, MoveOp::Type type) +{ + if (!enoughMemory_) + return; + + Register activeSP = Register::FromCode(GetStackPointer64().code()); + if (type == MoveOp::GENERAL) { + Register dest; + passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General; + if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) { + if (!from.isGeneralReg() || from.reg() != dest) + enoughMemory_ = moveResolver_.addMove(from, MoveOperand(dest), type); + return; + } + + enoughMemory_ = moveResolver_.addMove(from, MoveOperand(activeSP, stackForCall_), type); + stackForCall_ += sizeof(int64_t); + return; + } + + MOZ_ASSERT(type == MoveOp::FLOAT32 || type == MoveOp::DOUBLE); + if (type == MoveOp::FLOAT32) + passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32; + else + passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double; + + FloatRegister fdest; + if (GetFloatArgReg(passedIntArgs_, passedFloatArgs_++, &fdest)) { + if (!from.isFloatReg() || from.floatReg() != fdest) + enoughMemory_ = moveResolver_.addMove(from, MoveOperand(fdest), type); + return; + } + + enoughMemory_ = moveResolver_.addMove(from, MoveOperand(activeSP, stackForCall_), type); + switch (type) { + case MoveOp::FLOAT32: stackForCall_ += sizeof(float); break; + case MoveOp::DOUBLE: stackForCall_ += sizeof(double); break; + default: MOZ_CRASH("Unexpected float register class argument type"); + } +} + +void +MacroAssemblerCompat::passABIArg(Register reg) +{ + passABIArg(MoveOperand(reg), MoveOp::GENERAL); +} + +void +MacroAssemblerCompat::passABIArg(FloatRegister reg, MoveOp::Type type) +{ + passABIArg(MoveOperand(reg), type); +} +void +MacroAssemblerCompat::passABIOutParam(Register reg) +{ + if (!enoughMemory_) + return; + MOZ_ASSERT(!usedOutParam_); + usedOutParam_ = true; + if (reg == r8) + return; + enoughMemory_ = moveResolver_.addMove(MoveOperand(reg), MoveOperand(r8), MoveOp::GENERAL); + +} + +void +MacroAssemblerCompat::callWithABIPre(uint32_t* stackAdjust) +{ + *stackAdjust = stackForCall_; + // ARM64 /really/ wants the stack to always be aligned. Since we're already tracking it + // getting it aligned for an abi call is pretty easy. + *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment); + asMasm().reserveStack(*stackAdjust); + { + moveResolver_.resolve(); + MoveEmitter emitter(asMasm()); + emitter.emit(moveResolver_); + emitter.finish(); + } + + // Call boundaries communicate stack via sp. + syncStackPtr(); +} + +void +MacroAssemblerCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result) +{ + // Call boundaries communicate stack via sp. + if (!GetStackPointer64().Is(sp)) + Mov(GetStackPointer64(), sp); + + inCall_ = false; + asMasm().freeStack(stackAdjust); + + // Restore the stack pointer from entry. + if (dynamicAlignment_) + Ldr(GetStackPointer64(), MemOperand(GetStackPointer64(), 0)); + + // Restore LR. + pop(lr); + + // TODO: This one shouldn't be necessary -- check that callers + // aren't enforcing the ABI themselves! + syncStackPtr(); + + // If the ABI's return regs are where ION is expecting them, then + // no other work needs to be done. 
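+    // (On AArch64 the ABI returns integers in x0 and floating-point values in
+    // d0/s0, which is where the JIT expects results, so no moves are emitted.)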
+} + +#if defined(DEBUG) && defined(JS_ARM64_SIMULATOR) +static void +AssertValidABIFunctionType(uint32_t passedArgTypes) +{ + switch (passedArgTypes) { + case Args_General0: + case Args_General1: + case Args_General2: + case Args_General3: + case Args_General4: + case Args_General5: + case Args_General6: + case Args_General7: + case Args_General8: + case Args_Double_None: + case Args_Int_Double: + case Args_Float32_Float32: + case Args_Double_Double: + case Args_Double_Int: + case Args_Double_DoubleInt: + case Args_Double_DoubleDouble: + case Args_Double_DoubleDoubleDouble: + case Args_Double_DoubleDoubleDoubleDouble: + case Args_Double_IntDouble: + case Args_Int_IntDouble: + break; + default: + MOZ_CRASH("Unexpected type"); + } +} +#endif // DEBUG && JS_ARM64_SIMULATOR + +void +MacroAssemblerCompat::callWithABI(void* fun, MoveOp::Type result) +{ +#ifdef JS_ARM64_SIMULATOR + MOZ_ASSERT(passedIntArgs_ + passedFloatArgs_ <= 15); + passedArgTypes_ <<= ArgType_Shift; + switch (result) { + case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break; + case MoveOp::DOUBLE: passedArgTypes_ |= ArgType_Double; break; + case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break; + default: MOZ_CRASH("Invalid return type"); + } +# ifdef DEBUG + AssertValidABIFunctionType(passedArgTypes_); +# endif + ABIFunctionType type = ABIFunctionType(passedArgTypes_); + fun = vixl::Simulator::RedirectNativeFunction(fun, type); +#endif // JS_ARM64_SIMULATOR + + uint32_t stackAdjust; + callWithABIPre(&stackAdjust); + call(ImmPtr(fun)); + callWithABIPost(stackAdjust, result); +} + +void +MacroAssemblerCompat::callWithABI(Register fun, MoveOp::Type result) +{ + movePtr(fun, ip0); + + uint32_t stackAdjust; + callWithABIPre(&stackAdjust); + call(ip0); + callWithABIPost(stackAdjust, result); +} + +void +MacroAssemblerCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result) +{ + uint32_t stackAdjust; + callWithABIPre(&stackAdjust); + call(imm); + callWithABIPost(stackAdjust, result); +} + +void +MacroAssemblerCompat::callWithABI(Address fun, MoveOp::Type result) +{ + loadPtr(fun, ip0); + + uint32_t stackAdjust; + callWithABIPre(&stackAdjust); + call(ip0); + callWithABIPost(stackAdjust, result); +} + +void +MacroAssemblerCompat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, + Label* label) +{ + MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); + MOZ_ASSERT(ptr != temp); + MOZ_ASSERT(ptr != ScratchReg && ptr != ScratchReg2); // Both may be used internally. + MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); + + const Nursery& nursery = GetJitContext()->runtime->gcNursery(); + movePtr(ImmWord(-ptrdiff_t(nursery.start())), temp); + addPtr(ptr, temp); + branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual, + temp, ImmWord(nursery.nurserySize()), label); +} + +void +MacroAssemblerCompat::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, + Label* label) +{ + MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); + MOZ_ASSERT(temp != ScratchReg && temp != ScratchReg2); // Both may be used internally. + + // 'Value' representing the start of the nursery tagged as a JSObject + const Nursery& nursery = GetJitContext()->runtime->gcNursery(); + Value start = ObjectValue(*reinterpret_cast(nursery.start())); + + movePtr(ImmWord(-ptrdiff_t(start.asRawBits())), temp); + addPtr(value.valueReg(), temp); + branchPtr(cond == Assembler::Equal ? 
Assembler::Below : Assembler::AboveOrEqual, + temp, ImmWord(nursery.nurserySize()), label); +} + +void +MacroAssemblerCompat::callAndPushReturnAddress(Label* label) +{ + // FIXME: Jandem said he would refactor the code to avoid making + // this instruction required, but probably forgot about it. + // Instead of implementing this function, we should make it unnecessary. + Label ret; + { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + + Adr(scratch64, &ret); + asMasm().Push(scratch64.asUnsized()); + } + + Bl(label); + bind(&ret); +} + +void +MacroAssemblerCompat::breakpoint() +{ + static int code = 0xA77; + Brk((code++) & 0xffff); +} + +// =============================================================== +// Stack manipulation functions. + +void +MacroAssembler::reserveStack(uint32_t amount) +{ + // TODO: This bumps |sp| every time we reserve using a second register. + // It would save some instructions if we had a fixed frame size. + vixl::MacroAssembler::Claim(Operand(amount)); + adjustFrame(amount); +} + +void +MacroAssembler::PushRegsInMask(LiveRegisterSet set) +{ + for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ) { + vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg }; + + for (size_t i = 0; i < 4 && iter.more(); i++) { + src[i] = ARMRegister(*iter, 64); + ++iter; + adjustFrame(8); + } + vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]); + } + + for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) { + vixl::CPURegister src[4] = { vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg }; + + for (size_t i = 0; i < 4 && iter.more(); i++) { + src[i] = ARMFPRegister(*iter, 64); + ++iter; + adjustFrame(8); + } + vixl::MacroAssembler::Push(src[0], src[1], src[2], src[3]); + } +} + +void +MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore) +{ + // The offset of the data from the stack pointer. 
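+    // Registers are reloaded in the same layout PushRegsInMask() created:
+    // offsets advance even for ignored registers, pairs of live registers are
+    // loaded with LDP where possible, and the stack is freed in one step at
+    // the end instead of being popped incrementally.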
+ uint32_t offset = 0; + + for (FloatRegisterIterator iter(set.fpus().reduceSetForPush()); iter.more(); ) { + vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg }; + uint32_t nextOffset = offset; + + for (size_t i = 0; i < 2 && iter.more(); i++) { + if (!ignore.has(*iter)) + dest[i] = ARMFPRegister(*iter, 64); + ++iter; + nextOffset += sizeof(double); + } + + if (!dest[0].IsNone() && !dest[1].IsNone()) + Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset)); + else if (!dest[0].IsNone()) + Ldr(dest[0], MemOperand(GetStackPointer64(), offset)); + else if (!dest[1].IsNone()) + Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(double))); + + offset = nextOffset; + } + + MOZ_ASSERT(offset == set.fpus().getPushSizeInBytes()); + + for (GeneralRegisterIterator iter(set.gprs()); iter.more(); ) { + vixl::CPURegister dest[2] = { vixl::NoCPUReg, vixl::NoCPUReg }; + uint32_t nextOffset = offset; + + for (size_t i = 0; i < 2 && iter.more(); i++) { + if (!ignore.has(*iter)) + dest[i] = ARMRegister(*iter, 64); + ++iter; + nextOffset += sizeof(uint64_t); + } + + if (!dest[0].IsNone() && !dest[1].IsNone()) + Ldp(dest[0], dest[1], MemOperand(GetStackPointer64(), offset)); + else if (!dest[0].IsNone()) + Ldr(dest[0], MemOperand(GetStackPointer64(), offset)); + else if (!dest[1].IsNone()) + Ldr(dest[1], MemOperand(GetStackPointer64(), offset + sizeof(uint64_t))); + + offset = nextOffset; + } + + size_t bytesPushed = set.gprs().size() * sizeof(uint64_t) + set.fpus().getPushSizeInBytes(); + MOZ_ASSERT(offset == bytesPushed); + freeStack(bytesPushed); +} + +void +MacroAssembler::Push(Register reg) +{ + push(reg); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const Imm32 imm) +{ + push(imm); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const ImmWord imm) +{ + push(imm); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const ImmPtr imm) +{ + push(imm); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(const ImmGCPtr ptr) +{ + push(ptr); + adjustFrame(sizeof(intptr_t)); +} + +void +MacroAssembler::Push(FloatRegister f) +{ + push(f); + adjustFrame(sizeof(double)); +} + +void +MacroAssembler::Pop(const Register reg) +{ + pop(reg); + adjustFrame(-1 * int64_t(sizeof(int64_t))); +} + +void +MacroAssembler::Pop(const ValueOperand& val) +{ + pop(val); + adjustFrame(-1 * int64_t(sizeof(int64_t))); +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h new file mode 100644 index 000000000000..b7f1c2301cc7 --- /dev/null +++ b/js/src/jit/arm64/MacroAssembler-arm64.h @@ -0,0 +1,3317 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_MacroAssembler_arm64_h +#define jit_arm64_MacroAssembler_arm64_h + +#include "jit/arm64/Assembler-arm64.h" +#include "jit/arm64/vixl/Debugger-vixl.h" +#include "jit/arm64/vixl/MacroAssembler-vixl.h" + +#include "jit/AtomicOp.h" +#include "jit/JitFrames.h" +#include "jit/MoveResolver.h" + +namespace js { +namespace jit { + +// Import VIXL operands directly into the jit namespace for shared code. 
+using vixl::Operand; +using vixl::MemOperand; + +struct ImmShiftedTag : public ImmWord +{ + ImmShiftedTag(JSValueShiftedTag shtag) + : ImmWord((uintptr_t)shtag) + { } + + ImmShiftedTag(JSValueType type) + : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) + { } +}; + +struct ImmTag : public Imm32 +{ + ImmTag(JSValueTag tag) + : Imm32(tag) + { } +}; + +class MacroAssemblerCompat : public vixl::MacroAssembler +{ + public: + typedef vixl::Condition Condition; + + private: + // Perform a downcast. Should be removed by Bug 996602. + js::jit::MacroAssembler& asMasm(); + const js::jit::MacroAssembler& asMasm() const; + + public: + // Restrict to only VIXL-internal functions. + vixl::MacroAssembler& asVIXL(); + const MacroAssembler& asVIXL() const; + + protected: + bool enoughMemory_; + uint32_t framePushed_; + + // TODO: Can this be moved out of the MacroAssembler and into some shared code? + // TODO: All the code seems to be arch-independent, and it's weird to have this here. + bool inCall_; + bool usedOutParam_; + uint32_t args_; + uint32_t passedIntArgs_; + uint32_t passedFloatArgs_; + uint32_t passedArgTypes_; + uint32_t stackForCall_; + bool dynamicAlignment_; + + MacroAssemblerCompat() + : vixl::MacroAssembler(), + enoughMemory_(true), + framePushed_(0), + inCall_(false), + usedOutParam_(false), + args_(0), + passedIntArgs_(0), + passedFloatArgs_(0), + passedArgTypes_(0), + stackForCall_(0), + dynamicAlignment_(false) + { } + + protected: + MoveResolver moveResolver_; + + public: + bool oom() const { + return Assembler::oom() || !enoughMemory_; + } + static MemOperand toMemOperand(Address& a) { + return MemOperand(ARMRegister(a.base, 64), a.offset); + } + void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr, vixl::LoadStoreOp op) { + const ARMRegister base = ARMRegister(addr.base, 64); + const ARMRegister index = ARMRegister(addr.index, 64); + const unsigned scale = addr.scale; + + if (!addr.offset && (!scale || scale == static_cast(CalcLSDataSize(op)))) { + LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op); + return; + } + + vixl::UseScratchRegisterScope temps(this); + ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(!scratch64.Is(rt)); + MOZ_ASSERT(!scratch64.Is(base)); + MOZ_ASSERT(!scratch64.Is(index)); + + Add(scratch64, base, Operand(index, vixl::LSL, scale)); + LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op); + } + void Push(ARMRegister reg) { + push(reg); + adjustFrame(reg.size() / 8); + } + void Push(Register reg) { + vixl::MacroAssembler::Push(ARMRegister(reg, 64)); + adjustFrame(8); + } + void Push(Imm32 imm) { + push(imm); + adjustFrame(8); + } + void Push(FloatRegister f) { + push(ARMFPRegister(f, 64)); + adjustFrame(8); + } + void Push(ImmPtr imm) { + push(imm); + adjustFrame(sizeof(void*)); + } + void push(FloatRegister f) { + vixl::MacroAssembler::Push(ARMFPRegister(f, 64)); + } + void push(ARMFPRegister f) { + vixl::MacroAssembler::Push(f); + } + void push(Imm32 imm) { + if (imm.value == 0) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + move32(imm, scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + } + void push(ImmWord imm) { + if (imm.value == 0) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + Mov(scratch64, imm.value); + vixl::MacroAssembler::Push(scratch64); + } + } + void 
push(ImmPtr imm) { + if (imm.value == nullptr) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + movePtr(imm, scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + } + void push(ImmGCPtr imm) { + if (imm.value == nullptr) { + vixl::MacroAssembler::Push(vixl::xzr); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + movePtr(imm, scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + } + void push(ImmMaybeNurseryPtr imm) { + push(noteMaybeNurseryPtr(imm)); + } + void push(ARMRegister reg) { + vixl::MacroAssembler::Push(reg); + } + void push(Address a) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(a.base != scratch64.asUnsized()); + loadPtr(a, scratch64.asUnsized()); + vixl::MacroAssembler::Push(scratch64); + } + + // Push registers. + void push(Register reg) { + vixl::MacroAssembler::Push(ARMRegister(reg, 64)); + } + void push(Register r0, Register r1) { + vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64)); + } + void push(Register r0, Register r1, Register r2) { + vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64)); + } + void push(Register r0, Register r1, Register r2, Register r3) { + vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), + ARMRegister(r2, 64), ARMRegister(r3, 64)); + } + void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) { + vixl::MacroAssembler::Push(r0, r1, r2, r3); + } + + // Pop registers. + void pop(Register reg) { + vixl::MacroAssembler::Pop(ARMRegister(reg, 64)); + } + void pop(Register r0, Register r1) { + vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64)); + } + void pop(Register r0, Register r1, Register r2) { + vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64)); + } + void pop(Register r0, Register r1, Register r2, Register r3) { + vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), + ARMRegister(r2, 64), ARMRegister(r3, 64)); + } + void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) { + vixl::MacroAssembler::Pop(r0, r1, r2, r3); + } + + void pushReturnAddress() { + push(lr); + } + void pop(const ValueOperand& v) { + pop(v.valueReg()); + } + void pop(const FloatRegister& f) { + vixl::MacroAssembler::Pop(ARMRegister(f.code(), 64)); + } + + void implicitPop(uint32_t args) { + MOZ_ASSERT(args % sizeof(intptr_t) == 0); + adjustFrame(-args); + } + void Pop(ARMRegister r) { + vixl::MacroAssembler::Pop(r); + adjustFrame(- r.size() / 8); + } + // FIXME: This is the same on every arch. + // FIXME: If we can share framePushed_, we can share this. + // FIXME: Or just make it at the highest level. + CodeOffsetLabel PushWithPatch(ImmWord word) { + framePushed_ += sizeof(word.value); + return pushWithPatch(word); + } + CodeOffsetLabel PushWithPatch(ImmPtr ptr) { + return PushWithPatch(ImmWord(uintptr_t(ptr.value))); + } + + uint32_t framePushed() const { + return framePushed_; + } + void adjustFrame(int32_t diff) { + setFramePushed(framePushed_ + diff); + } + + void setFramePushed(uint32_t framePushed) { + framePushed_ = framePushed; + } + + void freeStack(Register amount) { + vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64))); + } + + // Update sp with the value of the current active stack pointer, if necessary. 
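+    // Most code addresses the stack through a pseudo stack pointer (normally
+    // x28); the real sp only has to be valid where the hardware or the ABI
+    // actually uses it (calls, exception handling, pushes through sp), and it
+    // must satisfy sp <= PSP before such a use (see setupUnalignedABICall()).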
+ void syncStackPtr() { + if (!GetStackPointer64().Is(vixl::sp)) + Mov(vixl::sp, GetStackPointer64()); + } + void initStackPtr() { + if (!GetStackPointer64().Is(vixl::sp)) + Mov(GetStackPointer64(), vixl::sp); + } + void storeValue(ValueOperand val, const Address& dest) { + storePtr(val.valueReg(), dest); + } + + template + void storeValue(JSValueType type, Register reg, const T& dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != reg); + tagValue(type, reg, ValueOperand(scratch)); + storeValue(ValueOperand(scratch), dest); + } + template + void storeValue(const Value& val, const T& dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + moveValue(val, ValueOperand(scratch)); + storeValue(ValueOperand(scratch), dest); + } + void storeValue(ValueOperand val, BaseIndex dest) { + storePtr(val.valueReg(), dest); + } + + template + void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest, MIRType slotType) { + if (valueType == MIRType_Double) { + storeDouble(value.reg().typedReg().fpu(), dest); + return; + } + + // For known integers and booleans, we can just store the unboxed value if + // the slot has the same type. + if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) { + if (value.constant()) { + Value val = value.value(); + if (valueType == MIRType_Int32) + store32(Imm32(val.toInt32()), dest); + else + store32(Imm32(val.toBoolean() ? 1 : 0), dest); + } else { + store32(value.reg().typedReg().gpr(), dest); + } + return; + } + + if (value.constant()) + storeValue(value.value(), dest); + else + storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest); + + } + void loadValue(Address src, Register val) { + Ldr(ARMRegister(val, 64), MemOperand(src)); + } + void loadValue(Address src, ValueOperand val) { + Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src)); + } + void loadValue(const BaseIndex& src, ValueOperand val) { + doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x); + } + void tagValue(JSValueType type, Register payload, ValueOperand dest) { + // This could be cleverer, but the first attempt had bugs. 
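+        // Boxing is just an OR of the shifted type tag into the upper bits of
+        // the payload (see ImmShiftedTag above); the payload's bits above
+        // JSVAL_TAG_SHIFT are assumed to already be clear.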
+ Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64), Operand(ImmShiftedTag(type).value)); + } + void pushValue(ValueOperand val) { + vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64)); + } + void popValue(ValueOperand val) { + vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64)); + } + void pushValue(const Value& val) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + jsval_layout jv = JSVAL_TO_IMPL(val); + if (val.isMarkable()) { + BufferOffset load = movePatchablePtr(ImmPtr((void*)jv.asBits), scratch); + writeDataRelocation(val, load); + push(scratch); + } else { + moveValue(val, scratch); + push(scratch); + } + } + void pushValue(JSValueType type, Register reg) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != reg); + tagValue(type, reg, ValueOperand(scratch)); + push(scratch); + } + void pushValue(const Address& addr) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != addr.base); + loadValue(addr, scratch); + push(scratch); + } + template + void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) { + switch (nbytes) { + case 8: { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + unboxNonDouble(value, scratch); + storePtr(scratch, address); + return; + } + case 4: + storePtr(value.valueReg(), address); + return; + case 1: + store8(value.valueReg(), address); + return; + default: MOZ_CRASH("Bad payload width"); + } + } + void moveValue(const Value& val, Register dest) { + if (val.isMarkable()) { + BufferOffset load = movePatchablePtr(ImmPtr((void*)val.asRawBits()), dest); + writeDataRelocation(val, load); + } else { + movePtr(ImmWord(val.asRawBits()), dest); + } + } + void moveValue(const Value& src, const ValueOperand& dest) { + moveValue(src, dest.valueReg()); + } + void moveValue(const ValueOperand& src, const ValueOperand& dest) { + if (src.valueReg() != dest.valueReg()) + movePtr(src.valueReg(), dest.valueReg()); + } + + CodeOffsetLabel pushWithPatch(ImmWord imm) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + CodeOffsetLabel label = movWithPatch(imm, scratch); + push(scratch); + return label; + } + + CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) { + BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value); + return CodeOffsetLabel(off.getOffset()); + } + CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) { + BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value)); + return CodeOffsetLabel(off.getOffset()); + } + + void boxValue(JSValueType type, Register src, Register dest) { + Orr(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(ImmShiftedTag(type).value)); + } + void splitTag(Register src, Register dest) { + ubfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT, (64 - JSVAL_TAG_SHIFT)); + } + Register extractTag(const Address& address, Register scratch) { + loadPtr(address, scratch); + splitTag(scratch, scratch); + return scratch; + } + Register extractTag(const ValueOperand& value, Register scratch) { + splitTag(value.valueReg(), scratch); + return scratch; + } + Register extractObject(const Address& address, Register scratch) { + loadPtr(address, scratch); + unboxObject(scratch, scratch); + return scratch; + } + Register extractObject(const ValueOperand& value, 
Register scratch) { + unboxObject(value, scratch); + return scratch; + } + Register extractInt32(const ValueOperand& value, Register scratch) { + unboxInt32(value, scratch); + return scratch; + } + Register extractBoolean(const ValueOperand& value, Register scratch) { + unboxBoolean(value, scratch); + return scratch; + } + + // If source is a double, load into dest. + // If source is int32, convert to double and store in dest. + // Else, branch to failure. + void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure) { + Label isDouble, done; + + // TODO: splitTagForTest really should not leak a scratch register. + Register tag = splitTagForTest(source); + { + vixl::UseScratchRegisterScope temps(this); + temps.Exclude(ARMRegister(tag, 64)); + + branchTestDouble(Assembler::Equal, tag, &isDouble); + branchTestInt32(Assembler::NotEqual, tag, failure); + } + + convertInt32ToDouble(source.valueReg(), dest); + jump(&done); + + bind(&isDouble); + unboxDouble(source, dest); + + bind(&done); + } + + void emitSet(Condition cond, Register dest) { + Cset(ARMRegister(dest, 64), cond); + } + + template + void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) { + cmpPtr(lhs, rhs); + emitSet(cond, dest); + } + + template + void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) { + cmp32(lhs, rhs); + emitSet(cond, dest); + } + + void testNullSet(Condition cond, const ValueOperand& value, Register dest) { + cond = testNull(cond, value); + emitSet(cond, dest); + } + void testObjectSet(Condition cond, const ValueOperand& value, Register dest) { + cond = testObject(cond, value); + emitSet(cond, dest); + } + void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) { + cond = testUndefined(cond, value); + emitSet(cond, dest); + } + + void convertBoolToInt32(Register source, Register dest) { + Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64)); + } + + void convertInt32ToDouble(Register src, FloatRegister dest) { + Scvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode. + } + void convertInt32ToDouble(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertInt32ToDouble(scratch, dest); + } + void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + MOZ_ASSERT(scratch != src.index); + load32(src, scratch); + convertInt32ToDouble(scratch, dest); + } + + void convertInt32ToFloat32(Register src, FloatRegister dest) { + Scvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode. + } + void convertInt32ToFloat32(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertInt32ToFloat32(scratch, dest); + } + + void convertUInt32ToDouble(Register src, FloatRegister dest) { + Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode. 
+ } + void convertUInt32ToDouble(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertUInt32ToDouble(scratch, dest); + } + + void convertUInt32ToFloat32(Register src, FloatRegister dest) { + Ucvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode. + } + void convertUInt32ToFloat32(const Address& src, FloatRegister dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + load32(src, scratch); + convertUInt32ToFloat32(scratch, dest); + } + + void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) { + Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32)); + } + void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) { + Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64)); + } + + void branchTruncateDouble(FloatRegister src, Register dest, Label* fail) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + + // An out of range integer will be saturated to the destination size. + ARMFPRegister src64(src, 64); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch64.Is(dest64)); + + //breakpoint(); + Fcvtzs(dest64, src64); + Add(scratch64, dest64, Operand(0x7fffffffffffffff)); + Cmn(scratch64, 3); + B(fail, Assembler::Above); + And(dest64, dest64, Operand(0xffffffff)); + } + void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail, + bool negativeZeroCheck = true) + { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratch64 = temps.AcquireD(); + + ARMFPRegister fsrc(src, 64); + ARMRegister dest32(dest, 32); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch64.Is(fsrc)); + + Fcvtzs(dest32, fsrc); // Convert, rounding toward zero. + Scvtf(scratch64, dest32); // Convert back, using FPCR rounding mode. + Fcmp(scratch64, fsrc); + B(fail, Assembler::NotEqual); + + if (negativeZeroCheck) { + Label nonzero; + Cbnz(dest32, &nonzero); + Fmov(dest64, fsrc); + Cbnz(dest64, fail); + bind(&nonzero); + } + } + void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail, + bool negativeZeroCheck = true) + { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratch32 = temps.AcquireS(); + + ARMFPRegister fsrc(src, 32); + ARMRegister dest32(dest, 32); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch32.Is(fsrc)); + + Fcvtzs(dest64, fsrc); // Convert, rounding toward zero. + Scvtf(scratch32, dest32); // Convert back, using FPCR rounding mode. 
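+        // If converting back does not reproduce the input exactly, the value
+        // was fractional or outside the int32 range, so fail.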
+ Fcmp(scratch32, fsrc); + B(fail, Assembler::NotEqual); + + if (negativeZeroCheck) { + Label nonzero; + Cbnz(dest32, &nonzero); + Fmov(dest32, fsrc); + Cbnz(dest32, fail); + bind(&nonzero); + } + And(dest64, dest64, Operand(0xffffffff)); + } + + void branchTruncateFloat32(FloatRegister src, Register dest, Label* fail) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + + ARMFPRegister src32(src, 32); + ARMRegister dest64(dest, 64); + + MOZ_ASSERT(!scratch64.Is(dest64)); + + Fcvtzs(dest64, src32); + Add(scratch64, dest64, Operand(0x7fffffffffffffff)); + Cmn(scratch64, 3); + B(fail, Assembler::Above); + And(dest64, dest64, Operand(0xffffffff)); + } + void floor(FloatRegister input, Register output, Label* bail) { + Label handleZero; + //Label handleNeg; + Label fin; + ARMFPRegister iDbl(input, 64); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iDbl, 0.0); + B(Assembler::Equal, &handleZero); + //B(Assembler::Signed, &handleNeg); + // NaN is always a bail condition, just bail directly. + B(Assembler::Overflow, bail); + Fcvtms(o64, iDbl); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + // Move the top word of the double into the output reg, if it is non-zero, + // then the original value was -0.0. + Fmov(o64, iDbl); + Cbnz(o64, bail); + bind(&fin); + } + + void floorf(FloatRegister input, Register output, Label* bail) { + Label handleZero; + //Label handleNeg; + Label fin; + ARMFPRegister iFlt(input, 32); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iFlt, 0.0); + B(Assembler::Equal, &handleZero); + //B(Assembler::Signed, &handleNeg); + // NaN is always a bail condition, just bail directly. + B(Assembler::Overflow, bail); + Fcvtms(o64, iFlt); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + // Move the top word of the double into the output reg, if it is non-zero, + // then the original value was -0.0. + Fmov(o32, iFlt); + Cbnz(o32, bail); + bind(&fin); + } + + void ceil(FloatRegister input, Register output, Label* bail) { + Label handleZero; + Label fin; + ARMFPRegister iDbl(input, 64); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iDbl, 0.0); + B(Assembler::Overflow, bail); + Fcvtps(o64, iDbl); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Cbz(o64, &handleZero); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + Fmov(scratch, iDbl); + Cbnz(scratch, bail); + bind(&fin); + } + + void ceilf(FloatRegister input, Register output, Label* bail) { + Label handleZero; + Label fin; + ARMFPRegister iFlt(input, 32); + ARMRegister o64(output, 64); + ARMRegister o32(output, 32); + Fcmp(iFlt, 0.0); + + // NaN is always a bail condition, just bail directly. + B(Assembler::Overflow, bail); + Fcvtps(o64, iFlt); + Cmp(o64, Operand(o64, vixl::SXTW)); + B(NotEqual, bail); + Cbz(o64, &handleZero); + Mov(o32, o32); + B(&fin); + + bind(&handleZero); + // Move the top word of the double into the output reg, if it is non-zero, + // then the original value was -0.0. 
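+        // (Single-precision case: the whole float bit pattern is checked, so
+        // any input other than +0.0 that ceils to zero bails out.)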
+ Fmov(o32, iFlt); + Cbnz(o32, bail); + bind(&fin); + } + + void jump(Label* label) { + B(label); + } + void jump(JitCode* code) { + branch(code); + } + void jump(RepatchLabel* label) { + MOZ_CRASH("jump (repatchlabel)"); + } + void jump(Register reg) { + Br(ARMRegister(reg, 64)); + } + void jump(const Address& addr) { + loadPtr(addr, ip0); + Br(vixl::ip0); + } + + void align(int alignment) { + armbuffer_.align(alignment); + } + + void haltingAlign(int alignment) { + // TODO: Implement a proper halting align. + // ARM doesn't have one either. + armbuffer_.align(alignment); + } + + void movePtr(Register src, Register dest) { + Mov(ARMRegister(dest, 64), ARMRegister(src, 64)); + } + void movePtr(ImmWord imm, Register dest) { + Mov(ARMRegister(dest, 64), int64_t(imm.value)); + } + void movePtr(ImmPtr imm, Register dest) { + Mov(ARMRegister(dest, 64), int64_t(imm.value)); + } + void movePtr(AsmJSImmPtr imm, Register dest) { + BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest); + append(AsmJSAbsoluteLink(CodeOffsetLabel(off.getOffset()), imm.kind())); + } + void movePtr(ImmGCPtr imm, Register dest) { + BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest); + writeDataRelocation(imm, load); + } + void movePtr(ImmMaybeNurseryPtr imm, Register dest) { + movePtr(noteMaybeNurseryPtr(imm), dest); + } + + void mov(ImmWord imm, Register dest) { + movePtr(imm, dest); + } + + void move32(Imm32 imm, Register dest) { + Mov(ARMRegister(dest, 32), (int64_t)imm.value); + } + void move32(Register src, Register dest) { + Mov(ARMRegister(dest, 32), ARMRegister(src, 32)); + } + + // Move a pointer using a literal pool, so that the pointer + // may be easily patched or traced. + // Returns the BufferOffset of the load instruction emitted. + BufferOffset movePatchablePtr(ImmWord ptr, Register dest); + BufferOffset movePatchablePtr(ImmPtr ptr, Register dest); + + void not32(Register reg) { + Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32)); + } + void neg32(Register reg) { + Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32))); + } + + void loadPtr(AsmJSAbsoluteAddress address, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + movePtr(AsmJSImmPtr(address.kind()), scratch.asUnsized()); + Ldr(ARMRegister(dest, 64), MemOperand(scratch)); + } + void loadPtr(AbsoluteAddress address, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized()); + Ldr(ARMRegister(dest, 64), MemOperand(scratch)); + } + void loadPtr(const Address& address, Register dest) { + Ldr(ARMRegister(dest, 64), MemOperand(address)); + } + void loadPtr(const BaseIndex& src, Register dest) { + Register base = src.base; + uint32_t scale = Imm32::ShiftOf(src.scale).value; + ARMRegister dest64(dest, 64); + ARMRegister index64(src.index, 64); + + if (src.offset) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch = temps.AcquireX(); + MOZ_ASSERT(!scratch.Is(ARMRegister(base, 64))); + MOZ_ASSERT(!scratch.Is(dest64)); + MOZ_ASSERT(!scratch.Is(index64)); + + Add(scratch, ARMRegister(base, 64), Operand(int64_t(src.offset))); + Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale)); + return; + } + + Ldr(dest64, MemOperand(ARMRegister(base, 64), index64, vixl::LSL, scale)); + } + void loadPrivate(const Address& src, Register dest) { + loadPtr(src, dest); + lshiftPtr(Imm32(1), dest); + } + + void store8(Register src, 
const Address& address) { + Strb(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store8(Imm32 imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + move32(imm, scratch32.asUnsized()); + Strb(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store8(Register src, const BaseIndex& address) { + doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w); + } + void store8(Imm32 imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + MOZ_ASSERT(scratch32.asUnsized() != address.index); + Mov(scratch32, Operand(imm.value)); + doBaseIndex(scratch32, address, vixl::STRB_w); + } + + void store16(Register src, const Address& address) { + Strh(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store16(Imm32 imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + move32(imm, scratch32.asUnsized()); + Strh(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store16(Register src, const BaseIndex& address) { + doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w); + } + void store16(Imm32 imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + MOZ_ASSERT(scratch32.asUnsized() != address.index); + Mov(scratch32, Operand(imm.value)); + doBaseIndex(scratch32, address, vixl::STRH_w); + } + + void storePtr(ImmWord imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != address.base); + movePtr(imm, scratch); + storePtr(scratch, address); + } + void storePtr(ImmPtr imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + Mov(scratch64, uint64_t(imm.value)); + Str(scratch64, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void storePtr(ImmGCPtr imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != address.base); + movePtr(imm, scratch); + storePtr(scratch, address); + } + void storePtr(Register src, const Address& address) { + Str(ARMRegister(src, 64), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + + void storePtr(ImmWord imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + MOZ_ASSERT(scratch64.asUnsized() != address.index); + Mov(scratch64, Operand(imm.value)); + doBaseIndex(scratch64, address, vixl::STR_x); + } + void storePtr(ImmGCPtr imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != address.base); + MOZ_ASSERT(scratch != address.index); + movePtr(imm, scratch); + doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x); + } + void storePtr(Register src, const BaseIndex& 
address) { + doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x); + } + + void storePtr(Register src, AbsoluteAddress address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + Mov(scratch64, uint64_t(address.addr)); + Str(ARMRegister(src, 64), MemOperand(scratch64)); + } + + void store32(Register src, AbsoluteAddress address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + Mov(scratch64, uint64_t(address.addr)); + Str(ARMRegister(src, 32), MemOperand(scratch64)); + } + void store32(Imm32 imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + Mov(scratch32, uint64_t(imm.value)); + Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store32(Register r, const Address& address) { + Str(ARMRegister(r, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void store32(Imm32 imm, const BaseIndex& address) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != address.base); + MOZ_ASSERT(scratch32.asUnsized() != address.index); + Mov(scratch32, imm.value); + doBaseIndex(scratch32, address, vixl::STR_w); + } + void store32(Register r, const BaseIndex& address) { + doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w); + } + + void store32_NoSecondScratch(Imm32 imm, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + temps.Exclude(ARMRegister(ScratchReg2, 32)); // Disallow ScratchReg2. + const ARMRegister scratch32 = temps.AcquireW(); + + MOZ_ASSERT(scratch32.asUnsized() != address.base); + Mov(scratch32, uint64_t(imm.value)); + Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset)); + } + + // SIMD. 
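+    // None of the SIMD loads/stores are implemented on ARM64 yet; they all
+    // crash with NYI.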
+ void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void loadAlignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadAlignedInt32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeAlignedInt32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); } + void storeAlignedInt32x4(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + void loadUnalignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadUnalignedInt32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeUnalignedInt32x4(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); } + void storeUnalignedInt32x4(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + + void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeFloat32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeFloat32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void loadAlignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadAlignedFloat32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeAlignedFloat32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); } + void storeAlignedFloat32x4(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + void loadUnalignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadUnalignedFloat32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeUnalignedFloat32x4(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); } + void storeUnalignedFloat32x4(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); } + + // StackPointer manipulation. 
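+    // These helpers operate on the pseudo stack pointer. Variants that lower
+    // or replace it (subFromStackPtr, andToStackPtr, moveToStackPtr,
+    // loadStackPtr) also call syncStackPtr() so the sp <= PSP invariant keeps
+    // holding; variants that only raise it, or that modify the other operand,
+    // do not need to.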
+ template + void addToStackPtr(T t) { addPtr(t, getStackPointer()); } + template + void addStackPtrTo(T t) { addPtr(getStackPointer(), t); } + + template + void subFromStackPtr(T t) { subPtr(t, getStackPointer()); syncStackPtr(); } + template + void subStackPtrFrom(T t) { subPtr(getStackPointer(), t); } + + template + void andToStackPtr(T t) { andPtr(t, getStackPointer()); syncStackPtr(); } + template + void andStackPtrTo(T t) { andPtr(getStackPointer(), t); } + + template + void moveToStackPtr(T t) { movePtr(t, getStackPointer()); syncStackPtr(); } + template + void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); } + + template + void loadStackPtr(T t) { loadPtr(t, getStackPointer()); syncStackPtr(); } + template + void storeStackPtr(T t) { storePtr(getStackPointer(), t); } + + // StackPointer testing functions. + template + void branchTestStackPtr(Condition cond, T t, Label* label) { + branchTestPtr(cond, getStackPointer(), t, label); + } + template + void branchStackPtr(Condition cond, T rhs, Label* label) { + branchPtr(cond, getStackPointer(), rhs, label); + } + template + void branchStackPtrRhs(Condition cond, T lhs, Label* label) { + branchPtr(cond, lhs, getStackPointer(), label); + } + + void rshiftPtr(Imm32 imm, Register dest) { + Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value); + } + void rshiftPtr(Imm32 imm, Register src, Register dest) { + Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value); + } + + void rshiftPtrArithmetic(Imm32 imm, Register dest) { + Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value); + } + void lshiftPtr(Imm32 imm, Register dest) { + Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value); + } + void xorPtr(Imm32 imm, Register dest) { + Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); + } + void xor32(Imm32 imm, Register dest) { + Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + + void xorPtr(Register src, Register dest) { + Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); + } + void orPtr(ImmWord imm, Register dest) { + Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); + } + void orPtr(Imm32 imm, Register dest) { + Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); + } + void orPtr(Register src, Register dest) { + Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); + } + void or32(Imm32 imm, Register dest) { + Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void or32(Register src, Register dest) { + Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + void or32(Imm32 imm, const Address& dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + load32(dest, scratch32.asUnsized()); + Orr(scratch32, scratch32, Operand(imm.value)); + store32(scratch32.asUnsized(), dest); + } + void andPtr(Imm32 imm, Register dest) { + And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); + } + void andPtr(Register src, Register dest) { + And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); + } + void and32(Imm32 imm, Register dest) { + And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void and32(Imm32 imm, Register src, Register dest) { + And(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value)); + } + + void and32(Register src, Register dest) { 
+ And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + void and32(Imm32 mask, Address dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + load32(dest, scratch32.asUnsized()); + And(scratch32, scratch32, Operand(mask.value)); + store32(scratch32.asUnsized(), dest); + } + void and32(Address src, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != src.base); + load32(src, scratch32.asUnsized()); + And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32)); + } + + void testPtr(Register lhs, Register rhs) { + Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64))); + } + void test32(Register lhs, Register rhs) { + Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32))); + } + void test32(const Address& addr, Imm32 imm) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != addr.base); + load32(addr, scratch32.asUnsized()); + Tst(scratch32, Operand(imm.value)); + } + void test32(Register lhs, Imm32 rhs) { + Tst(ARMRegister(lhs, 32), Operand(rhs.value)); + } + void cmp32(Register lhs, Imm32 rhs) { + Cmp(ARMRegister(lhs, 32), Operand(rhs.value)); + } + void cmp32(Register a, Register b) { + Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32))); + } + void cmp32(const Operand& lhs, Imm32 rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + Mov(scratch32, lhs); + Cmp(scratch32, Operand(rhs.value)); + } + void cmp32(const Operand& lhs, Register rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + Mov(scratch32, lhs); + Cmp(scratch32, Operand(ARMRegister(rhs, 32))); + } + + void cmpPtr(Register lhs, Imm32 rhs) { + Cmp(ARMRegister(lhs, 64), Operand(rhs.value)); + } + void cmpPtr(Register lhs, ImmWord rhs) { + Cmp(ARMRegister(lhs, 64), Operand(rhs.value)); + } + void cmpPtr(Register lhs, ImmPtr rhs) { + Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value))); + } + void cmpPtr(Register lhs, Register rhs) { + Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64)); + } + void cmpPtr(Register lhs, ImmGCPtr rhs) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs); + movePtr(rhs, scratch); + cmpPtr(lhs, scratch); + } + void cmpPtr(Register lhs, ImmMaybeNurseryPtr rhs) { + cmpPtr(lhs, noteMaybeNurseryPtr(rhs)); + } + + void cmpPtr(const Address& lhs, Register rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != lhs.base); + MOZ_ASSERT(scratch64.asUnsized() != rhs); + Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset)); + Cmp(scratch64, Operand(ARMRegister(rhs, 64))); + } + void cmpPtr(const Address& lhs, ImmWord rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != lhs.base); + Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset)); + Cmp(scratch64, Operand(rhs.value)); + } + void cmpPtr(const Address& lhs, ImmPtr rhs) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != lhs.base); + Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), 
lhs.offset)); + Cmp(scratch64, Operand(uint64_t(rhs.value))); + } + void cmpPtr(const Address& lhs, ImmGCPtr rhs) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + cmpPtr(scratch, rhs); + } + + void loadDouble(const Address& src, FloatRegister dest) { + Ldr(ARMFPRegister(dest, 64), MemOperand(ARMRegister(src.base,64), src.offset)); + } + void loadDouble(const BaseIndex& src, FloatRegister dest) { + ARMRegister base(src.base, 64); + ARMRegister index(src.index, 64); + + if (src.offset == 0) { + Ldr(ARMFPRegister(dest, 64), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); + return; + } + + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + MOZ_ASSERT(scratch64.asUnsized() != src.index); + + Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale))); + Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset)); + } + void loadFloatAsDouble(const Address& addr, FloatRegister dest) { + Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset)); + fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32)); + } + void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) { + ARMRegister base(src.base, 64); + ARMRegister index(src.index, 64); + if (src.offset == 0) { + Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + MOZ_ASSERT(scratch64.asUnsized() != src.index); + + Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale))); + Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset)); + } + fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32)); + } + + void loadFloat32(const Address& addr, FloatRegister dest) { + Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset)); + } + void loadFloat32(const BaseIndex& src, FloatRegister dest) { + ARMRegister base(src.base, 64); + ARMRegister index(src.index, 64); + if (src.offset == 0) { + Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale))); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + MOZ_ASSERT(scratch64.asUnsized() != src.index); + + Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale))); + Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset)); + } + } + + void storeDouble(FloatRegister src, const Address& dest) { + Str(ARMFPRegister(src, 64), MemOperand(ARMRegister(dest.base, 64), dest.offset)); + } + void storeDouble(FloatRegister src, const BaseIndex& dest) { + doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d); + } + + void storeFloat32(FloatRegister src, Address addr) { + Str(ARMFPRegister(src, 32), MemOperand(ARMRegister(addr.base, 64), addr.offset)); + } + void storeFloat32(FloatRegister src, BaseIndex addr) { + doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s); + } + + void moveDouble(FloatRegister src, FloatRegister dest) { + fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); + } + void zeroDouble(FloatRegister reg) { + fmov(ARMFPRegister(reg, 64), vixl::xzr); + } + void zeroFloat32(FloatRegister reg) { + fmov(ARMFPRegister(reg, 32), vixl::wzr); + } + void 
negateDouble(FloatRegister reg) { + fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64)); + } + void negateFloat(FloatRegister reg) { + fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32)); + } + void addDouble(FloatRegister src, FloatRegister dest) { + fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); + } + void subDouble(FloatRegister src, FloatRegister dest) { + fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); + } + void mulDouble(FloatRegister src, FloatRegister dest) { + fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); + } + void divDouble(FloatRegister src, FloatRegister dest) { + fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64)); + } + + void moveFloat32(FloatRegister src, FloatRegister dest) { + fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32)); + } + void moveFloatAsDouble(Register src, FloatRegister dest) { + MOZ_CRASH("moveFloatAsDouble"); + } + + void splitTag(const ValueOperand& operand, Register dest) { + splitTag(operand.valueReg(), dest); + } + void splitTag(const Address& operand, Register dest) { + loadPtr(operand, dest); + splitTag(dest, dest); + } + void splitTag(const BaseIndex& operand, Register dest) { + loadPtr(operand, dest); + splitTag(dest, dest); + } + + // Extracts the tag of a value and places it in ScratchReg. + Register splitTagForTest(const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != value.valueReg()); + Lsr(scratch64, ARMRegister(value.valueReg(), 64), JSVAL_TAG_SHIFT); + return scratch64.asUnsized(); // FIXME: Surely we can make a better interface. + } + void cmpTag(const ValueOperand& operand, ImmTag tag) { + MOZ_CRASH("cmpTag"); + } + + void load32(const Address& address, Register dest) { + Ldr(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load32(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w); + } + void load32(AbsoluteAddress address, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized()); + ldr(ARMRegister(dest, 32), MemOperand(scratch64)); + } + + void load8SignExtend(const Address& address, Register dest) { + Ldrsb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load8SignExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w); + } + + void load8ZeroExtend(const Address& address, Register dest) { + Ldrb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load8ZeroExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w); + } + + void load16SignExtend(const Address& address, Register dest) { + Ldrsh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load16SignExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w); + } + + void load16ZeroExtend(const Address& address, Register dest) { + Ldrh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset)); + } + void load16ZeroExtend(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w); + } + + void add32(Register src, Register 
dest) { + Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + void add32(Imm32 imm, Register dest) { + Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void add32(Imm32 imm, const Address& dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + + Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Add(scratch32, scratch32, Operand(imm.value)); + Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + } + + void adds32(Register src, Register dest) { + Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + void adds32(Imm32 imm, Register dest) { + Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void adds32(Imm32 imm, const Address& dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != dest.base); + + Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Adds(scratch32, scratch32, Operand(imm.value)); + Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + } + + void sub32(Imm32 imm, Register dest) { + Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void sub32(Register src, Register dest) { + Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + + void subs32(Imm32 imm, Register dest) { + Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value)); + } + void subs32(Register src, Register dest) { + Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32))); + } + + void addPtr(Register src, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); + } + void addPtr(Register src1, Register src2, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(src1, 64), Operand(ARMRegister(src2, 64))); + } + + void addPtr(Imm32 imm, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); + } + void addPtr(Imm32 imm, Register src, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value)); + } + + void addPtr(Imm32 imm, const Address& dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != dest.base); + + Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Add(scratch64, scratch64, Operand(imm.value)); + Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + } + void addPtr(ImmWord imm, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); + } + void addPtr(ImmPtr imm, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(uint64_t(imm.value))); + } + void addPtr(const Address& src, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != src.base); + + Ldr(scratch64, MemOperand(ARMRegister(src.base, 64), src.offset)); + Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64)); + } + void subPtr(Imm32 imm, Register dest) { + Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value)); + } + void subPtr(Register src, Register dest) { + Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64))); + } + void subPtr(const 
Address& addr, Register dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != addr.base); + + Ldr(scratch64, MemOperand(ARMRegister(addr.base, 64), addr.offset)); + Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64)); + } + void subPtr(Register src, const Address& dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != dest.base); + + Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + Sub(scratch64, scratch64, Operand(ARMRegister(src, 64))); + Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset)); + } + void mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero) { + Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32)); + if (onOver) { + Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW)); + B(onOver, NotEqual); + } + if (onZero) + Cbz(ARMRegister(dest, 32), onZero); + + // Clear upper 32 bits. + Mov(ARMRegister(dest, 32), ARMRegister(dest, 32)); + } + + void ret() { + pop(lr); + abiret(); + } + + void retn(Imm32 n) { + // ip0 <- [sp]; sp += n; ret ip0 + Ldr(vixl::ip0, MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex)); + syncStackPtr(); // SP is always used to transmit the stack between calls. + Ret(vixl::ip0); + } + + void j(Condition code, Label* dest) { + b(dest, code); + } + void j(Label* dest) { + b(dest, Always); + } + + void branch(Condition cond, Label* label) { + b(label, cond); + } + void branch(JitCode* target) { + syncStackPtr(); + addPendingJump(nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE); + b(-1); // The jump target will be patched by executableCopy(). 
+ } + + void branch16(Condition cond, Register lhs, Register rhs, Label* label) { + MOZ_CRASH("branch16"); + } + + void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) { + // since rhs is an operand, do the compare backwards + Cmp(ARMRegister(rhs, 32), lhs); + b(label, Assembler::InvertCmpCondition(cond)); + } + void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) { + ARMRegister l = lhs.reg(); + Cmp(l, Operand(rhs.value)); + b(label, cond); + } + void branch32(Condition cond, Register lhs, Register rhs, Label* label) { + cmp32(lhs, rhs); + b(label, cond); + } + void branch32(Condition cond, Register lhs, Imm32 imm, Label* label) { + cmp32(lhs, imm); + b(label, cond); + } + void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + MOZ_ASSERT(scratch != rhs); + load32(lhs, scratch); + branch32(cond, scratch, rhs, label); + } + void branch32(Condition cond, const Address& lhs, Imm32 imm, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + load32(lhs, scratch); + branch32(cond, scratch, imm, label); + } + void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + movePtr(ImmPtr(lhs.addr), scratch); + branch32(cond, Address(scratch, 0), rhs, label); + } + void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + movePtr(ImmPtr(lhs.addr), scratch); + branch32(cond, Address(scratch, 0), rhs, label); + } + void branch32(Condition cond, AsmJSAbsoluteAddress lhs, Imm32 rhs, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + movePtr(AsmJSImmPtr(lhs.kind()), scratch); + branch32(cond, Address(scratch, 0), rhs, label); + } + void branch32(Condition cond, BaseIndex lhs, Imm32 rhs, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != lhs.base); + MOZ_ASSERT(scratch32.asUnsized() != lhs.index); + doBaseIndex(scratch32, lhs, vixl::LDR_w); + branch32(cond, scratch32.asUnsized(), rhs, label); + } + + void branchSub32(Condition cond, const Address& lhs, Register rhs, Label* label) { + MOZ_CRASH("branchSub32"); + } + void branchSub32(Condition cond, const Address& lhs, Imm32 imm, Label* label) { + MOZ_CRASH("branchSub32"); + } + void branchSub32(Condition cond, Register lhs, Imm32 imm, Label* label) { + MOZ_CRASH("branchSub32"); + } + void branchSub32(Condition cond, Register lhs, Register rhs, Label* label) { + MOZ_CRASH("branchSub32"); + } + void branchSub32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) { + MOZ_CRASH("branchSub32"); + } + void branchSub32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) { + MOZ_CRASH("branchSub32"); + } + + void branchTest16(Condition cond, Register lhs, Register rhs, Label* label) { + MOZ_CRASH("branchTest16"); + } + void branchTest32(Condition cond, Register lhs, Register rhs, Label* label) { + MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned); + // x86 prefers |test foo, foo| to |cmp foo, #0|. 
+ // Convert the former to the latter for ARM. + if (lhs == rhs && (cond == Zero || cond == NonZero)) + cmp32(lhs, Imm32(0)); + else + test32(lhs, rhs); + B(label, cond); + } + void branchTest32(Condition cond, Register lhs, Imm32 imm, Label* label) { + MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned); + test32(lhs, imm); + B(label, cond); + } + void branchTest32(Condition cond, const Address& address, Imm32 imm, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != address.base); + load32(address, scratch); + branchTest32(cond, scratch, imm, label); + } + void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + loadPtr(address, scratch); + branchTest32(cond, scratch, imm, label); + } + CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always) { + ARMBuffer::PoolEntry pe; + BufferOffset load_bo; + BufferOffset branch_bo; + + // Does not overwrite condition codes from the caller. + { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + load_bo = immPool64(scratch64, (uint64_t)label, &pe); + } + + MOZ_ASSERT(!label->bound()); + if (cond != Always) { + Label notTaken; + b(&notTaken, Assembler::InvertCondition(cond)); + branch_bo = b(-1); + bind(&notTaken); + } else { + nop(); + branch_bo = b(-1); + } + label->use(branch_bo.getOffset()); + return CodeOffsetJump(load_bo.getOffset(), pe.index()); + } + CodeOffsetJump backedgeJump(RepatchLabel* label) { + return jumpWithPatch(label); + } + template <typename T> + CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel* label) { + cmpPtr(reg, ptr); + return jumpWithPatch(label, cond); + } + template <typename T> + CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel* label) { + // The scratch register is unused after the condition codes are set.
+ { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != addr.base); + loadPtr(addr, scratch); + cmpPtr(scratch, ptr); + } + return jumpWithPatch(label, cond); + } + + void branchPtr(Condition cond, AsmJSAbsoluteAddress lhs, Register rhs, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != rhs); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); + } + void branchPtr(Condition cond, Address lhs, ImmWord ptr, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, ptr, label); + } + void branchPtr(Condition cond, Address lhs, ImmPtr ptr, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, ptr, label); + } + void branchPtr(Condition cond, Address lhs, Register ptr, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + MOZ_ASSERT(scratch != ptr); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, ptr, label); + } + void branchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) { + cmpPtr(lhs, imm); + B(label, cond); + } + void branchPtr(Condition cond, Register lhs, ImmWord ptr, Label* label) { + cmpPtr(lhs, ptr); + B(label, cond); + } + void branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label) { + cmpPtr(lhs, rhs); + B(label, cond); + } + void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs); + movePtr(ptr, scratch); + branchPtr(cond, lhs, scratch, label); + } + void branchPtr(Condition cond, Address lhs, ImmGCPtr ptr, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch1_64 = temps.AcquireX(); + const ARMRegister scratch2_64 = temps.AcquireX(); + MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base); + MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base); + + movePtr(ptr, scratch1_64.asUnsized()); + loadPtr(lhs, scratch2_64.asUnsized()); + cmp(scratch2_64, scratch1_64); + B(cond, label); + + } + void branchPtr(Condition cond, Address lhs, ImmMaybeNurseryPtr ptr, Label* label) { + branchPtr(cond, lhs, noteMaybeNurseryPtr(ptr), label); + } + void branchPtr(Condition cond, Register lhs, Register rhs, Label* label) { + Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64)); + B(label, cond); + } + void branchPtr(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != rhs); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, rhs, label); + } + void branchPtr(Condition cond, AbsoluteAddress lhs, ImmWord ptr, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + loadPtr(lhs, scratch); + branchPtr(cond, scratch, ptr, label); + } + + void branchTestPtr(Condition cond, Register lhs, Register rhs, Label* label) { + Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64))); + B(label, cond); + } + void branchTestPtr(Condition cond, Register lhs, 
Imm32 imm, Label* label) { + Tst(ARMRegister(lhs, 64), Operand(imm.value)); + B(label, cond); + } + void branchTestPtr(Condition cond, const Address& lhs, Imm32 imm, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != lhs.base); + loadPtr(lhs, scratch); + branchTestPtr(cond, scratch, imm, label); + } + void branchPrivatePtr(Condition cond, const Address& lhs, ImmPtr ptr, Label* label) { + branchPtr(cond, lhs, ptr, label); + } + + void branchPrivatePtr(Condition cond, const Address& lhs, Register ptr, Label* label) { + branchPtr(cond, lhs, ptr, label); + } + + void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label* label) { + branchPtr(cond, lhs, ptr, label); + } + + void decBranchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) { + Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(imm.value)); + B(cond, label); + } + + void branchTestUndefined(Condition cond, Register tag, Label* label) { + Condition c = testUndefined(cond, tag); + B(label, c); + } + void branchTestInt32(Condition cond, Register tag, Label* label) { + Condition c = testInt32(cond, tag); + B(label, c); + } + void branchTestDouble(Condition cond, Register tag, Label* label) { + Condition c = testDouble(cond, tag); + B(label, c); + } + void branchTestBoolean(Condition cond, Register tag, Label* label) { + Condition c = testBoolean(cond, tag); + B(label, c); + } + void branchTestNull(Condition cond, Register tag, Label* label) { + Condition c = testNull(cond, tag); + B(label, c); + } + void branchTestString(Condition cond, Register tag, Label* label) { + Condition c = testString(cond, tag); + B(label, c); + } + void branchTestSymbol(Condition cond, Register tag, Label* label) { + Condition c = testSymbol(cond, tag); + B(label, c); + } + void branchTestObject(Condition cond, Register tag, Label* label) { + Condition c = testObject(cond, tag); + B(label, c); + } + void branchTestNumber(Condition cond, Register tag, Label* label) { + Condition c = testNumber(cond, tag); + B(label, c); + } + + void branchTestUndefined(Condition cond, const Address& address, Label* label) { + Condition c = testUndefined(cond, address); + B(label, c); + } + void branchTestInt32(Condition cond, const Address& address, Label* label) { + Condition c = testInt32(cond, address); + B(label, c); + } + void branchTestDouble(Condition cond, const Address& address, Label* label) { + Condition c = testDouble(cond, address); + B(label, c); + } + void branchTestBoolean(Condition cond, const Address& address, Label* label) { + Condition c = testDouble(cond, address); + B(label, c); + } + void branchTestNull(Condition cond, const Address& address, Label* label) { + Condition c = testNull(cond, address); + B(label, c); + } + void branchTestString(Condition cond, const Address& address, Label* label) { + Condition c = testString(cond, address); + B(label, c); + } + void branchTestSymbol(Condition cond, const Address& address, Label* label) { + Condition c = testSymbol(cond, address); + B(label, c); + } + void branchTestObject(Condition cond, const Address& address, Label* label) { + Condition c = testObject(cond, address); + B(label, c); + } + void branchTestNumber(Condition cond, const Address& address, Label* label) { + Condition c = testNumber(cond, address); + B(label, c); + } + + // Perform a type-test on a full Value loaded into a register. + // Clobbers the ScratchReg. 
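// --- Illustrative aside (not part of the patch) -----------------------------
// A minimal usage sketch for the ValueOperand type-tests declared below:
// check the tag of a boxed Value, bail out on mismatch, then unbox. The
// helper name, register choices and the |bail| label are assumptions for the
// sketch, not code from this patch.
//
//   static void loadInt32OrBail(MacroAssembler& masm, ValueOperand val,
//                               Register out, Label* bail)
//   {
//       // Take |bail| unless |val| carries an int32 payload.
//       masm.branchTestInt32(Assembler::NotEqual, val, bail);
//       // Safe to unbox; only the low 32 bits are meaningful for int32.
//       masm.unboxInt32(val, out);
//   }
// -----------------------------------------------------------------------------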
+ void branchTestUndefined(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testUndefined(cond, src); + B(label, c); + } + void branchTestInt32(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testInt32(cond, src); + B(label, c); + } + void branchTestBoolean(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testBoolean(cond, src); + B(label, c); + } + void branchTestDouble(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testDouble(cond, src); + B(label, c); + } + void branchTestNull(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testNull(cond, src); + B(label, c); + } + void branchTestString(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testString(cond, src); + B(label, c); + } + void branchTestSymbol(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testSymbol(cond, src); + B(label, c); + } + void branchTestObject(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testObject(cond, src); + B(label, c); + } + void branchTestNumber(Condition cond, const ValueOperand& src, Label* label) { + Condition c = testNumber(cond, src); + B(label, c); + } + + // Perform a type-test on a Value addressed by BaseIndex. + // Clobbers the ScratchReg. + void branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testUndefined(cond, address); + B(label, c); + } + void branchTestInt32(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testInt32(cond, address); + B(label, c); + } + void branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testBoolean(cond, address); + B(label, c); + } + void branchTestDouble(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testDouble(cond, address); + B(label, c); + } + void branchTestNull(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testNull(cond, address); + B(label, c); + } + void branchTestString(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testString(cond, address); + B(label, c); + } + void branchTestSymbol(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testSymbol(cond, address); + B(label, c); + } + void branchTestObject(Condition cond, const BaseIndex& address, Label* label) { + Condition c = testObject(cond, address); + B(label, c); + } + template + void branchTestGCThing(Condition cond, const T& src, Label* label) { + Condition c = testGCThing(cond, src); + B(label, c); + } + template + void branchTestPrimitive(Condition cond, const T& t, Label* label) { + Condition c = testPrimitive(cond, t); + B(label, c); + } + template + void branchTestMagic(Condition cond, const T& t, Label* label) { + Condition c = testMagic(cond, t); + B(label, c); + } + void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why, Label* label) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + branchTestValue(cond, val, MagicValue(why), label); + } + void branchTestValue(Condition cond, const ValueOperand& value, const Value& v, Label* label) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != value.valueReg()); + moveValue(v, ValueOperand(scratch64.asUnsized())); + Cmp(ARMRegister(value.valueReg(), 64), scratch64); + B(label, cond); + } + void branchTestValue(Condition cond, const Address& 
valaddr, const ValueOperand& value, + Label* label) + { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != valaddr.base); + MOZ_ASSERT(scratch64.asUnsized() != value.valueReg()); + loadValue(valaddr, scratch64.asUnsized()); + Cmp(ARMRegister(value.valueReg(), 64), Operand(scratch64)); + B(label, cond); + } + + void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) { + Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64)); + } + void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, Label* label) { + compareDouble(cond, lhs, rhs); + switch (cond) { + case DoubleNotEqual: { + Label unordered; + // not equal *and* ordered + branch(Overflow, &unordered); + branch(NotEqual, label); + bind(&unordered); + break; + } + case DoubleEqualOrUnordered: + branch(Overflow, label); + branch(Equal, label); + break; + default: + branch(Condition(cond), label); + } + } + + void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) { + Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32)); + } + void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, Label* label) { + compareFloat(cond, lhs, rhs); + switch (cond) { + case DoubleNotEqual: { + Label unordered; + // not equal *and* ordered + branch(Overflow, &unordered); + branch(NotEqual, label); + bind(&unordered); + break; + } + case DoubleEqualOrUnordered: + branch(Overflow, label); + branch(Equal, label); + break; + default: + branch(Condition(cond), label); + } + } + + void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) { + MOZ_CRASH("branchNegativeZero"); + } + void branchNegativeZeroFloat32(FloatRegister reg, Register scratch, Label* label) { + MOZ_CRASH("branchNegativeZeroFloat32"); + } + + void boxDouble(FloatRegister src, const ValueOperand& dest) { + Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64)); + } + void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) { + boxValue(type, src, dest.valueReg()); + } + + // Note that the |dest| register here may be ScratchReg, so we shouldn't use it. + void unboxInt32(const ValueOperand& src, Register dest) { + move32(src.valueReg(), dest); + } + void unboxInt32(const Address& src, Register dest) { + load32(src, dest); + } + void unboxDouble(const Address& src, FloatRegister dest) { + loadDouble(src, dest); + } + void unboxDouble(const ValueOperand& src, FloatRegister dest) { + Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64)); + } + + void unboxArgObjMagic(const ValueOperand& src, Register dest) { + MOZ_CRASH("unboxArgObjMagic"); + } + void unboxArgObjMagic(const Address& src, Register dest) { + MOZ_CRASH("unboxArgObjMagic"); + } + + void unboxBoolean(const ValueOperand& src, Register dest) { + move32(src.valueReg(), dest); + } + void unboxBoolean(const Address& src, Register dest) { + load32(src, dest); + } + + void unboxMagic(const ValueOperand& src, Register dest) { + move32(src.valueReg(), dest); + } + // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean + // instead if the source type is known. 
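// --- Illustrative aside (not part of the patch) -----------------------------
// A hedged sketch of the value layout the unboxers below appear to assume:
// the payload sits in the low JSVAL_TAG_SHIFT bits and the type tag above it,
// which is why unboxNonDouble() can strip the tag with one 64-bit AND, while
// int32/boolean payloads fit in the low 32 bits and a plain 32-bit move
// suffices. In rough C++ terms (variable names are placeholders):
//
//   uint64_t payloadMask = (uint64_t(1) << JSVAL_TAG_SHIFT) - 1;
//   uint64_t payload     = boxedBits & payloadMask;  // what unboxNonDouble computes
//   uint32_t int32Bits   = uint32_t(boxedBits);      // what unboxInt32's move32 keeps
// -----------------------------------------------------------------------------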
+ void unboxNonDouble(const ValueOperand& src, Register dest) { + unboxNonDouble(src.valueReg(), dest); + } + void unboxNonDouble(Address src, Register dest) { + loadPtr(src, dest); + unboxNonDouble(dest, dest); + } + + void unboxNonDouble(Register src, Register dest) { + And(ARMRegister(dest, 64), ARMRegister(src, 64), Operand((1ULL << JSVAL_TAG_SHIFT) - 1ULL)); + } + + void unboxPrivate(const ValueOperand& src, Register dest) { + ubfx(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64), 1, JSVAL_TAG_SHIFT - 1); + } + + void notBoolean(const ValueOperand& val) { + ARMRegister r(val.valueReg(), 64); + eor(r, r, Operand(1)); + } + void unboxObject(const ValueOperand& src, Register dest) { + unboxNonDouble(src.valueReg(), dest); + } + void unboxObject(Register src, Register dest) { + unboxNonDouble(src, dest); + } + void unboxObject(const Address& src, Register dest) { + loadPtr(src, dest); + unboxNonDouble(dest, dest); + } + void unboxObject(const BaseIndex& src, Register dest) { + doBaseIndex(ARMRegister(dest, 64), src, vixl::LDR_x); + unboxNonDouble(dest, dest); + } + + void unboxValue(const ValueOperand& src, AnyRegister dest) { + if (dest.isFloat()) { + Label notInt32, end; + branchTestInt32(Assembler::NotEqual, src, &notInt32); + convertInt32ToDouble(src.valueReg(), dest.fpu()); + jump(&end); + bind(&notInt32); + unboxDouble(src, dest.fpu()); + bind(&end); + } else { + unboxNonDouble(src, dest.gpr()); + } + + } + void unboxString(const ValueOperand& operand, Register dest) { + unboxNonDouble(operand, dest); + } + void unboxString(const Address& src, Register dest) { + unboxNonDouble(src, dest); + } + void unboxSymbol(const ValueOperand& operand, Register dest) { + unboxNonDouble(operand, dest); + } + void unboxSymbol(const Address& src, Register dest) { + unboxNonDouble(src, dest); + } + // These two functions use the low 32-bits of the full value register. + void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToDouble(operand.valueReg(), dest); + } + void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToDouble(operand.valueReg(), dest); + } + + void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToFloat32(operand.valueReg(), dest); + } + void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) { + convertInt32ToFloat32(operand.valueReg(), dest); + } + + void loadConstantDouble(double d, FloatRegister dest) { + Fmov(ARMFPRegister(dest, 64), d); + } + void loadConstantFloat32(float f, FloatRegister dest) { + Fmov(ARMFPRegister(dest, 32), f); + } + + // Register-based tests.
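// --- Illustrative aside (not part of the patch) -----------------------------
// The register-based tests below expect |tag| to already hold the extracted
// type tag, i.e. the result of splitTag()/splitTagForTest() above, which boil
// down to a logical shift right by JSVAL_TAG_SHIFT. A rough equivalent in
// plain C++ (names are placeholders for the sketch):
//
//   uint32_t extractTag(uint64_t boxedBits) {
//       return uint32_t(boxedBits >> JSVAL_TAG_SHIFT);  // what Lsr(..., JSVAL_TAG_SHIFT) produces
//   }
//   // testInt32(Equal, tag) then amounts to: tag == JSVAL_TAG_INT32.
// -----------------------------------------------------------------------------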
+ Condition testUndefined(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED)); + return cond; + } + Condition testInt32(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_INT32)); + return cond; + } + Condition testBoolean(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN)); + return cond; + } + Condition testNull(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_NULL)); + return cond; + } + Condition testString(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_STRING)); + return cond; + } + Condition testSymbol(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL)); + return cond; + } + Condition testObject(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_OBJECT)); + return cond; + } + Condition testDouble(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_TAG_MAX_DOUBLE)); + return (cond == Equal) ? BelowOrEqual : Above; + } + Condition testNumber(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET)); + return (cond == Equal) ? BelowOrEqual : Above; + } + Condition testGCThing(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET)); + return (cond == Equal) ? AboveOrEqual : Below; + } + Condition testMagic(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, ImmTag(JSVAL_TAG_MAGIC)); + return cond; + } + Condition testPrimitive(Condition cond, Register tag) { + MOZ_ASSERT(cond == Equal || cond == NotEqual); + cmp32(tag, Imm32(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET)); + return (cond == Equal) ? Below : AboveOrEqual; + } + Condition testError(Condition cond, Register tag) { + return testMagic(cond, tag); + } + + // ValueOperand-based tests. + Condition testInt32(Condition cond, const ValueOperand& value) { + // The incoming ValueOperand may use scratch registers. + vixl::UseScratchRegisterScope temps(this); + + if (value.valueReg() == ScratchReg2) { + MOZ_ASSERT(temps.IsAvailable(ScratchReg64)); + MOZ_ASSERT(!temps.IsAvailable(ScratchReg2_64)); + temps.Exclude(ScratchReg64); + + if (cond != Equal && cond != NotEqual) + MOZ_CRASH("NYI: non-equality comparisons"); + + // In the event that the tag is not encodable in a single cmp / teq instruction, + // perform the xor that teq would use, this will leave the tag bits being + // zero, or non-zero, which can be tested with either and or shift. 
+ unsigned int n, imm_r, imm_s; + uint64_t immediate = uint64_t(ImmTag(JSVAL_TAG_INT32).value) << JSVAL_TAG_SHIFT; + if (IsImmLogical(immediate, 64, &n, &imm_s, &imm_r)) { + Eor(ScratchReg64, ScratchReg2_64, Operand(immediate)); + } else { + Mov(ScratchReg64, immediate); + Eor(ScratchReg64, ScratchReg2_64, ScratchReg64); + } + Tst(ScratchReg64, Operand(-1ll << JSVAL_TAG_SHIFT)); + return cond; + } + + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != value.valueReg()); + + splitTag(value, scratch); + return testInt32(cond, scratch); + } + Condition testBoolean(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testBoolean(cond, scratch); + } + Condition testDouble(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testDouble(cond, scratch); + } + Condition testNull(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testNull(cond, scratch); + } + Condition testUndefined(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testUndefined(cond, scratch); + } + Condition testString(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testString(cond, scratch); + } + Condition testSymbol(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testSymbol(cond, scratch); + } + Condition testObject(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testObject(cond, scratch); + } + Condition testNumber(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testNumber(cond, scratch); + } + Condition testPrimitive(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testPrimitive(cond, scratch); + } + Condition testMagic(Condition cond, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(value.valueReg() != scratch); + splitTag(value, scratch); + return testMagic(cond, scratch); + } + Condition testError(Condition cond, const ValueOperand& value) { + return testMagic(cond, value); + } + + // Address-based tests. 
+ Condition testGCThing(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testGCThing(cond, scratch); + } + Condition testMagic(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testMagic(cond, scratch); + } + Condition testInt32(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testInt32(cond, scratch); + } + Condition testDouble(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testDouble(cond, scratch); + } + Condition testBoolean(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testBoolean(cond, scratch); + } + Condition testNull(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testNull(cond, scratch); + } + Condition testUndefined(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testUndefined(cond, scratch); + } + Condition testString(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testString(cond, scratch); + } + Condition testSymbol(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testSymbol(cond, scratch); + } + Condition testObject(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testObject(cond, scratch); + } + Condition testNumber(Condition cond, const Address& address) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(address.base != scratch); + splitTag(address, scratch); + return testNumber(cond, scratch); + } + + // BaseIndex-based tests. 
+ Condition testUndefined(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testUndefined(cond, scratch); + } + Condition testNull(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testNull(cond, scratch); + } + Condition testBoolean(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testBoolean(cond, scratch); + } + Condition testString(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testString(cond, scratch); + } + Condition testSymbol(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testSymbol(cond, scratch); + } + Condition testInt32(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testInt32(cond, scratch); + } + Condition testObject(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testObject(cond, scratch); + } + Condition testDouble(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testDouble(cond, scratch); + } + Condition testMagic(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testMagic(cond, scratch); + } + Condition testGCThing(Condition cond, const BaseIndex& src) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(src.base != scratch); + MOZ_ASSERT(src.index != scratch); + splitTag(src, scratch); + return testGCThing(cond, scratch); + } + + Condition testInt32Truthy(bool truthy, const ValueOperand& operand) { + ARMRegister payload32(operand.valueReg(), 32); + Tst(payload32, payload32); + return truthy ? NonZero : Zero; + } + void branchTestInt32Truthy(bool truthy, const ValueOperand& operand, Label* label) { + Condition c = testInt32Truthy(truthy, operand); + B(label, c); + } + + void branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label) { + Fcmp(ARMFPRegister(reg, 64), 0.0); + if (!truthy) { + // falsy values are zero, and NaN. 
+ branch(Zero, label); + branch(Overflow, label); + } else { + // truthy values are non-zero and not nan. + // If it is overflow + Label onFalse; + branch(Zero, &onFalse); + branch(Overflow, &onFalse); + b(label); + bind(&onFalse); + } + } + + Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) { + ARMRegister payload32(operand.valueReg(), 32); + Tst(payload32, payload32); + return truthy ? NonZero : Zero; + } + void branchTestBooleanTruthy(bool truthy, const ValueOperand& operand, Label* label) { + Condition c = testBooleanTruthy(truthy, operand); + B(label, c); + } + Condition testStringTruthy(bool truthy, const ValueOperand& value) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + const ARMRegister scratch32(scratch, 32); + const ARMRegister scratch64(scratch, 64); + + MOZ_ASSERT(value.valueReg() != scratch); + + unboxString(value, scratch); + Ldr(scratch32, MemOperand(scratch64, JSString::offsetOfLength())); + Cmp(scratch32, Operand(0)); + return truthy ? Condition::NonZero : Condition::Zero; + } + void branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label) { + Condition c = testStringTruthy(truthy, value); + B(label, c); + } + void int32OrDouble(Register src, ARMFPRegister dest) { + Label isInt32; + Label join; + testInt32(Equal, ValueOperand(src)); + B(&isInt32, Equal); + // is double, move teh bits as is + Fmov(dest, ARMRegister(src, 64)); + B(&join); + bind(&isInt32); + // is int32, do a conversion while moving + Scvtf(dest, ARMRegister(src, 32)); + bind(&join); + } + void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) { + if (dest.isFloat()) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + Ldr(scratch64, toMemOperand(address)); + int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64)); + } else if (type == MIRType_Int32 || type == MIRType_Boolean) { + load32(address, dest.gpr()); + } else { + loadPtr(address, dest.gpr()); + unboxNonDouble(dest.gpr(), dest.gpr()); + } + } + + void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) { + if (dest.isFloat()) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != address.base); + MOZ_ASSERT(scratch64.asUnsized() != address.index); + doBaseIndex(scratch64, address, vixl::LDR_x); + int32OrDouble(scratch64.asUnsized(), ARMFPRegister(dest.fpu(), 64)); + } else if (type == MIRType_Int32 || type == MIRType_Boolean) { + load32(address, dest.gpr()); + } else { + loadPtr(address, dest.gpr()); + unboxNonDouble(dest.gpr(), dest.gpr()); + } + } + + void loadInstructionPointerAfterCall(Register dest) { + MOZ_CRASH("loadInstructionPointerAfterCall"); + } + + // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp(). + CodeOffsetLabel toggledJump(Label* label) { + BufferOffset offset = b(label, Always); + CodeOffsetLabel ret(offset.getOffset()); + return ret; + } + + // load: offset to the load instruction obtained by movePatchablePtr(). 
+ void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) { + if (ptr.value) + tmpDataRelocations_.append(load); + } + void writeDataRelocation(const Value& val, BufferOffset load) { + if (val.isMarkable()) { + gc::Cell* cell = reinterpret_cast<gc::Cell*>(val.toGCThing()); + if (cell && gc::IsInsideNursery(cell)) + embedsNurseryPointers_ = true; + tmpDataRelocations_.append(load); + } + } + + void writePrebarrierOffset(CodeOffsetLabel label) { + tmpPreBarriers_.append(BufferOffset(label.offset())); + } + + void computeEffectiveAddress(const Address& address, Register dest) { + Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset)); + } + void computeEffectiveAddress(const BaseIndex& address, Register dest) { + ARMRegister dest64(dest, 64); + ARMRegister base64(address.base, 64); + ARMRegister index64(address.index, 64); + + Add(dest64, base64, Operand(index64, vixl::LSL, address.scale)); + if (address.offset) + Add(dest64, dest64, Operand(address.offset)); + } + + private: + void setupABICall(uint32_t args); + + public: + // Setup a call to C/C++ code, given the number of general arguments it + // takes. Note that this only supports cdecl. + // + // In order for alignment to work correctly, the MacroAssembler must have a + // consistent view of the stack displacement. It is okay to call "push" + // manually, however, if the stack alignment were to change, the macro + // assembler should be notified before starting a call. + void setupAlignedABICall(uint32_t args) { + MOZ_CRASH("setupAlignedABICall"); + } + + // Sets up an ABI call for when the alignment is not known. This may need a + // scratch register. + void setupUnalignedABICall(uint32_t args, Register scratch); + + // Arguments must be assigned to a C/C++ call in order. They are moved + // in parallel immediately before performing the call. This process may + // temporarily use more stack, in which case sp-relative addresses will be + // automatically adjusted. It is extremely important that sp-relative + // addresses are computed *after* setupABICall(). Furthermore, no + // operations should be emitted while setting arguments. + void passABIArg(const MoveOperand& from, MoveOp::Type type); + void passABIArg(Register reg); + void passABIArg(FloatRegister reg, MoveOp::Type type); + void passABIOutParam(Register reg); + + private: + void callWithABIPre(uint32_t* stackAdjust); + void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result); + + public: + // Emits a call to a C/C++ function, resolving all argument moves. + void callWithABI(void* fun, MoveOp::Type result = MoveOp::GENERAL); + void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL); + void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL); + void callWithABI(Address fun, MoveOp::Type result = MoveOp::GENERAL); + + CodeOffsetLabel labelForPatch() { + return CodeOffsetLabel(nextOffset().getOffset()); + } + + void handleFailureWithHandlerTail(void* handler); + + // FIXME: This is the same on all platforms. Can be common code? + void makeFrameDescriptor(Register frameSizeReg, FrameType type) { + lshiftPtr(Imm32(FRAMESIZE_SHIFT), frameSizeReg); + orPtr(Imm32(type), frameSizeReg); + } + + void callWithExitFrame(JitCode* target, Register dynStack) { + add32(Imm32(framePushed()), dynStack); + makeFrameDescriptor(dynStack, JitFrame_IonJS); + Push(dynStack); // descriptor + + call(target); + } + + // FIXME: See CodeGeneratorX64 calls to noteAsmJSGlobalAccess.
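// --- Illustrative aside (not part of the patch) -----------------------------
// A minimal usage sketch of the ABI-call protocol declared above, assuming a
// single-argument C function; the helper name, |fun| and the register choices
// are placeholders, not code from this patch.
//
//   static void callUnaryCFunction(MacroAssembler& masm, Register arg,
//                                  Register scratch, void* fun)
//   {
//       masm.setupUnalignedABICall(1, scratch);  // one general argument
//       masm.passABIArg(arg);                    // arguments are assigned in call order
//       masm.callWithABI(fun);                   // resolves the moves, then performs the call
//   }
// -----------------------------------------------------------------------------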
+ void patchAsmJSGlobalAccess(CodeOffsetLabel patchAt, uint8_t* code, + uint8_t* globalData, unsigned globalDataOffset) + { + MOZ_CRASH("patchAsmJSGlobalAccess"); + } + + void memIntToValue(const Address& src, const Address& dest) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + MOZ_ASSERT(scratch != src.base); + MOZ_ASSERT(scratch != dest.base); + load32(src, scratch); + storeValue(JSVAL_TYPE_INT32, scratch, dest); + } + + void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label); + void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label); + + // Builds an exit frame on the stack, with a return address to an internal + // non-function. Returns offset to be passed to markSafepointAt(). + void buildFakeExitFrame(Register scratch, uint32_t* offset); + + void callWithExitFrame(Label* target) { + uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); + Push(Imm32(descriptor)); // descriptor + + call(target); + } + + void callWithExitFrame(JitCode* target); + + void callJit(Register callee) { + // AArch64 cannot read from the PC, so pushing must be handled callee-side. + syncStackPtr(); + Blr(ARMRegister(callee, 64)); + } + + void appendCallSite(const CallSiteDesc& desc) { + MOZ_CRASH("appendCallSite"); + } + + void call(const CallSiteDesc& desc, Label* label) { + syncStackPtr(); + call(label); + append(desc, currentOffset(), framePushed_); + } + void call(const CallSiteDesc& desc, Register reg) { + syncStackPtr(); + call(reg); + append(desc, currentOffset(), framePushed_); + } + void call(const CallSiteDesc& desc, AsmJSImmPtr imm) { + syncStackPtr(); + call(imm); + append(desc, currentOffset(), framePushed_); + } + + void call(AsmJSImmPtr imm) { + vixl::UseScratchRegisterScope temps(this); + const Register scratch = temps.AcquireX().asUnsized(); + syncStackPtr(); + movePtr(imm, scratch); + call(scratch); + } + + void call(Register target) { + syncStackPtr(); + Blr(ARMRegister(target, 64)); + } + // Call a target JitCode, which must be traceable, and may be movable. + void call(JitCode* target) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + syncStackPtr(); + BufferOffset off = immPool64(scratch64, uint64_t(target->raw())); + addPendingJump(off, ImmPtr(target->raw()), Relocation::JITCODE); + blr(scratch64); + } + // Call a target native function, which is neither traceable nor movable. 
+ void call(ImmPtr target) { + syncStackPtr(); + movePtr(target, ip0); + Blr(vixl::ip0); + } + void call(Label* target) { + syncStackPtr(); + Bl(target); + } + void callExit(AsmJSImmPtr imm, uint32_t stackArgBytes) { + MOZ_CRASH("callExit"); + } + + void callJitFromAsmJS(Register reg) { + Blr(ARMRegister(reg, 64)); + } + + void callAndPushReturnAddress(Label* label); + + void profilerEnterFrame(Register framePtr, Register scratch) { + AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation()); + loadPtr(activation, scratch); + storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame())); + storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite())); + } + void profilerExitFrame() { + branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail()); + } + Address ToPayload(Address value) { + return value; + } + Address ToType(Address value) { + return value; + } + + private: + template + void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval, + Register newval, Register output) + { + MOZ_CRASH("compareExchange"); + } + + template + void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value, + const T& address, Register temp, Register output) + { + MOZ_CRASH("atomicFetchOp"); + } + + template + void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value, + const T& address, Register temp, Register output) + { + MOZ_CRASH("atomicFetchOp"); + } + + template + void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& mem) { + MOZ_CRASH("atomicEffectOp"); + } + + template + void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& mem) { + MOZ_CRASH("atomicEffectOp"); + } + + public: + // T in {Address,BaseIndex} + // S in {Imm32,Register} + + template + void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(1, true, mem, oldval, newval, output); + } + template + void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(1, false, mem, oldval, newval, output); + } + template + void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(2, true, mem, oldval, newval, output); + } + template + void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output) + { + compareExchange(2, false, mem, oldval, newval, output); + } + template + void compareExchange32(const T& mem, Register oldval, Register newval, Register output) { + compareExchange(4, false, mem, oldval, newval, output); + } + + template + void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output); + } + template + void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output); + } + template + void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output); + } + template + void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output); + } + template + void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register 
output) { + atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output); + } + + template + void atomicAdd8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchAddOp, value, mem); + } + template + void atomicAdd16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchAddOp, value, mem); + } + template + void atomicAdd32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchAddOp, value, mem); + } + + template + void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output); + } + template + void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output); + } + template + void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output); + } + template + void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output); + } + template + void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output); + } + + template + void atomicSub8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchSubOp, value, mem); + } + template + void atomicSub16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchSubOp, value, mem); + } + template + void atomicSub32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchSubOp, value, mem); + } + + template + void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output); + } + template + void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output); + } + template + void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output); + } + template + void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output); + } + template + void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output); + } + + template + void atomicAnd8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchAndOp, value, mem); + } + template + void atomicAnd16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchAndOp, value, mem); + } + template + void atomicAnd32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchAndOp, value, mem); + } + + template + void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output); + } + template + void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output); + } + template + void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output); + } + template + void atomicFetchOr16ZeroExtend(const S& 
value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output); + } + template + void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output); + } + + template + void atomicOr8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchOrOp, value, mem); + } + template + void atomicOr16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchOrOp, value, mem); + } + template + void atomicOr32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchOrOp, value, mem); + } + + template + void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output); + } + template + void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output); + } + template + void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output); + } + template + void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output); + } + template + void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) { + atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output); + } + + template + void atomicXor8(const S& value, const T& mem) { + atomicEffectOp(1, AtomicFetchXorOp, value, mem); + } + template + void atomicXor16(const S& value, const T& mem) { + atomicEffectOp(2, AtomicFetchXorOp, value, mem); + } + template + void atomicXor32(const S& value, const T& mem) { + atomicEffectOp(4, AtomicFetchXorOp, value, mem); + } + + // Emit a BLR or NOP instruction. ToggleCall can be used to patch + // this instruction. + CodeOffsetLabel toggledCall(JitCode* target, bool enabled) { + // TODO: Random pool insertion between instructions below is terrible. + // Unfortunately, we can't forbid pool prevention, because we're trying + // to add an entry to a pool. So as a temporary fix, just flush the pool + // now, so that it won't add later. If you're changing this, also + // check ToggleCall(), which will probably break. + armbuffer_.flushPool(); + + syncStackPtr(); + + BufferOffset offset = nextOffset(); + BufferOffset loadOffset; + { + vixl::UseScratchRegisterScope temps(this); + + // The register used for the load is hardcoded, so that ToggleCall + // can patch in the branch instruction easily. This could be changed, + // but then ToggleCall must read the target register from the load. 
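toggledCall above reserves a fixed slot that ToggleCall can later rewrite between a BLR through the hardcoded scratch register and a NOP. A standalone sketch of that rewrite, using the architectural A64 encodings for NOP and BLR; the encodings come from the ISA, not from this patch, and real code must also flush the instruction cache for the patched word:

    #include <cstdint>

    constexpr uint32_t kNop = 0xD503201F;          // A64 NOP
    constexpr uint32_t BlrEncoding(unsigned xn) {  // A64 BLR Xn
        return 0xD63F0000u | (xn << 5);
    }

    // Flip the 4-byte call slot between "call through Xn" and "do nothing".
    inline void ToggleCallWord(uint32_t* slot, unsigned xn, bool enabled) {
        *slot = enabled ? BlrEncoding(xn) : kNop;
    }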
+ MOZ_ASSERT(temps.IsAvailable(ScratchReg2_64)); + temps.Exclude(ScratchReg2_64); + + loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw())); + + if (enabled) + blr(ScratchReg2_64); + else + nop(); + } + + addPendingJump(loadOffset, ImmPtr(target->raw()), Relocation::JITCODE); + CodeOffsetLabel ret(offset.getOffset()); + return ret; + } + + static size_t ToggledCallSize(uint8_t* code) { + static const uint32_t syncStackInstruction = 0x9100039f; // mov sp, r28 + + // start it off as an 8 byte sequence + int ret = 8; + Instruction* cur = (Instruction*)code; + uint32_t* curw = (uint32_t*)code; + + if (*curw == syncStackInstruction) { + ret += 4; + cur += 4; + } + + if (cur->IsUncondB()) + ret += cur->ImmPCRawOffset() << vixl::kInstructionSizeLog2; + + return ret; + } + + void checkARMRegAlignment(const ARMRegister& reg) { +#ifdef DEBUG + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch64 = temps.AcquireX(); + MOZ_ASSERT(scratch64.asUnsized() != reg.asUnsized()); + Label aligned; + Mov(scratch64, reg); + Tst(scratch64, Operand(StackAlignment - 1)); + B(Zero, &aligned); + breakpoint(); + bind(&aligned); + Mov(scratch64, vixl::xzr); // Clear the scratch register for sanity. +#endif + } + + void checkStackAlignment() { +#ifdef DEBUG + checkARMRegAlignment(GetStackPointer64()); + + // If another register is being used to track pushes, check sp explicitly. + if (!GetStackPointer64().Is(vixl::sp)) + checkARMRegAlignment(vixl::sp); +#endif + } + + void abiret() { + syncStackPtr(); // SP is always used to transmit the stack between calls. + vixl::MacroAssembler::Ret(vixl::lr); + } + + void mulBy3(Register src, Register dest) { + ARMRegister xdest(dest, 64); + ARMRegister xsrc(src, 64); + Add(xdest, xsrc, Operand(xsrc, vixl::LSL, 1)); + } + + template + void branchAdd32(Condition cond, T src, Register dest, Label* label) { + adds32(src, dest); + branch(cond, label); + } + + template + void branchSub32(Condition cond, T src, Register dest, Label* label) { + subs32(src, dest); + branch(cond, label); + } + void clampCheck(Register r, Label* handleNotAnInt) { + MOZ_CRASH("clampCheck"); + } + + void memMove32(Address Source, Address Dest) { + MOZ_CRASH("memMove32"); + } + void memMove64(Address Source, Address Dest) { + MOZ_CRASH("memMove64"); + } + + void stackCheck(ImmWord limitAddr, Label* label) { + MOZ_CRASH("stackCheck"); + } + void clampIntToUint8(Register reg) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + const ARMRegister reg32(reg, 32); + MOZ_ASSERT(!scratch32.Is(reg32)); + + Cmp(reg32, Operand(reg32, vixl::UXTB)); + Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual); + Mov(scratch32, Operand(0xff)); + Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual); + } + + void incrementInt32Value(const Address& addr) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratch32 = temps.AcquireW(); + MOZ_ASSERT(scratch32.asUnsized() != addr.base); + + load32(addr, scratch32.asUnsized()); + Add(scratch32, scratch32, Operand(1)); + store32(scratch32.asUnsized(), addr); + } + void inc64(AbsoluteAddress dest) { + vixl::UseScratchRegisterScope temps(this); + const ARMRegister scratchAddr64 = temps.AcquireX(); + const ARMRegister scratch64 = temps.AcquireX(); + + Mov(scratchAddr64, uint64_t(dest.addr)); + Ldr(scratch64, MemOperand(scratchAddr64, 0)); + Add(scratch64, scratch64, Operand(1)); + Str(scratch64, MemOperand(scratchAddr64, 0)); + } + + void BoundsCheck(Register ptrReg, Label* onFail, 
vixl::CPURegister zeroMe = vixl::NoReg) { + // use tst rather than Tst to *ensure* that a single instrution is generated. + Cmp(ARMRegister(ptrReg, 32), ARMRegister(HeapLenReg, 32)); + if (!zeroMe.IsNone()) { + if (zeroMe.IsRegister()) { + Csel(ARMRegister(zeroMe), + ARMRegister(zeroMe), + Operand(zeroMe.Is32Bits() ? vixl::wzr : vixl::xzr), + Assembler::Below); + } else if (zeroMe.Is32Bits()) { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratchFloat = temps.AcquireS(); + Fmov(scratchFloat, JS::GenericNaN()); + Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchFloat, Assembler::Below); + } else { + vixl::UseScratchRegisterScope temps(this); + const ARMFPRegister scratchDouble = temps.AcquireD(); + Fmov(scratchDouble, JS::GenericNaN()); + Fcsel(ARMFPRegister(zeroMe), ARMFPRegister(zeroMe), scratchDouble, Assembler::Below); + } + } + B(onFail, Assembler::AboveOrEqual); + } + void breakpoint(); + + // Emits a simulator directive to save the current sp on an internal stack. + void simulatorMarkSP() { +#ifdef JS_ARM64_SIMULATOR + svc(vixl::kMarkStackPointer); +#endif + } + + // Emits a simulator directive to pop from its internal stack + // and assert that the value is equal to the current sp. + void simulatorCheckSP() { +#ifdef JS_ARM64_SIMULATOR + svc(vixl::kCheckStackPointer); +#endif + } + + void loadAsmJSActivation(Register dest) { + loadPtr(Address(GlobalReg, AsmJSActivationGlobalDataOffset - AsmJSGlobalRegBias), dest); + } + void loadAsmJSHeapRegisterFromGlobalData() { + loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg); + loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias + 8), HeapLenReg); + } + + // Overwrites the payload bits of a dest register containing a Value. + void movePayload(Register src, Register dest) { + // Bfxil cannot be used with the zero register as a source. + if (src == rzr) + And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(~int64_t(JSVAL_PAYLOAD_MASK))); + else + Bfxil(ARMRegister(dest, 64), ARMRegister(src, 64), 0, JSVAL_TAG_SHIFT); + } + + // FIXME: Should be in Assembler? + // FIXME: Should be const? + uint32_t currentOffset() const { + return nextOffset().getOffset(); + } + + protected: + bool buildOOLFakeExitFrame(void* fakeReturnAddr) { + uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); + Push(Imm32(descriptor)); + Push(ImmPtr(fakeReturnAddr)); + return true; + } +}; + +typedef MacroAssemblerCompat MacroAssemblerSpecific; + +} // namespace jit +} // namespace js + +#endif // jit_arm64_MacroAssembler_arm64_h diff --git a/js/src/jit/arm64/SharedICHelpers-arm64.h b/js/src/jit/arm64/SharedICHelpers-arm64.h new file mode 100644 index 000000000000..092885a6b35c --- /dev/null +++ b/js/src/jit/arm64/SharedICHelpers-arm64.h @@ -0,0 +1,309 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_SharedICHelpers_arm64_h +#define jit_arm64_SharedICHelpers_arm64_h + +#include "jit/BaselineFrame.h" +#include "jit/BaselineIC.h" +#include "jit/MacroAssembler.h" +#include "jit/SharedICRegisters.h" + +namespace js { +namespace jit { + +// Distance from sp to the top Value inside an IC stub (no return address on the stack on ARM). 
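movePayload above uses Bfxil to overwrite only the low JSVAL_TAG_SHIFT bits of a boxed Value, leaving the tag bits untouched (falling back to a plain And when the source is the zero register). The same bit manipulation on plain integers; the 47-bit payload width is an assumption about the usual 64-bit Value layout rather than something stated in this patch:

    #include <cstdint>

    constexpr unsigned kTagShiftAssumed = 47;   // assumed value of JSVAL_TAG_SHIFT
    constexpr uint64_t kPayloadMaskAssumed = (uint64_t(1) << kTagShiftAssumed) - 1;

    // Replace the payload bits of 'boxed' with 'payload' while preserving the tag,
    // which is what the bitfield insert (Bfxil) of the low bits accomplishes.
    inline uint64_t MovePayloadModel(uint64_t boxed, uint64_t payload) {
        return (boxed & ~kPayloadMaskAssumed) | (payload & kPayloadMaskAssumed);
    }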
+static const size_t ICStackValueOffset = 0; + +inline void +EmitRestoreTailCallReg(MacroAssembler& masm) +{ + // No-op on ARM because link register is always holding the return address. +} + +inline void +EmitRepushTailCallReg(MacroAssembler& masm) +{ + // No-op on ARM because link register is always holding the return address. +} + +inline void +EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm) +{ + // Move ICEntry offset into ICStubReg + CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg); + *patchOffset = offset; + + // Load stub pointer into ICStubReg + masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg); + + // Load stubcode pointer from BaselineStubEntry. + // R2 won't be active when we call ICs, so we can use r0. + MOZ_ASSERT(R2 == ValueOperand(r0)); + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0); + + // Call the stubcode via a direct branch-and-link. + masm.Blr(x0); +} + +inline void +EmitEnterTypeMonitorIC(MacroAssembler& masm, + size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub()) +{ + // This is expected to be called from within an IC, when ICStubReg is + // properly initialized to point to the stub. + masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg); + + // Load stubcode pointer from BaselineStubEntry. + // R2 won't be active when we call ICs, so we can use r0. + MOZ_ASSERT(R2 == ValueOperand(r0)); + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0); + + // Jump to the stubcode. + masm.Br(x0); +} + +inline void +EmitReturnFromIC(MacroAssembler& masm) +{ + masm.abiret(); // Defaults to lr. +} + +inline void +EmitChangeICReturnAddress(MacroAssembler& masm, Register reg) +{ + masm.movePtr(reg, lr); +} + +inline void +EmitTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize) +{ + // We assume that R0 has been pushed, and R2 is unused. + MOZ_ASSERT(R2 == ValueOperand(r0)); + + // Compute frame size into w0. Used below in makeFrameDescriptor(). + masm.Sub(x0, BaselineFrameReg64, masm.GetStackPointer64()); + masm.Add(w0, w0, Operand(BaselineFrame::FramePointerOffset)); + + // Store frame size without VMFunction arguments for GC marking. + { + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch32 = temps.AcquireW(); + + masm.Sub(scratch32, w0, Operand(argSize)); + masm.store32(scratch32.asUnsized(), + Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); + } + + // Push frame descriptor (minus the return address) and perform the tail call. + MOZ_ASSERT(ICTailCallReg == lr); + masm.makeFrameDescriptor(r0, JitFrame_BaselineJS); + masm.push(r0); + + // The return address will be pushed by the VM wrapper, for compatibility + // with direct calls. Refer to the top of generateVMWrapper(). + // ICTailCallReg (lr) already contains the return address (as we keep + // it there through the stub calls). + + masm.branch(target); +} + +inline void +EmitCreateStubFrameDescriptor(MacroAssembler& masm, Register reg) +{ + ARMRegister reg64(reg, 64); + + // Compute stub frame size. + masm.Sub(reg64, masm.GetStackPointer64(), Operand(sizeof(void*) * 2)); + masm.Sub(reg64, BaselineFrameReg64, reg64); + + masm.makeFrameDescriptor(reg, JitFrame_BaselineStub); +} + +inline void +EmitCallVM(JitCode* target, MacroAssembler& masm) +{ + EmitCreateStubFrameDescriptor(masm, r0); + masm.push(r0); + masm.call(target); +} + +// Size of values pushed by EmitEnterStubFrame. 
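EmitTailCallVM and EmitCreateStubFrameDescriptor above pack the computed frame size together with a frame-type tag via makeFrameDescriptor, and EmitLeaveStubFrame later shifts the size back out with FRAMESIZE_SHIFT. A minimal model of that packing; the shift value and the type constants are placeholders, not the real JitFrames definitions:

    #include <cstdint>

    constexpr uint32_t kFrameSizeShiftModel = 4;   // placeholder for FRAMESIZE_SHIFT

    enum class FrameTypeModel : uint32_t { IonJS = 0, BaselineJS = 1, BaselineStub = 2 };

    inline uint32_t MakeFrameDescriptorModel(uint32_t frameSize, FrameTypeModel type) {
        return (frameSize << kFrameSizeShiftModel) | uint32_t(type);
    }

    inline uint32_t DescriptorFrameSizeModel(uint32_t descriptor) {
        return descriptor >> kFrameSizeShiftModel;   // what Lsr(desc, FRAMESIZE_SHIFT) recovers
    }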
+static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*); +static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*); + +inline void +EmitEnterStubFrame(MacroAssembler& masm, Register scratch) +{ + MOZ_ASSERT(scratch != ICTailCallReg); + + // Compute frame size. + masm.Add(ARMRegister(scratch, 64), BaselineFrameReg64, Operand(BaselineFrame::FramePointerOffset)); + masm.Sub(ARMRegister(scratch, 64), ARMRegister(scratch, 64), masm.GetStackPointer64()); + + masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); + + // Note: when making changes here, don't forget to update STUB_FRAME_SIZE. + + // Push frame descriptor and return address. + // Save old frame pointer, stack pointer, and stub reg. + masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS); + masm.push(scratch, ICTailCallReg, ICStubReg, BaselineFrameReg); + + // Update the frame register. + masm.Mov(BaselineFrameReg64, masm.GetStackPointer64()); + + // Stack should remain 16-byte aligned. + masm.checkStackAlignment(); +} + +inline void +EmitLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false) +{ + vixl::UseScratchRegisterScope temps(&masm.asVIXL()); + const ARMRegister scratch64 = temps.AcquireX(); + + // Ion frames do not save and restore the frame pointer. If we called + // into Ion, we have to restore the stack pointer from the frame descriptor. + // If we performed a VM call, the descriptor has been popped already so + // in that case we use the frame pointer. + if (calledIntoIon) { + masm.pop(scratch64.asUnsized()); + masm.Lsr(scratch64, scratch64, FRAMESIZE_SHIFT); + masm.Add(masm.GetStackPointer64(), masm.GetStackPointer64(), scratch64); + } else { + masm.Mov(masm.GetStackPointer64(), BaselineFrameReg64); + } + + // Pop values, discarding the frame descriptor. + masm.pop(BaselineFrameReg, ICStubReg, ICTailCallReg, scratch64.asUnsized()); + + // Stack should remain 16-byte aligned. + masm.checkStackAlignment(); +} + +inline void +EmitStowICValues(MacroAssembler& masm, int values) +{ + switch (values) { + case 1: + // Stow R0. + masm.pushValue(R0); + break; + case 2: + // Stow R0 and R1. + masm.push(R0.valueReg(), R1.valueReg()); + break; + default: + MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values"); + } +} + +inline void +EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false) +{ + MOZ_ASSERT(values >= 0 && values <= 2); + switch (values) { + case 1: + // Unstow R0. + if (discard) + masm.Drop(Operand(sizeof(Value))); + else + masm.popValue(R0); + break; + case 2: + // Unstow R0 and R1. + if (discard) + masm.Drop(Operand(sizeof(Value) * 2)); + else + masm.pop(R1.valueReg(), R0.valueReg()); + break; + default: + MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Expected 1 or 2 values"); + } +} + +inline void +EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset) +{ + // R0 contains the value that needs to be typechecked. + // The object we're updating is a boxed Value on the stack, at offset + // objectOffset from stack top, excluding the return address. + MOZ_ASSERT(R2 == ValueOperand(r0)); + + // Save the current ICStubReg to stack, as well as the TailCallReg, + // since on AArch64, the LR is live. + masm.push(ICStubReg, ICTailCallReg); + + // This is expected to be called from within an IC, when ICStubReg + // is properly initialized to point to the stub. 
+ masm.loadPtr(Address(ICStubReg, (int32_t)ICUpdatedStub::offsetOfFirstUpdateStub()), + ICStubReg); + + // Load stubcode pointer from ICStubReg into ICTailCallReg. + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), ICTailCallReg); + + // Call the stubcode. + masm.Blr(ARMRegister(ICTailCallReg, 64)); + + // Restore the old stub reg and tailcall reg. + masm.pop(ICTailCallReg, ICStubReg); + + // The update IC will store 0 or 1 in R1.scratchReg() reflecting if the + // value in R0 type-checked properly or not. + Label success; + masm.cmp32(R1.scratchReg(), Imm32(1)); + masm.j(Assembler::Equal, &success); + + // If the IC failed, then call the update fallback function. + EmitEnterStubFrame(masm, R1.scratchReg()); + + masm.loadValue(Address(masm.getStackPointer(), STUB_FRAME_SIZE + objectOffset), R1); + masm.push(R0.valueReg(), R1.valueReg(), ICStubReg); + + // Load previous frame pointer, push BaselineFrame*. + masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg()); + masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg()); + + EmitCallVM(code, masm); + EmitLeaveStubFrame(masm); + + // Success at end. + masm.bind(&success); +} + +template +inline void +EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type) +{ + // On AArch64, lr is clobbered by patchableCallPreBarrier. Save it first. + masm.push(lr); + masm.patchableCallPreBarrier(addr, type); + masm.pop(lr); +} + +inline void +EmitStubGuardFailure(MacroAssembler& masm) +{ + // NOTE: This routine assumes that the stub guard code left the stack in the + // same state it was in when it was entered. + + // BaselineStubEntry points to the current stub. + + // Load next stub into ICStubReg. + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg); + + // Load stubcode pointer from BaselineStubEntry into scratch register. + masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0); + + // Return address is already loaded, just jump to the next stubcode. + masm.Br(x0); +} + +} // namespace jit +} // namespace js + +#endif // jit_arm64_SharedICHelpers_arm64_h diff --git a/js/src/jit/arm64/SharedICRegisters-arm64.h b/js/src/jit/arm64/SharedICRegisters-arm64.h new file mode 100644 index 000000000000..932a16ddd91e --- /dev/null +++ b/js/src/jit/arm64/SharedICRegisters-arm64.h @@ -0,0 +1,60 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_arm64_SharedICRegisters_arm64_h +#define jit_arm64_SharedICRegisters_arm64_h + +#include "jit/MacroAssembler.h" + +namespace js { +namespace jit { + +// Must be a callee-saved register for preservation around generateEnterJIT(). +static constexpr Register BaselineFrameReg = r23; +static constexpr ARMRegister BaselineFrameReg64 = { BaselineFrameReg, 64 }; + +// The BaselineStackReg cannot be sp, because that register is treated +// as xzr/wzr during load/store operations. +static constexpr Register BaselineStackReg = PseudoStackPointer; + +// ValueOperands R0, R1, and R2. +// R0 == JSReturnReg, and R2 uses registers not preserved across calls. +// R1 value should be preserved across calls. 
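EmitStubGuardFailure above advances to the next stub by loading ICStub::offsetOfNext() and jumping straight to its code, so an IC chain behaves like a singly linked list of guarded handlers ending in a fallback. A standalone model of that dispatch; the struct layout is illustrative, not the real ICStub:

    // Illustrative stand-in for an IC stub: a guard, a body, and a chain pointer.
    struct ICStubModel {
        bool (*guard)(int operand);   // does this stub handle the operand?
        void (*code)(int operand);    // body to run when the guard passes
        ICStubModel* next;            // next stub; the final fallback's guard always passes
    };

    inline void DispatchICModel(ICStubModel* stub, int operand) {
        while (!stub->guard(operand))   // guard failure: fall through to the next stub's code
            stub = stub->next;
        stub->code(operand);
    }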
+static constexpr Register R0_ = r2; +static constexpr Register R1_ = r19; +static constexpr Register R2_ = r0; + +static constexpr ValueOperand R0(R0_); +static constexpr ValueOperand R1(R1_); +static constexpr ValueOperand R2(R2_); + +// ICTailCallReg and ICStubReg use registers that are not preserved across calls. +static constexpr Register ICTailCallReg = r30; +static constexpr Register ICStubReg = r9; + +// ExtractTemps must be callee-save registers: +// ICSetProp_Native::Compiler::generateStubCode() stores the object +// in ExtractTemp0, but then calls callTypeUpdateIC(), which clobbers +// caller-save registers. +// They should also not be the scratch registers ip0 or ip1, +// since those get clobbered all the time. +static constexpr Register ExtractTemp0 = r24; +static constexpr Register ExtractTemp1 = r25; + +// R7 - R9 are generally available for use within stubcode. + +// Note that BaselineTailCallReg is actually just the link +// register. In ARM code emission, we do not clobber BaselineTailCallReg +// since we keep the return address for calls there. + +// FloatReg0 must be equal to ReturnFloatReg. +static constexpr FloatRegister FloatReg0 = { FloatRegisters::v0 }; +static constexpr FloatRegister FloatReg1 = { FloatRegisters::v1 }; + +} // namespace jit +} // namespace js + +#endif // jit_arm64_SharedICRegisters_arm64_h diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h index d1720fa8a46b..d14538379554 100644 --- a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h +++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h @@ -27,8 +27,7 @@ #ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_ #define VIXL_A64_MACRO_ASSEMBLER_A64_H_ -// TODO: Re-enable once landed. -// #include "jit/arm64/Assembler-arm64.h" +#include "jit/arm64/Assembler-arm64.h" #include "jit/arm64/vixl/Debugger-vixl.h" #include "jit/arm64/vixl/Globals-vixl.h" diff --git a/js/src/jit/shared/CodeGenerator-shared.h b/js/src/jit/shared/CodeGenerator-shared.h index 0caef861aedf..e3c8ad75bbf1 100644 --- a/js/src/jit/shared/CodeGenerator-shared.h +++ b/js/src/jit/shared/CodeGenerator-shared.h @@ -8,6 +8,7 @@ #define jit_shared_CodeGenerator_shared_h #include "mozilla/Alignment.h" +#include "mozilla/Move.h" #include "jit/JitFrames.h" #include "jit/LIR.h" @@ -578,69 +579,57 @@ class OutOfLineCodeBase : public OutOfLineCode // ArgSeq store arguments for OutOfLineCallVM. // // OutOfLineCallVM are created with "oolCallVM" function. The third argument of -// this function is an instance of a class which provides a "generate" function -// to call the "pushArg" needed by the VMFunction call. The list of argument -// can be created by using the ArgList function which create an empty list of -// arguments. Arguments are added to this list by using the comma operator. -// The type of the argument list is returned by the comma operator, and due to -// templates arguments, it is quite painful to write by hand. It is recommended -// to use it directly as argument of a template function which would get its -// arguments infered by the compiler (such as oolCallVM). The list of arguments -// must be written in the same order as if you were calling the function in C++. +// this function is an instance of a class which provides a "generate" in charge +// of pushing the argument, with "pushArg", for a VMFunction. +// +// Such list of arguments can be created by using the "ArgList" function which +// creates one instance of "ArgSeq", where the type of the arguments are inferred +// from the type of the arguments. 
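The ArgSeq rewrite in the hunk that follows replaces comma-operator chaining with a recursive variadic template whose generate() pushes arguments from last to first. The same construction in isolation, with a stand-in recorder in place of CodeGeneratorShared:

    #include <iostream>
    #include <utility>

    struct PushRecorder {                       // stand-in for the code generator
        template <typename T>
        void pushArg(const T& value) { std::cout << "push " << value << "\n"; }
    };

    template <typename... Types>
    struct ArgSeqModel;

    template <>
    struct ArgSeqModel<> {
        void generate(PushRecorder&) const {}
    };

    template <typename Head, typename... Tail>
    struct ArgSeqModel<Head, Tail...> : ArgSeqModel<Tail...> {
        Head head_;
        explicit ArgSeqModel(Head&& head, Tail&&... tail)
          : ArgSeqModel<Tail...>(std::move(tail)...), head_(std::move(head)) {}

        // Recurse into the tail first, so arguments come out last-to-first.
        void generate(PushRecorder& out) const {
            this->ArgSeqModel<Tail...>::generate(out);
            out.pushArg(head_);
        }
    };

    template <typename... Types>
    ArgSeqModel<Types...> ArgListModel(Types... args) {
        return ArgSeqModel<Types...>(std::move(args)...);
    }

    int main() {
        PushRecorder out;
        ArgListModel(1, 2.5, 'x').generate(out);   // prints: push x, push 2.5, push 1
    }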
+// +// The list of arguments must be written in the same order as if you were +// calling the function in C++. // // Example: -// (ArgList(), ToRegister(lir->lhs()), ToRegister(lir->rhs())) +// ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs())) -template -class ArgSeq : public SeqType -{ - private: - typedef ArgSeq ThisType; - LastType last_; +template +class ArgSeq; - public: - ArgSeq(const SeqType& seq, const LastType& last) - : SeqType(seq), - last_(last) - { } - - template - inline ArgSeq - operator, (const NextType& last) const { - return ArgSeq(*this, last); - } - - inline void generate(CodeGeneratorShared* codegen) const { - codegen->pushArg(last_); - this->SeqType::generate(codegen); - } -}; - -// Mark the end of an argument list. template <> -class ArgSeq +class ArgSeq<> { - private: - typedef ArgSeq ThisType; - public: ArgSeq() { } - ArgSeq(const ThisType&) { } - - template - inline ArgSeq - operator, (const NextType& last) const { - return ArgSeq(*this, last); - } inline void generate(CodeGeneratorShared* codegen) const { } }; -inline ArgSeq -ArgList() +template +class ArgSeq : public ArgSeq { - return ArgSeq(); + private: + HeadType head_; + + public: + explicit ArgSeq(HeadType&& head, TailTypes&&... tail) + : ArgSeq(mozilla::Move(tail)...), + head_(mozilla::Move(head)) + { } + + // Arguments are pushed in reverse order, from last argument to first + // argument. + inline void generate(CodeGeneratorShared* codegen) const { + this->ArgSeq::generate(codegen); + codegen->pushArg(head_); + } +}; + +template +inline ArgSeq +ArgList(ArgTypes... args) +{ + return ArgSeq(mozilla::Move(args)...); } // Store wrappers, to generate the right move of data after the VM call. diff --git a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h index 9b3ce2136236..8e883be6f698 100644 --- a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h +++ b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h @@ -3744,12 +3744,12 @@ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm, off void doubleConstant(double d) { - spew(".double %.20f", d); + spew(".double %.16g", d); m_formatter.doubleConstant(d); } void floatConstant(float f) { - spew(".float %.20f", f); + spew(".float %.16g", f); m_formatter.floatConstant(f); } @@ -3761,7 +3761,7 @@ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm, off } void float32x4Constant(const float f[4]) { - spew(".float %f,%f,%f,%f", f[0], f[1], f[2], f[3]); + spew(".float %g,%g,%g,%g", f[0], f[1], f[2], f[3]); MOZ_ASSERT(m_formatter.isAligned(16)); m_formatter.float32x4Constant(f); } diff --git a/js/src/js.msg b/js/src/js.msg index d4a6a9acc477..7ae431f1d14c 100644 --- a/js/src/js.msg +++ b/js/src/js.msg @@ -311,7 +311,7 @@ MSG_DEF(JSMSG_SEMI_AFTER_FOR_COND, 0, JSEXN_SYNTAXERR, "missing ; after for- MSG_DEF(JSMSG_SEMI_AFTER_FOR_INIT, 0, JSEXN_SYNTAXERR, "missing ; after for-loop initializer") MSG_DEF(JSMSG_SEMI_BEFORE_STMNT, 0, JSEXN_SYNTAXERR, "missing ; before statement") MSG_DEF(JSMSG_SOURCE_TOO_LONG, 0, JSEXN_RANGEERR, "source is too long") -MSG_DEF(JSMSG_STMT_AFTER_RETURN, 0, JSEXN_SYNTAXERR, "unreachable code after return statement") +MSG_DEF(JSMSG_STMT_AFTER_RETURN, 0, JSEXN_NONE, "unreachable code after return statement") MSG_DEF(JSMSG_STRICT_CODE_WITH, 0, JSEXN_SYNTAXERR, "strict mode code may not contain 'with' statements") MSG_DEF(JSMSG_STRICT_FUNCTION_STATEMENT, 0, JSEXN_SYNTAXERR, "in strict mode code, functions may be declared only at top level or immediately 
within another function") MSG_DEF(JSMSG_TEMPLSTR_UNTERM_EXPR, 0, JSEXN_SYNTAXERR, "missing } in template string") diff --git a/js/src/jsapi.h b/js/src/jsapi.h index 7e6566f53021..2036beced9bc 100644 --- a/js/src/jsapi.h +++ b/js/src/jsapi.h @@ -4018,6 +4018,9 @@ namespace JS { * * The provided chain of SavedFrame objects can live in any compartment, * although it will be copied to the compartment where the stack is captured. + * + * See also `js/src/doc/SavedFrame/SavedFrame.md` for documentation on async + * stack frames. */ class MOZ_STACK_CLASS JS_PUBLIC_API(AutoSetAsyncStackForNewCalls) { @@ -5301,6 +5304,8 @@ CaptureCurrentStack(JSContext* cx, MutableHandleObject stackp, unsigned maxFrame * caller's principals do not subsume any of the chained SavedFrame object's * principals, `SavedFrameResult::AccessDenied` is returned and a (hopefully) * sane default value is chosen for the out param. + * + * See also `js/src/doc/SavedFrame/SavedFrame.md`. */ enum class SavedFrameResult { diff --git a/js/src/jsfriendapi.h b/js/src/jsfriendapi.h index 99843edb5b47..193dd0c07615 100644 --- a/js/src/jsfriendapi.h +++ b/js/src/jsfriendapi.h @@ -104,6 +104,8 @@ enum { JS_TELEMETRY_GC_REASON, JS_TELEMETRY_GC_IS_COMPARTMENTAL, JS_TELEMETRY_GC_MS, + JS_TELEMETRY_GC_BUDGET_MS, + JS_TELEMETRY_GC_ANIMATION_MS, JS_TELEMETRY_GC_MAX_PAUSE_MS, JS_TELEMETRY_GC_MARK_MS, JS_TELEMETRY_GC_SWEEP_MS, diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index 57c2e356b2e8..eafd362940f3 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -3004,7 +3004,7 @@ SliceBudget::describe(char* buffer, size_t maxlen) const { if (isUnlimited()) return JS_snprintf(buffer, maxlen, "unlimited"); - else if (deadline == 0) + else if (isWorkBudget()) return JS_snprintf(buffer, maxlen, "work(%lld)", workBudget.budget); else return JS_snprintf(buffer, maxlen, "%lldms", timeBudget.budget); diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp index 726dbb51a666..1354e898058b 100644 --- a/js/src/shell/js.cpp +++ b/js/src/shell/js.cpp @@ -126,6 +126,9 @@ static double gTimeoutInterval = -1.0; static volatile bool gServiceInterrupt = false; static JS::PersistentRootedValue gInterruptFunc; +static bool gLastWarningEnabled = false; +static JS::PersistentRootedValue gLastWarning; + static bool enableDisassemblyDumps = false; static bool offthreadCompilation = false; static bool enableBaseline = false; @@ -3037,6 +3040,63 @@ SetInterruptCallback(JSContext* cx, unsigned argc, Value* vp) return true; } +static bool +EnableLastWarning(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + + gLastWarningEnabled = true; + gLastWarning.setNull(); + + args.rval().setUndefined(); + return true; +} + +static bool +DisableLastWarning(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + + gLastWarningEnabled = false; + gLastWarning.setNull(); + + args.rval().setUndefined(); + return true; +} + +static bool +GetLastWarning(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + + if (!gLastWarningEnabled) { + JS_ReportError(cx, "Call enableLastWarning first."); + return false; + } + + if (!JS_WrapValue(cx, &gLastWarning)) + return false; + + args.rval().set(gLastWarning); + return true; +} + +static bool +ClearLastWarning(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + + if (!gLastWarningEnabled) { + JS_ReportError(cx, "Call enableLastWarning first."); + return false; + } + + gLastWarning.setNull(); + + 
args.rval().setUndefined(); + return true; +} + #ifdef DEBUG static bool StackDump(JSContext* cx, unsigned argc, Value* vp) @@ -4706,6 +4766,22 @@ static const JSFunctionSpecWithHelp shell_functions[] = { " Sets func as the interrupt callback function.\n" " Calling this function will replace any callback set by |timeout|.\n"), + JS_FN_HELP("enableLastWarning", EnableLastWarning, 0, 0, +"enableLastWarning()", +" Enable storing the last warning."), + + JS_FN_HELP("disableLastWarning", DisableLastWarning, 0, 0, +"disableLastWarning()", +" Disable storing the last warning."), + + JS_FN_HELP("getLastWarning", GetLastWarning, 0, 0, +"getLastWarning()", +" Returns an object that represents the last warning."), + + JS_FN_HELP("clearLastWarning", ClearLastWarning, 0, 0, +"clearLastWarning()", +" Clear the last warning."), + JS_FN_HELP("elapsed", Elapsed, 0, 0, "elapsed()", " Execution time elapsed for the current context."), @@ -5026,9 +5102,55 @@ js::shell::my_GetErrorMessage(void* userRef, const unsigned errorNumber) return &jsShell_ErrorFormatString[errorNumber]; } +static bool +CreateLastWarningObject(JSContext* cx, JSErrorReport* report) +{ + RootedObject warningObj(cx, JS_NewObject(cx, nullptr)); + if (!warningObj) + return false; + + RootedString nameStr(cx); + if (report->exnType == JSEXN_NONE) + nameStr = JS_NewStringCopyZ(cx, "None"); + else + nameStr = GetErrorTypeName(cx->runtime(), report->exnType); + if (!nameStr) + return false; + RootedValue nameVal(cx, StringValue(nameStr)); + if (!DefineProperty(cx, warningObj, cx->names().name, nameVal)) + return false; + + RootedString messageStr(cx, JS_NewUCStringCopyZ(cx, report->ucmessage)); + if (!messageStr) + return false; + RootedValue messageVal(cx, StringValue(messageStr)); + if (!DefineProperty(cx, warningObj, cx->names().message, messageVal)) + return false; + + RootedValue linenoVal(cx, Int32Value(report->lineno)); + if (!DefineProperty(cx, warningObj, cx->names().lineNumber, linenoVal)) + return false; + + RootedValue columnVal(cx, Int32Value(report->column)); + if (!DefineProperty(cx, warningObj, cx->names().columnNumber, columnVal)) + return false; + + gLastWarning.setObject(*warningObj); + return true; +} + void js::shell::my_ErrorReporter(JSContext* cx, const char* message, JSErrorReport* report) { + if (report && JSREPORT_IS_WARNING(report->flags) && gLastWarningEnabled) { + JS::AutoSaveExceptionState savedExc(cx); + if (!CreateLastWarningObject(cx, report)) { + fputs("Unhandled error happened while creating last warning object.\n", gOutFile); + fflush(gOutFile); + } + savedExc.restore(); + } + gGotError = PrintError(cx, gErrFile, message, report, reportWarnings); if (report->exnType != JSEXN_NONE && !JSREPORT_IS_WARNING(report->flags)) { if (report->errorNumber == JSMSG_OUT_OF_MEMORY) { @@ -6305,6 +6427,7 @@ main(int argc, char** argv, char** envp) return 1; gInterruptFunc.init(rt, NullValue()); + gLastWarning.init(rt, NullValue()); JS_SetGCParameter(rt, JSGC_MAX_BYTES, 0xffffffff); diff --git a/js/src/tests/ecma_6/Class/newTargetMethods.js b/js/src/tests/ecma_6/Class/newTargetMethods.js index 84dba1cec720..055ec0bf1984 100644 --- a/js/src/tests/ecma_6/Class/newTargetMethods.js +++ b/js/src/tests/ecma_6/Class/newTargetMethods.js @@ -1,3 +1,5 @@ +var test = ` + // Just like newTargetDirectInvoke, except to prove it works in functions // defined with method syntax as well. Note that methods, getters, and setters // are not constructible. 
@@ -46,7 +48,10 @@ for (let i = 0; i < TEST_ITERATIONS; i++) clInst.cl; for (let i = 0; i < TEST_ITERATIONS; i++) clInst.cl = 4; +`; +if (classesEnabled()) + eval(test); if (typeof reportCompare === "function") reportCompare(0,0,"OK"); diff --git a/js/src/tests/ecma_6/Class/superPropEvalInsideArrow.js b/js/src/tests/ecma_6/Class/superPropEvalInsideArrow.js index 882f93ba0ace..c81d86c3dc0d 100644 --- a/js/src/tests/ecma_6/Class/superPropEvalInsideArrow.js +++ b/js/src/tests/ecma_6/Class/superPropEvalInsideArrow.js @@ -1,3 +1,5 @@ +var test = ` + class foo { constructor() { } @@ -6,6 +8,10 @@ class foo { } } assertEq(new foo().method()(), Object.prototype.toString); +`; + +if (classesEnabled()) + eval(test); if (typeof reportCompare === "function") reportCompare(0,0,"OK"); diff --git a/js/src/tests/ecma_6/Class/superPropHeavyweightArrow.js b/js/src/tests/ecma_6/Class/superPropHeavyweightArrow.js index ce30690d539d..dda69edc0c7c 100644 --- a/js/src/tests/ecma_6/Class/superPropHeavyweightArrow.js +++ b/js/src/tests/ecma_6/Class/superPropHeavyweightArrow.js @@ -1,3 +1,5 @@ +var test = ` + class foo { constructor() { } @@ -7,6 +9,10 @@ class foo { } assertEq(new foo().method()(), Object.prototype.toString); +`; + +if (classesEnabled()) + eval(test); if (typeof reportCompare === "function") reportCompare(0,0,"OK"); diff --git a/js/src/tests/ecma_6/shell.js b/js/src/tests/ecma_6/shell.js index d9d777fdd474..1d5d37460759 100644 --- a/js/src/tests/ecma_6/shell.js +++ b/js/src/tests/ecma_6/shell.js @@ -204,3 +204,14 @@ if (typeof assertDeepEq === 'undefined') { }; })(); } + +if (typeof assertWarning === 'undefined') { + function assertWarning(func, name) { + enableLastWarning(); + func(); + var warning = getLastWarning(); + assertEq(warning !== null, true); + assertEq(warning.name, name); + disableLastWarning(); + } +} diff --git a/js/src/tests/js1_5/String/replace-flags.js b/js/src/tests/js1_5/String/replace-flags.js index 656d4acfa6f6..866aa877ef1a 100644 --- a/js/src/tests/js1_5/String/replace-flags.js +++ b/js/src/tests/js1_5/String/replace-flags.js @@ -6,10 +6,19 @@ var summary = 'Add console warnings for non-standard flag argument of String.pro printBugNumber(BUGNUMBER); printStatus (summary); -options("werror"); -assertEq(evaluate("'aaaA'.match('a', 'i')", {catchTermination: true}), "terminated"); -assertEq(evaluate("'aaaA'.search('a', 'i')", {catchTermination: true}), "terminated"); -assertEq(evaluate("'aaaA'.replace('a', 'b', 'g')", {catchTermination: true}), "terminated"); +function assertWarningForComponent(code, name) { + enableLastWarning(); + var g = newGlobal(); + g.eval(code); + var warning = getLastWarning(); + assertEq(warning !== null, true); + assertEq(warning.name, name); + disableLastWarning(); +} + +assertWarningForComponent(`'aaaA'.match('a', 'i');`, "None"); +assertWarningForComponent(`'aaaA'.search('a', 'i');`, "None"); +assertWarningForComponent(`'aaaA'.replace('a', 'b', 'g');`, "None"); if (typeof reportCompare === "function") reportCompare(true, true); diff --git a/js/src/tests/lib/jittests.py b/js/src/tests/lib/jittests.py index 7fb9dce3cfe5..180234153cb6 100755 --- a/js/src/tests/lib/jittests.py +++ b/js/src/tests/lib/jittests.py @@ -116,6 +116,8 @@ class Test: self.tz_pacific = False # True means force Pacific time for the test self.test_also_noasmjs = False # True means run with and without asm.js # enabled. + self.test_also = [] # List of other configurations to test with. + self.test_join = [] # List of other configurations to test with all existing variants. 
self.expect_error = '' # Errors to expect and consider passing self.expect_status = 0 # Exit status to expect from shell @@ -129,6 +131,8 @@ class Test: t.valgrind = self.valgrind t.tz_pacific = self.tz_pacific t.test_also_noasmjs = self.test_also_noasmjs + t.test_also = self.test_also + t.test_join = self.test_join t.expect_error = self.expect_error t.expect_status = self.expect_status return t @@ -139,12 +143,14 @@ class Test: return t def copy_variants(self, variants): - # If the tests are flagged with the |jit-test| test-also-noasmjs flags, - # then we duplicate the variants such that the test can be used both - # with the interpreter and asmjs. This is a simple way to check for - # differential behaviour. - if self.test_also_noasmjs: - variants = variants + [['--no-asmjs']] + # Append variants to be tested in addition to the current set of tests. + variants = variants + self.test_also + + # For each existing variant, duplicates it for each list of options in + # test_join. This will multiply the number of variants by 2 for set of + # options. + for join_opts in self.test_join: + variants = variants + [ opts + join_opts for opts in variants ]; # For each list of jit flags, make a copy of the test. return [self.copy_and_extend_jitflags(v) for v in variants] @@ -201,7 +207,12 @@ class Test: elif name == 'tz-pacific': test.tz_pacific = True elif name == 'test-also-noasmjs': - test.test_also_noasmjs = options.can_test_also_noasmjs + if options.can_test_also_noasmjs: + test.test_also.append(['--no-asmjs']) + elif name.startswith('test-also='): + test.test_also.append([name[len('test-also='):]]) + elif name.startswith('test-join='): + test.test_join.append([name[len('test-join='):]]) elif name == 'ion-eager': test.jitflags.append('--ion-eager') elif name == 'dump-bytecode': diff --git a/js/src/tests/shell/warning.js b/js/src/tests/shell/warning.js new file mode 100644 index 000000000000..dfa1d5405fc0 --- /dev/null +++ b/js/src/tests/shell/warning.js @@ -0,0 +1,52 @@ +// |reftest| skip-if(!xulRuntime.shell) + +var BUGNUMBER = 1170716; +var summary = 'Add js shell functions to get last warning'; + +print(BUGNUMBER + ": " + summary); + +// Warning with JSEXN_NONE. + +enableLastWarning(); + +eval(`({}).__proto__ = {};`); + +var warning = getLastWarning(); +assertEq(warning !== null, true); +assertEq(warning.name, "None"); +assertEq(warning.message.includes("mutating"), true); +assertEq(warning.lineNumber, 1); +assertEq(warning.columnNumber, 1); + +// Clear last warning. + +clearLastWarning(); +warning = getLastWarning(); +assertEq(warning, null); + +// Warning with JSEXN_SYNTAXERR. + +options("strict"); +eval(`var a; if (a=0) {}`); + +warning = getLastWarning(); +assertEq(warning !== null, true); +assertEq(warning.name, "SyntaxError"); +assertEq(warning.message.includes("equality"), true); +assertEq(warning.lineNumber, 1); +assertEq(warning.columnNumber, 14); + +// Disabled. 
+ +disableLastWarning(); + +eval(`var a; if (a=0) {}`); + +enableLastWarning(); +warning = getLastWarning(); +assertEq(warning, null); + +disableLastWarning(); + +if (typeof reportCompare === "function") + reportCompare(true, true); diff --git a/js/src/vm/HelperThreads.cpp b/js/src/vm/HelperThreads.cpp index 7974ca49d506..0a6eea154d65 100644 --- a/js/src/vm/HelperThreads.cpp +++ b/js/src/vm/HelperThreads.cpp @@ -434,8 +434,8 @@ js::EnqueuePendingParseTasksAfterGC(JSRuntime* rt) HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER); } -static const uint32_t kDefaultHelperStackSize = 512 * 1024; -static const uint32_t kDefaultHelperStackQuota = 450 * 1024; +static const uint32_t kDefaultHelperStackSize = 2048 * 1024; +static const uint32_t kDefaultHelperStackQuota = 1800 * 1024; // TSan enforces a minimum stack size that's just slightly larger than our // default helper stack size. It does this to store blobs of TSan-specific diff --git a/js/src/vm/NativeObject.cpp b/js/src/vm/NativeObject.cpp index 4463fdfceedb..819e50c23c66 100644 --- a/js/src/vm/NativeObject.cpp +++ b/js/src/vm/NativeObject.cpp @@ -1205,9 +1205,80 @@ GetExistingPropertyValue(ExclusiveContext* cx, HandleNativeObject obj, HandleId } if (!cx->shouldBeJSContext()) return false; + + MOZ_ASSERT(shape->propid() == id); + MOZ_ASSERT(obj->contains(cx, shape)); + return GetExistingProperty(cx->asJSContext(), obj, obj, shape, vp); } +/* + * If ES6 draft rev 37 9.1.6.3 ValidateAndApplyPropertyDescriptor step 4 would + * return early, because desc is redundant with an existing own property obj[id], + * then set *redundant = true and return true. + */ +static bool +DefinePropertyIsRedundant(ExclusiveContext* cx, HandleNativeObject obj, HandleId id, + HandleShape shape, unsigned shapeAttrs, + Handle desc, bool *redundant) +{ + *redundant = false; + + if (desc.hasConfigurable() && desc.configurable() != ((shapeAttrs & JSPROP_PERMANENT) == 0)) + return true; + if (desc.hasEnumerable() && desc.enumerable() != ((shapeAttrs & JSPROP_ENUMERATE) != 0)) + return true; + if (desc.isDataDescriptor()) { + if ((shapeAttrs & (JSPROP_GETTER | JSPROP_SETTER)) != 0) + return true; + if (desc.hasWritable() && desc.writable() != ((shapeAttrs & JSPROP_READONLY) == 0)) + return true; + if (desc.hasValue()) { + // Get the current value of the existing property. + RootedValue currentValue(cx); + if (!IsImplicitDenseOrTypedArrayElement(shape) && + shape->hasSlot() && + shape->hasDefaultGetter()) + { + // Inline GetExistingPropertyValue in order to omit a type + // correctness assertion that's too strict for this particular + // call site. For details, see bug 1125624 comments 13-16. + currentValue.set(obj->getSlot(shape->slot())); + } else { + if (!GetExistingPropertyValue(cx, obj, id, shape, ¤tValue)) + return false; + } + + // The specification calls for SameValue here, but it seems to be a + // bug. See . + if (desc.value() != currentValue) + return true; + } + + GetterOp existingGetterOp = + IsImplicitDenseOrTypedArrayElement(shape) ? nullptr : shape->getter(); + if (desc.getter() != existingGetterOp) + return true; + + SetterOp existingSetterOp = + IsImplicitDenseOrTypedArrayElement(shape) ? 
nullptr : shape->setter(); + if (desc.setter() != existingSetterOp) + return true; + } else { + if (desc.hasGetterObject()) { + if (!(shapeAttrs & JSPROP_GETTER) || desc.getterObject() != shape->getterObject()) + return true; + } + if (desc.hasSetterObject()) { + if (!(shapeAttrs & JSPROP_SETTER) || desc.setterObject() != shape->setterObject()) + return true; + } + } + + *redundant = true; + return true; +} + bool js::NativeDefineProperty(ExclusiveContext* cx, HandleNativeObject obj, HandleId id, Handle desc_, @@ -1296,6 +1367,25 @@ js::NativeDefineProperty(ExclusiveContext* cx, HandleNativeObject obj, HandleId MOZ_ASSERT(shape); + // Steps 3-4. (Step 3 is a special case of step 4.) We use shapeAttrs as a + // stand-in for shape in many places below, since shape might not be a + // pointer to a real Shape (see IsImplicitDenseOrTypedArrayElement). + unsigned shapeAttrs = GetShapeAttributes(obj, shape); + bool redundant; + if (!DefinePropertyIsRedundant(cx, obj, id, shape, shapeAttrs, desc, &redundant)) + return false; + if (redundant) { + // In cases involving JSOP_NEWOBJECT and JSOP_INITPROP, obj can have a + // type for this property that doesn't match the value in the slot. + // Update the type here, even though this DefineProperty call is + // otherwise a no-op. (See bug 1125624 comment 13.) + if (!IsImplicitDenseOrTypedArrayElement(shape) && desc.hasValue()) { + if (!UpdateShapeTypeAndValue(cx, obj, shape, desc.value())) + return false; + } + return result.succeed(); + } + // Non-standard hack: Allow redefining non-configurable properties if // JSPROP_REDEFINE_NONCONFIGURABLE is set _and_ the object is a non-DOM // global. The idea is that a DOM object can never have such a thing on @@ -1306,12 +1396,7 @@ js::NativeDefineProperty(ExclusiveContext* cx, HandleNativeObject obj, HandleId obj->is() && !obj->getClass()->isDOMClass(); - // Steps 3-4 are redundant. - - // Step 5. We use shapeAttrs as a stand-in for shape in many places below - // since shape might not be a pointer to a real Shape (see - // IsImplicitDenseOrTypedArrayElement). - unsigned shapeAttrs = GetShapeAttributes(obj, shape); + // Step 5. 
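DefinePropertyIsRedundant above lets NativeDefineProperty succeed without reshaping when the incoming descriptor would change nothing. A heavily reduced model of that comparison for plain data properties, with simplified stand-ins for the attribute flags and descriptor; accessors and the SameValue caveat are left out:

    #include <cstdint>
    #include <optional>

    struct ExistingDataPropertyModel {
        bool configurable, enumerable, writable;
        int64_t value;                            // stand-in for a JS Value
    };

    struct DescriptorModel {                      // only the fields the caller chose to supply
        std::optional<bool> configurable, enumerable, writable;
        std::optional<int64_t> value;
    };

    // True when every supplied field already matches, so DefineProperty may
    // return success immediately instead of redefining the property.
    inline bool DefineIsRedundantModel(const ExistingDataPropertyModel& prop,
                                       const DescriptorModel& desc) {
        if (desc.configurable && *desc.configurable != prop.configurable) return false;
        if (desc.enumerable   && *desc.enumerable   != prop.enumerable)   return false;
        if (desc.writable     && *desc.writable     != prop.writable)     return false;
        if (desc.value        && *desc.value        != prop.value)        return false;
        return true;
    }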
if (!IsConfigurable(shapeAttrs) && !skipRedefineChecks) { if (desc.hasConfigurable() && desc.configurable()) return result.fail(JSMSG_CANT_REDEFINE_PROP); diff --git a/js/src/vm/UnboxedObject.cpp b/js/src/vm/UnboxedObject.cpp index 3e08f3c63c21..2548a6379eca 100644 --- a/js/src/vm/UnboxedObject.cpp +++ b/js/src/vm/UnboxedObject.cpp @@ -522,7 +522,7 @@ UnboxedLayout::makeNativeGroup(JSContext* cx, ObjectGroup* group) return false; HeapTypeSet* nativeProperty = nativeGroup->maybeGetProperty(id); - if (nativeProperty->canSetDefinite(i)) + if (nativeProperty && nativeProperty->canSetDefinite(i)) nativeProperty->setDefinite(i); } } diff --git a/js/xpconnect/src/XPCJSRuntime.cpp b/js/xpconnect/src/XPCJSRuntime.cpp index 1785c47cf12b..0b6c18c118a4 100644 --- a/js/xpconnect/src/XPCJSRuntime.cpp +++ b/js/xpconnect/src/XPCJSRuntime.cpp @@ -3110,6 +3110,12 @@ AccumulateTelemetryCallback(int id, uint32_t sample, const char* key) case JS_TELEMETRY_GC_MS: Telemetry::Accumulate(Telemetry::GC_MS, sample); break; + case JS_TELEMETRY_GC_BUDGET_MS: + Telemetry::Accumulate(Telemetry::GC_BUDGET_MS, sample); + break; + case JS_TELEMETRY_GC_ANIMATION_MS: + Telemetry::Accumulate(Telemetry::GC_ANIMATION_MS, sample); + break; case JS_TELEMETRY_GC_MAX_PAUSE_MS: Telemetry::Accumulate(Telemetry::GC_MAX_PAUSE_MS, sample); break; diff --git a/js/xpconnect/src/nsXPConnect.cpp b/js/xpconnect/src/nsXPConnect.cpp index 4cdb40fe20d7..428c0b187df8 100644 --- a/js/xpconnect/src/nsXPConnect.cpp +++ b/js/xpconnect/src/nsXPConnect.cpp @@ -85,7 +85,7 @@ nsXPConnect::~nsXPConnect() // XPConnect, to clean the stuff we forcibly disconnected. The forced // shutdown code defaults to leaking in a number of situations, so we can't // get by with only the second GC. :-( - JS_GC(mRuntime->Runtime()); + mRuntime->GarbageCollect(JS::gcreason::XPCONNECT_SHUTDOWN); mShuttingDown = true; XPCWrappedNativeScope::SystemIsBeingShutDown(); @@ -95,7 +95,7 @@ nsXPConnect::~nsXPConnect() // after which point we need to GC to clean everything up. We need to do // this before deleting the XPCJSRuntime, because doing so destroys the // maps that our finalize callback depends on. 
- JS_GC(mRuntime->Runtime()); + mRuntime->GarbageCollect(JS::gcreason::XPCONNECT_SHUTDOWN); NS_RELEASE(gSystemPrincipal); gScriptSecurityManager = nullptr; diff --git a/layout/base/nsBidi.cpp b/layout/base/nsBidi.cpp index 24630c4b0cab..f669c9731536 100644 --- a/layout/base/nsBidi.cpp +++ b/layout/base/nsBidi.cpp @@ -505,7 +505,7 @@ void nsBidi::GetDirProps(const char16_t *aText) } else if (state == SEEKING_STRONG_FOR_FSI) { if (stackLast <= NSBIDI_MAX_EXPLICIT_LEVEL) { dirProps[isolateStartStack[stackLast]] = LRI; - flags |= LRI; + flags |= DIRPROP_FLAG(LRI); } state = LOOKING_FOR_PDI; } @@ -518,7 +518,7 @@ void nsBidi::GetDirProps(const char16_t *aText) } else if (state == SEEKING_STRONG_FOR_FSI) { if (stackLast <= NSBIDI_MAX_EXPLICIT_LEVEL) { dirProps[isolateStartStack[stackLast]] = RLI; - flags |= RLI; + flags |= DIRPROP_FLAG(RLI); } state = LOOKING_FOR_PDI; } diff --git a/layout/base/nsDisplayList.cpp b/layout/base/nsDisplayList.cpp index 4852f5884bae..2e65f85febd4 100644 --- a/layout/base/nsDisplayList.cpp +++ b/layout/base/nsDisplayList.cpp @@ -1582,7 +1582,7 @@ already_AddRefed nsDisplayList::PaintRoot(nsDisplayListBuilder* aB } if (addMetrics || ensureMetricsForRootId) { - bool isRoot = presContext->IsRootContentDocument(); + bool isRootContent = presContext->IsRootContentDocument(); nsRect viewport(aBuilder->ToReferenceFrame(frame), frame->GetSize()); @@ -1591,7 +1591,7 @@ already_AddRefed nsDisplayList::PaintRoot(nsDisplayListBuilder* aB rootScrollFrame, content, aBuilder->FindReferenceFrameFor(frame), root, FrameMetrics::NULL_SCROLL_ID, viewport, Nothing(), - isRoot, containerParameters)); + isRootContent, containerParameters)); } else { // Set empty metrics to clear any metrics that might be on a recycled layer. root->SetFrameMetrics(nsTArray()); diff --git a/layout/base/nsLayoutUtils.cpp b/layout/base/nsLayoutUtils.cpp index fc550dc932e7..71ab13c3827e 100644 --- a/layout/base/nsLayoutUtils.cpp +++ b/layout/base/nsLayoutUtils.cpp @@ -8169,7 +8169,7 @@ nsLayoutUtils::ComputeFrameMetrics(nsIFrame* aForFrame, ViewID aScrollParentId, const nsRect& aViewport, const Maybe& aClipRect, - bool aIsRoot, + bool aIsRootContent, const ContainerLayerParameters& aContainerParameters) { nsPresContext* presContext = aForFrame->PresContext(); @@ -8248,7 +8248,7 @@ nsLayoutUtils::ComputeFrameMetrics(nsIFrame* aForFrame, // overscroll handoff chain. 
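The nsBidi hunk above changes `flags |= LRI` to `flags |= DIRPROP_FLAG(LRI)`: flags is a bit set indexed by direction class, so OR-ing in the raw class value sets unrelated low bits instead of the single bit for that class. A small illustration, with the flag macro spelled out as the usual one-bit shift; both the macro definition and the enum values are assumptions for illustration:

    #include <cstdint>

    #define DIRPROP_FLAG_MODEL(dirProp) (1u << (dirProp))   // assumed: one bit per class

    enum DirPropClassModel : uint32_t { LRI_MODEL = 19, RLI_MODEL = 20 };  // illustrative values

    inline uint32_t withRawValue(uint32_t flags) { return flags | LRI_MODEL; }                     // wrong: sets bits 0, 1 and 4
    inline uint32_t withFlagBit(uint32_t flags)  { return flags | DIRPROP_FLAG_MODEL(LRI_MODEL); } // right: sets bit 19 only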
MOZ_ASSERT(aScrollParentId == FrameMetrics::NULL_SCROLL_ID || scrollId != aScrollParentId); metrics.SetScrollId(scrollId); - metrics.SetIsRoot(aIsRoot); + metrics.SetIsRootContent(aIsRootContent); metrics.SetScrollParentId(aScrollParentId); if (scrollId != FrameMetrics::NULL_SCROLL_ID && !presContext->GetParentPresContext()) { diff --git a/layout/build/moz.build b/layout/build/moz.build index e144a964cd84..abe45694ce72 100644 --- a/layout/build/moz.build +++ b/layout/build/moz.build @@ -119,6 +119,7 @@ if CONFIG['MOZ_B2G_BT']: if CONFIG['MOZ_WEBSPEECH']: LOCAL_INCLUDES += [ + '/dom/media/webspeech/recognition', '/dom/media/webspeech/synth', ] diff --git a/layout/build/nsLayoutModule.cpp b/layout/build/nsLayoutModule.cpp index c2f1216acbe0..0f243a598312 100644 --- a/layout/build/nsLayoutModule.cpp +++ b/layout/build/nsLayoutModule.cpp @@ -99,6 +99,9 @@ #ifdef MOZ_WEBSPEECH_TEST_BACKEND #include "mozilla/dom/FakeSpeechRecognitionService.h" #endif +#ifdef MOZ_WEBSPEECH_POCKETSPHINX +#include "mozilla/dom/PocketSphinxSpeechRecognitionService.h" +#endif #ifdef MOZ_WEBSPEECH #include "mozilla/dom/nsSynthVoiceRegistry.h" #endif @@ -635,6 +638,9 @@ NS_GENERIC_FACTORY_SINGLETON_CONSTRUCTOR(DataStoreService, DataStoreService::Get #ifdef MOZ_WEBSPEECH_TEST_BACKEND NS_GENERIC_FACTORY_CONSTRUCTOR(FakeSpeechRecognitionService) #endif +#ifdef MOZ_WEBSPEECH_POCKETSPHINX +NS_GENERIC_FACTORY_CONSTRUCTOR(PocketSphinxSpeechRecognitionService) +#endif NS_GENERIC_FACTORY_CONSTRUCTOR(nsCSPContext) NS_GENERIC_FACTORY_CONSTRUCTOR(CSPService) @@ -831,6 +837,9 @@ NS_DEFINE_NAMED_CID(NS_GAMEPAD_TEST_CID); #ifdef MOZ_WEBSPEECH_TEST_BACKEND NS_DEFINE_NAMED_CID(NS_FAKE_SPEECH_RECOGNITION_SERVICE_CID); #endif +#ifdef MOZ_WEBSPEECH_POCKETSPHINX +NS_DEFINE_NAMED_CID(NS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID); +#endif #ifdef MOZ_WEBSPEECH NS_DEFINE_NAMED_CID(NS_SYNTHVOICEREGISTRY_CID); #endif @@ -1088,6 +1097,9 @@ static const mozilla::Module::CIDEntry kLayoutCIDs[] = { #ifdef MOZ_WEBSPEECH_TEST_BACKEND { &kNS_FAKE_SPEECH_RECOGNITION_SERVICE_CID, false, nullptr, FakeSpeechRecognitionServiceConstructor }, #endif +#ifdef MOZ_WEBSPEECH_POCKETSPHINX + { &kNS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID, false, nullptr, PocketSphinxSpeechRecognitionServiceConstructor }, +#endif #ifdef MOZ_WEBSPEECH { &kNS_SYNTHVOICEREGISTRY_CID, true, nullptr, nsSynthVoiceRegistryConstructor }, #endif @@ -1252,6 +1264,9 @@ static const mozilla::Module::ContractIDEntry kLayoutContracts[] = { #ifdef MOZ_WEBSPEECH_TEST_BACKEND { NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "fake", &kNS_FAKE_SPEECH_RECOGNITION_SERVICE_CID }, #endif +#ifdef MOZ_WEBSPEECH_POCKETSPHINX + { NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "pocketsphinx", &kNS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID }, +#endif #ifdef MOZ_WEBSPEECH { NS_SYNTHVOICEREGISTRY_CONTRACTID, &kNS_SYNTHVOICEREGISTRY_CID }, #endif diff --git a/layout/generic/nsBlockFrame.cpp b/layout/generic/nsBlockFrame.cpp index e3f0786a18e3..fe8e33061e89 100644 --- a/layout/generic/nsBlockFrame.cpp +++ b/layout/generic/nsBlockFrame.cpp @@ -7215,7 +7215,7 @@ nsBlockFrame::ISizeToClearPastFloats(nsBlockReflowState& aState, nscoord inlineStartOffset, inlineEndOffset; WritingMode wm = aState.mReflowState.GetWritingMode(); nsCSSOffsetState offsetState(aFrame, aState.mReflowState.rendContext, - aState.mContentArea.Width(wm)); + wm, aState.mContentArea.ISize(wm)); ReplacedElementISizeToClear result; aState.ComputeReplacedBlockOffsetsForFloats(aFrame, aFloatAvailableSpace, diff --git 
a/layout/generic/nsBlockReflowState.cpp b/layout/generic/nsBlockReflowState.cpp index 1d8945840d2a..aca5af94cd9d 100644 --- a/layout/generic/nsBlockReflowState.cpp +++ b/layout/generic/nsBlockReflowState.cpp @@ -182,7 +182,7 @@ nsBlockReflowState::ComputeReplacedBlockOffsetsForFloats( } else { LogicalMargin frameMargin(wm); nsCSSOffsetState os(aFrame, mReflowState.rendContext, - mContentArea.ISize(wm)); + wm, mContentArea.ISize(wm)); frameMargin = os.ComputedLogicalMargin().ConvertTo(wm, aFrame->GetWritingMode()); @@ -209,7 +209,8 @@ GetBEndMarginClone(nsIFrame* aFrame, { if (aFrame->StyleBorder()->mBoxDecorationBreak == NS_STYLE_BOX_DECORATION_BREAK_CLONE) { - nsCSSOffsetState os(aFrame, aRenderingContext, aContentArea.ISize(aWritingMode)); + nsCSSOffsetState os(aFrame, aRenderingContext, aWritingMode, + aContentArea.ISize(aWritingMode)); return os.ComputedLogicalMargin(). ConvertTo(aWritingMode, aFrame->GetWritingMode()).BEnd(aWritingMode); @@ -721,7 +722,7 @@ nsBlockReflowState::FlowAndPlaceFloat(nsIFrame* aFloat) "Float frame has wrong parent"); nsCSSOffsetState offsets(aFloat, mReflowState.rendContext, - mReflowState.ComputedISize()); + wm, mReflowState.ComputedISize()); nscoord floatMarginISize = FloatMarginISize(mReflowState, adjustedAvailableSpace.ISize(wm), diff --git a/layout/generic/nsGfxScrollFrame.cpp b/layout/generic/nsGfxScrollFrame.cpp index 4f1856bd0661..04fa0cdbf955 100644 --- a/layout/generic/nsGfxScrollFrame.cpp +++ b/layout/generic/nsGfxScrollFrame.cpp @@ -3066,13 +3066,13 @@ ScrollFrameHelper::ComputeFrameMetrics(Layer* aLayer, } nsPoint toReferenceFrame = mOuter->GetOffsetToCrossDoc(aContainerReferenceFrame); - bool isRoot = mIsRoot && mOuter->PresContext()->IsRootContentDocument(); + bool isRootContent = mIsRoot && mOuter->PresContext()->IsRootContentDocument(); Maybe parentLayerClip; if (needsParentLayerClip) { nsRect clip = nsRect(mScrollPort.TopLeft() + toReferenceFrame, nsLayoutUtils::CalculateCompositionSizeForFrame(mOuter)); - if (isRoot) { + if (isRootContent) { double res = mOuter->PresContext()->PresShell()->GetResolution(); clip.width = NSToCoordRound(clip.width / res); clip.height = NSToCoordRound(clip.height / res); @@ -3089,7 +3089,7 @@ ScrollFrameHelper::ComputeFrameMetrics(Layer* aLayer, #if defined(MOZ_WIDGET_ANDROID) && !defined(MOZ_ANDROID_APZ) // Android without apzc (aka the java pan zoom code) only uses async scrolling // for the root scroll frame of the root content document. - if (!isRoot) { + if (!isRootContent) { thisScrollFrameUsesAsyncScrolling = false; } #endif @@ -3124,7 +3124,7 @@ ScrollFrameHelper::ComputeFrameMetrics(Layer* aLayer, nsLayoutUtils::ComputeFrameMetrics( mScrolledFrame, mOuter, mOuter->GetContent(), aContainerReferenceFrame, aLayer, mScrollParentID, - scrollport, parentLayerClip, isRoot, aParameters); + scrollport, parentLayerClip, isRootContent, aParameters); } bool diff --git a/layout/generic/nsHTMLReflowState.cpp b/layout/generic/nsHTMLReflowState.cpp index edca8648ad71..5c6e935386e6 100644 --- a/layout/generic/nsHTMLReflowState.cpp +++ b/layout/generic/nsHTMLReflowState.cpp @@ -144,6 +144,7 @@ FontSizeInflationListMarginAdjustment(const nsIFrame* aFrame) // containing-block block-size, rather than its inline-size. 
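The MOZ_WEBSPEECH_POCKETSPHINX hunks in nsLayoutModule.cpp above follow the same pattern as the existing fake test backend: a CID, a generic factory constructor, and a contract-ID entry built from the shared speech-service prefix plus the suffix "pocketsphinx", all guarded by the build flag. A self-contained sketch of that table-driven registration idea; the prefix string, type names, and factory lambdas here are placeholders, not the real XPCOM machinery:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct SpeechRecognitionService {
      virtual ~SpeechRecognitionService() = default;
      virtual std::string Name() const = 0;
    };
    struct FakeService : SpeechRecognitionService {
      std::string Name() const override { return "fake"; }
    };
    struct PocketSphinxService : SpeechRecognitionService {
      std::string Name() const override { return "pocketsphinx"; }
    };

    using Factory = std::function<std::unique_ptr<SpeechRecognitionService>()>;
    // Placeholder prefix, standing in for the real contract-ID prefix macro.
    static const char kPrefix[] = "@example/webspeech/service;1?name=";

    int main() {
      std::map<std::string, Factory> contracts;
      contracts[std::string(kPrefix) + "fake"] =
          [] { return std::make_unique<FakeService>(); };
    #ifdef MOZ_WEBSPEECH_POCKETSPHINX   // registered only when the backend is built in
      contracts[std::string(kPrefix) + "pocketsphinx"] =
          [] { return std::make_unique<PocketSphinxService>(); };
    #endif

      auto it = contracts.find(std::string(kPrefix) + "pocketsphinx");
      std::cout << (it != contracts.end() ? it->second()->Name()
                                          : std::string("not registered")) << '\n';
    }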
nsCSSOffsetState::nsCSSOffsetState(nsIFrame *aFrame, nsRenderingContext *aRenderingContext, + WritingMode aContainingBlockWritingMode, nscoord aContainingBlockISize) : frame(aFrame) , rendContext(aRenderingContext) @@ -153,9 +154,9 @@ nsCSSOffsetState::nsCSSOffsetState(nsIFrame *aFrame, "We're about to resolve percent margin & padding " "values against CB inline size, which is incorrect for " "flex/grid items"); - LogicalSize cbSize(mWritingMode, aContainingBlockISize, + LogicalSize cbSize(aContainingBlockWritingMode, aContainingBlockISize, aContainingBlockISize); - InitOffsets(cbSize, frame->GetType()); + InitOffsets(aContainingBlockWritingMode, cbSize, frame->GetType()); } // Initialize a reflow state for a child frame's reflow. Some state @@ -2031,7 +2032,7 @@ nsHTMLReflowState::InitConstraints(nsPresContext* aPresContext, // height equal to the available space if (nullptr == parentReflowState || mFlags.mDummyParentReflowState) { // XXXldb This doesn't mean what it used to! - InitOffsets(OffsetPercentBasis(frame, wm, aContainingBlockSize), + InitOffsets(wm, OffsetPercentBasis(frame, wm, aContainingBlockSize), aFrameType, aBorder, aPadding); // Override mComputedMargin since reflow roots start from the // frame's boundary, which is inside the margin. @@ -2084,9 +2085,15 @@ nsHTMLReflowState::InitConstraints(nsPresContext* aPresContext, // XXX Might need to also pass the CB height (not width) for page boxes, // too, if we implement them. - InitOffsets(OffsetPercentBasis(frame, wm, cbSize), + + // For calculating positioning offsets, margins, borders and + // padding, we use the writing mode of the containing block + WritingMode cbwm = cbrs->GetWritingMode(); + InitOffsets(cbwm, OffsetPercentBasis(frame, cbwm, + cbSize.ConvertTo(cbwm, wm)), aFrameType, aBorder, aPadding); + // For calculating the size of this box, we use its own writing mode const nsStyleCoord &blockSize = mStylePosition->BSize(wm); nsStyleUnit blockSizeUnit = blockSize.GetUnit(); @@ -2138,14 +2145,16 @@ nsHTMLReflowState::InitConstraints(nsPresContext* aPresContext, } } - // Compute our offsets if the element is relatively positioned. We need - // the correct containing block width and blockSize here, which is why we need - // to do it after all the quirks-n-such above. (If the element is sticky - // positioned, we need to wait until the scroll container knows its size, - // so we compute offsets from StickyScrollContainer::UpdatePositions.) + // Compute our offsets if the element is relatively positioned. We + // need the correct containing block inline-size and block-size + // here, which is why we need to do it after all the quirks-n-such + // above. (If the element is sticky positioned, we need to wait + // until the scroll container knows its size, so we compute offsets + // from StickyScrollContainer::UpdatePositions.) 
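The constructor and InitConstraints changes above are the heart of the patch: the percent basis is now built as a LogicalSize in the containing block's writing mode, and cbSize is run through ConvertTo(cbwm, wm) before InitOffsets sees it. A toy model of what that conversion does for orthogonal flows; the real WritingMode and LogicalSize classes also track direction and line orientation, while these stand-ins only distinguish horizontal from vertical:

    #include <iostream>

    struct WritingMode { bool vertical; };
    static bool Orthogonal(WritingMode a, WritingMode b) { return a.vertical != b.vertical; }

    struct LogicalSize {
      int isize;  // inline-size in the mode the size was created in
      int bsize;  // block-size in that mode
      LogicalSize ConvertTo(WritingMode aTo, WritingMode aFrom) const {
        // Converting between orthogonal modes swaps the inline and block axes;
        // between parallel modes the components are unchanged.
        return Orthogonal(aTo, aFrom) ? LogicalSize{bsize, isize} : *this;
      }
    };

    int main() {
      WritingMode frameWM{true};              // e.g. a vertical-rl frame
      WritingMode cbWM{false};                // inside a horizontal-tb containing block
      LogicalSize cbSizeInFrameWM{480, 640};  // CB size as seen from the frame's mode

      LogicalSize basis = cbSizeInFrameWM.ConvertTo(cbWM, frameWM);
      std::cout << "percent basis in CB mode: inline=" << basis.isize
                << " block=" << basis.bsize << '\n';   // inline=640 block=480
    }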
if (mStyleDisplay->IsRelativelyPositioned(frame) && NS_STYLE_POSITION_RELATIVE == mStyleDisplay->mPosition) { - ComputeRelativeOffsets(wm, frame, cbSize, ComputedPhysicalOffsets()); + ComputeRelativeOffsets(cbwm, frame, cbSize.ConvertTo(cbwm, wm), + ComputedPhysicalOffsets()); } else { // Initialize offsets to 0 ComputedPhysicalOffsets().SizeTo(0, 0, 0, 0); @@ -2306,7 +2315,8 @@ UpdateProp(FrameProperties& aProps, } void -nsCSSOffsetState::InitOffsets(const LogicalSize& aPercentBasis, +nsCSSOffsetState::InitOffsets(WritingMode aWM, + const LogicalSize& aPercentBasis, nsIAtom* aFrameType, const nsMargin *aBorder, const nsMargin *aPadding) @@ -2323,7 +2333,7 @@ nsCSSOffsetState::InitOffsets(const LogicalSize& aPercentBasis, // become the default computed values, and may be adjusted below // XXX fix to provide 0,0 for the top&bottom margins for // inline-non-replaced elements - bool needMarginProp = ComputeMargin(aPercentBasis); + bool needMarginProp = ComputeMargin(aWM, aPercentBasis); // XXX We need to include 'auto' horizontal margins in this too! // ... but if we did that, we'd need to fix nsFrame::GetUsedMargin // to use it even when the margins are all zero (since sometimes @@ -2356,7 +2366,7 @@ nsCSSOffsetState::InitOffsets(const LogicalSize& aPercentBasis, (frame->GetStateBits() & NS_FRAME_REFLOW_ROOT); } else { - needPaddingProp = ComputePadding(aPercentBasis, aFrameType); + needPaddingProp = ComputePadding(aWM, aPercentBasis, aFrameType); } if (isThemed) { @@ -2650,7 +2660,8 @@ nsHTMLReflowState::CalcLineHeight(nsIContent* aContent, } bool -nsCSSOffsetState::ComputeMargin(const LogicalSize& aPercentBasis) +nsCSSOffsetState::ComputeMargin(WritingMode aWM, + const LogicalSize& aPercentBasis) { // SVG text frames have no margin. if (frame->IsSVGText()) { @@ -2662,26 +2673,29 @@ nsCSSOffsetState::ComputeMargin(const LogicalSize& aPercentBasis) bool isCBDependent = !styleMargin->GetMargin(ComputedPhysicalMargin()); if (isCBDependent) { - // We have to compute the value - WritingMode wm = GetWritingMode(); - LogicalMargin m(wm); - m.IStart(wm) = nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.ISize(wm), - styleMargin->mMargin.GetIStart(wm)); - m.IEnd(wm) = nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.ISize(wm), - styleMargin->mMargin.GetIEnd(wm)); + // We have to compute the value. Note that this calculation is + // performed according to the writing mode of the containing block + // (http://dev.w3.org/csswg/css-writing-modes-3/#orthogonal-flows) + LogicalMargin m(aWM); + m.IStart(aWM) = nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.ISize(aWM), + styleMargin->mMargin.GetIStart(aWM)); + m.IEnd(aWM) = nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.ISize(aWM), + styleMargin->mMargin.GetIEnd(aWM)); - m.BStart(wm) = nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.BSize(wm), - styleMargin->mMargin.GetBStart(wm)); - m.BEnd(wm) = nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.BSize(wm), - styleMargin->mMargin.GetBEnd(wm)); + m.BStart(aWM) = nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.BSize(aWM), + styleMargin->mMargin.GetBStart(aWM)); + m.BEnd(aWM) = nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.BSize(aWM), + styleMargin->mMargin.GetBEnd(aWM)); - SetComputedLogicalMargin(m); + SetComputedLogicalMargin(aWM, m); } + // ... 
but font-size-inflation-based margin adjustment uses the + // frame's writing mode nscoord marginAdjustment = FontSizeInflationListMarginAdjustment(frame); if (marginAdjustment > 0) { @@ -2694,7 +2708,8 @@ nsCSSOffsetState::ComputeMargin(const LogicalSize& aPercentBasis) } bool -nsCSSOffsetState::ComputePadding(const LogicalSize& aPercentBasis, +nsCSSOffsetState::ComputePadding(WritingMode aWM, + const LogicalSize& aPercentBasis, nsIAtom* aFrameType) { // If style can provide us the padding directly, then use it. @@ -2709,25 +2724,26 @@ nsCSSOffsetState::ComputePadding(const LogicalSize& aPercentBasis, ComputedPhysicalPadding().SizeTo(0,0,0,0); } else if (isCBDependent) { - // We have to compute the value + // We have to compute the value. This calculation is performed + // according to the writing mode of the containing block + // (http://dev.w3.org/csswg/css-writing-modes-3/#orthogonal-flows) // clamp negative calc() results to 0 - WritingMode wm = GetWritingMode(); - LogicalMargin p(wm); - p.IStart(wm) = std::max(0, nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.ISize(wm), - stylePadding->mPadding.GetIStart(wm))); - p.IEnd(wm) = std::max(0, nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.ISize(wm), - stylePadding->mPadding.GetIEnd(wm))); + LogicalMargin p(aWM); + p.IStart(aWM) = std::max(0, nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.ISize(aWM), + stylePadding->mPadding.GetIStart(aWM))); + p.IEnd(aWM) = std::max(0, nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.ISize(aWM), + stylePadding->mPadding.GetIEnd(aWM))); - p.BStart(wm) = std::max(0, nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.BSize(wm), - stylePadding->mPadding.GetBStart(wm))); - p.BEnd(wm) = std::max(0, nsLayoutUtils:: - ComputeCBDependentValue(aPercentBasis.BSize(wm), - stylePadding->mPadding.GetBEnd(wm))); + p.BStart(aWM) = std::max(0, nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.BSize(aWM), + stylePadding->mPadding.GetBStart(aWM))); + p.BEnd(aWM) = std::max(0, nsLayoutUtils:: + ComputeCBDependentValue(aPercentBasis.BSize(aWM), + stylePadding->mPadding.GetBEnd(aWM))); - SetComputedLogicalPadding(p); + SetComputedLogicalPadding(aWM, p); } return isCBDependent; } diff --git a/layout/generic/nsHTMLReflowState.h b/layout/generic/nsHTMLReflowState.h index 93817e76ac48..f3146478f495 100644 --- a/layout/generic/nsHTMLReflowState.h +++ b/layout/generic/nsHTMLReflowState.h @@ -124,12 +124,23 @@ public: const LogicalMargin ComputedLogicalPadding() const { return LogicalMargin(mWritingMode, mComputedPadding); } + void SetComputedLogicalMargin(mozilla::WritingMode aWM, + const LogicalMargin& aMargin) + { mComputedMargin = aMargin.GetPhysicalMargin(aWM); } void SetComputedLogicalMargin(const LogicalMargin& aMargin) - { mComputedMargin = aMargin.GetPhysicalMargin(mWritingMode); } + { SetComputedLogicalMargin(mWritingMode, aMargin); } + + void SetComputedLogicalBorderPadding(mozilla::WritingMode aWM, + const LogicalMargin& aMargin) + { mComputedBorderPadding = aMargin.GetPhysicalMargin(aWM); } void SetComputedLogicalBorderPadding(const LogicalMargin& aMargin) - { mComputedBorderPadding = aMargin.GetPhysicalMargin(mWritingMode); } + { SetComputedLogicalBorderPadding(mWritingMode, aMargin); } + + void SetComputedLogicalPadding(mozilla::WritingMode aWM, + const LogicalMargin& aMargin) + { mComputedPadding = aMargin.GetPhysicalMargin(aWM); } void SetComputedLogicalPadding(const LogicalMargin& aMargin) - { mComputedPadding = aMargin.GetPhysicalMargin(mWritingMode); } + { 
SetComputedLogicalPadding(mWritingMode, aMargin); } WritingMode GetWritingMode() const { return mWritingMode; } @@ -159,6 +170,7 @@ public: } nsCSSOffsetState(nsIFrame *aFrame, nsRenderingContext *aRenderingContext, + mozilla::WritingMode aContainingBlockWritingMode, nscoord aContainingBlockISize); #ifdef DEBUG @@ -180,9 +192,11 @@ private: * Computes margin values from the specified margin style information, and * fills in the mComputedMargin member. * + * @param aWM Writing mode of the containing block * @param aPercentBasis - * Logical size to use for resolving percentage margin values in - * the inline and block axes. + * Logical size in the writing mode of the containing block to use + * for resolving percentage margin values in the inline and block + * axes. * The inline size is usually the containing block inline-size * (width if writing mode is horizontal, and height if vertical). * The block size is usually the containing block inline-size, per @@ -191,15 +205,18 @@ private: * Flexbox and Grid. * @return true if the margin is dependent on the containing block size. */ - bool ComputeMargin(const mozilla::LogicalSize& aPercentBasis); + bool ComputeMargin(mozilla::WritingMode aWM, + const mozilla::LogicalSize& aPercentBasis); /** * Computes padding values from the specified padding style information, and * fills in the mComputedPadding member. * + * @param aWM Writing mode of the containing block * @param aPercentBasis - * Length to use for resolving percentage padding values in - * the inline and block axes. + * Logical size in the writing mode of the containing block to use + * for resolving percentage padding values in the inline and block + * axes. * The inline size is usually the containing block inline-size * (width if writing mode is horizontal, and height if vertical). * The block size is usually the containing block inline-size, per @@ -208,12 +225,14 @@ private: * Flexbox and Grid. * @return true if the padding is dependent on the containing block size. 
*/ - bool ComputePadding(const mozilla::LogicalSize& aPercentBasis, + bool ComputePadding(mozilla::WritingMode aWM, + const mozilla::LogicalSize& aPercentBasis, nsIAtom* aFrameType); protected: - void InitOffsets(const mozilla::LogicalSize& aPercentBasis, + void InitOffsets(mozilla::WritingMode aWM, + const mozilla::LogicalSize& aPercentBasis, nsIAtom* aFrameType, const nsMargin *aBorder = nullptr, const nsMargin *aPadding = nullptr); diff --git a/layout/ipc/RenderFrameParent.cpp b/layout/ipc/RenderFrameParent.cpp index 4e47526d3e49..c15e64e53758 100644 --- a/layout/ipc/RenderFrameParent.cpp +++ b/layout/ipc/RenderFrameParent.cpp @@ -198,7 +198,7 @@ public: void ClearRenderFrame() { mRenderFrame = nullptr; } - virtual void SendAsyncScrollDOMEvent(bool aIsRoot, + virtual void SendAsyncScrollDOMEvent(bool aIsRootContent, const CSSRect& aContentRect, const CSSSize& aContentSize) override { @@ -207,10 +207,10 @@ public: FROM_HERE, NewRunnableMethod(this, &RemoteContentController::SendAsyncScrollDOMEvent, - aIsRoot, aContentRect, aContentSize)); + aIsRootContent, aContentRect, aContentSize)); return; } - if (mRenderFrame && aIsRoot) { + if (mRenderFrame && aIsRootContent) { TabParent* browser = TabParent::GetFrom(mRenderFrame->Manager()); BrowserElementParent::DispatchAsyncScrollEvent(browser, aContentRect, aContentSize); diff --git a/layout/mathml/nsMathMLSelectedFrame.cpp b/layout/mathml/nsMathMLSelectedFrame.cpp index ecf3bac6a1d2..f55e5693df70 100644 --- a/layout/mathml/nsMathMLSelectedFrame.cpp +++ b/layout/mathml/nsMathMLSelectedFrame.cpp @@ -117,7 +117,8 @@ nsMathMLSelectedFrame::ComputeSize(nsRenderingContext *aRenderingContext, nscoord availableISize = aAvailableISize - aBorder.ISize(aWM) - aPadding.ISize(aWM) - aMargin.ISize(aWM); LogicalSize cbSize = aCBSize - aBorder - aPadding - aMargin; - nsCSSOffsetState offsetState(childFrame, aRenderingContext, availableISize); + nsCSSOffsetState offsetState(childFrame, aRenderingContext, aWM, + availableISize); LogicalSize size = childFrame->ComputeSize(aRenderingContext, aWM, cbSize, availableISize, offsetState.ComputedLogicalMargin().Size(aWM), diff --git a/layout/reftests/w3c-css/submitted/flexbox/flexbox-writing-mode-001.html b/layout/reftests/w3c-css/submitted/flexbox/flexbox-writing-mode-001.html index a6ff66c6e854..b2fb6d3f1f07 100644 --- a/layout/reftests/w3c-css/submitted/flexbox/flexbox-writing-mode-001.html +++ b/layout/reftests/w3c-css/submitted/flexbox/flexbox-writing-mode-001.html @@ -7,7 +7,7 @@ CSS Test: Try various flex-flow values, with 'direction: ltr' and 'writing-mode: horizontal-tb' - + + + + +
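ComputeMargin and ComputePadding above now resolve their percentages against a basis expressed in the containing block's writing mode, and, per the updated doc comments, both the inline and block components of that basis carry the containing block's inline-size (flex and grid items excepted); ComputePadding additionally clamps negative calc() results to zero, since padding, unlike margin, can never be negative. A standalone sketch with simplified stand-ins for nsStyleCoord and nsLayoutUtils::ComputeCBDependentValue:

    #include <algorithm>
    #include <iostream>

    // Simplified CB-dependent style value: a fixed length (app units) or a
    // percentage of the percent basis.
    struct StyleValue {
      bool   isPercent;
      double percent;   // used when isPercent is true
      int    length;    // app units, used otherwise
    };

    static int ResolveCBDependent(int aPercentBasis, const StyleValue& aValue) {
      return aValue.isPercent ? static_cast<int>(aValue.percent * aPercentBasis)
                              : aValue.length;
    }

    int main() {
      // Both axes of the percent basis carry the containing block's inline-size
      // (CSS 2.1 §8.3/§8.4: block-axis margin and padding percentages also
      // resolve against it), measured in the CB's own writing mode.
      const int cbInlineSize = 12000;  // app units (200 CSS px)

      StyleValue tenPercent{true, 0.10, 0};
      StyleValue negativeCalc{false, 0.0, -600};  // e.g. a calc() that resolved below zero

      int marginIStart = ResolveCBDependent(cbInlineSize, tenPercent);   // 1200
      int marginBStart = ResolveCBDependent(cbInlineSize, tenPercent);   // same basis: 1200
      int paddingIEnd  = std::max(0, ResolveCBDependent(cbInlineSize, negativeCalc)); // clamped to 0

      std::cout << marginIStart << ' ' << marginBStart << ' ' << paddingIEnd << '\n';
    }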
+ [stripped test markup and intervening new-file diffs not preserved]
diff --git a/layout/reftests/writing-mode/1172774-percent-margin-2.html b/layout/reftests/writing-mode/1172774-percent-margin-2.html new file mode 100644 index 000000000000..55ce27f509cb --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-margin-2.html @@ -0,0 +1,59 @@ [new reftest markup, title: CSS Writing Modes Test: margin percentage and 'vertical-lr']
diff --git a/layout/reftests/writing-mode/1172774-percent-margin-3.html b/layout/reftests/writing-mode/1172774-percent-margin-3.html new file mode 100644 index 000000000000..03f5edde4f95 --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-margin-3.html @@ -0,0 +1,63 @@ [new reftest markup, title: CSS Writing Modes Test: margin percentage and 'vertical-rl']
diff --git a/layout/reftests/writing-mode/1172774-percent-margin-4.html b/layout/reftests/writing-mode/1172774-percent-margin-4.html new file mode 100644 index 000000000000..adac543bed65 --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-margin-4.html @@ -0,0 +1,63 @@ [new reftest markup, title: CSS Writing Modes Test: margin percentage and 'vertical-lr']
diff --git a/layout/reftests/writing-mode/1172774-percent-padding-1.html b/layout/reftests/writing-mode/1172774-percent-padding-1.html new file mode 100644 index 000000000000..d665db72528d --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-padding-1.html @@ -0,0 +1,57 @@ [new reftest markup, title: CSS Writing Modes Test: padding percentage and 'vertical-rl']
diff --git a/layout/reftests/writing-mode/1172774-percent-padding-2.html b/layout/reftests/writing-mode/1172774-percent-padding-2.html new file mode 100644 index 000000000000..2a9cb52f7642 --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-padding-2.html @@ -0,0 +1,58 @@ [new reftest markup, title: CSS Writing Modes Test: padding percentage and 'vertical-lr']
diff --git a/layout/reftests/writing-mode/1172774-percent-padding-3.html b/layout/reftests/writing-mode/1172774-percent-padding-3.html new file mode 100644 index 000000000000..fb897735a5c3 --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-padding-3.html @@ -0,0 +1,62 @@ [new reftest markup, title: CSS Writing Modes Test: padding percentage and 'vertical-rl']
diff --git a/layout/reftests/writing-mode/1172774-percent-padding-4.html b/layout/reftests/writing-mode/1172774-percent-padding-4.html new file mode 100644 index 000000000000..a3d8859a5b1e --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-padding-4.html @@ -0,0 +1,62 @@ [new reftest markup, title: CSS Writing Modes Test: padding percentage and 'vertical-lr']
diff --git a/layout/reftests/writing-mode/1172774-percent-vertical-ref.html b/layout/reftests/writing-mode/1172774-percent-vertical-ref.html new file mode 100644 index 000000000000..2fd2a2f08fac --- /dev/null +++ b/layout/reftests/writing-mode/1172774-percent-vertical-ref.html @@ -0,0 +1,10 @@ [reference markup, title: CSS Writing Modes Test: margin percentage and 'vertical-rl']
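The reftests listed above pin down the user-visible effect of the new percent basis: inside a vertical-rl or vertical-lr containing block the inline axis is vertical, so a percentage margin or padding must resolve against the block's physical height. The numbers below are illustrative only (not taken from the reftests), and the "against width" line is shown purely for contrast, not as a claim about the exact pre-patch behaviour:

    #include <iostream>

    int main() {
      const double cbWidth = 500.0, cbHeight = 300.0;  // CSS px
      const double pct = 0.10;                         // 'margin: 10%'

      // The containing block is vertical-rl, so its inline axis is vertical
      // and its inline-size is the physical height.
      const double cbInlineSize = cbHeight;

      std::cout << "against physical width: " << pct * cbWidth      << "px\n"; // 50px
      std::cout << "against CB inline-size: " << pct * cbInlineSize << "px\n"; // 30px
    }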
+ + \ No newline at end of file diff --git a/layout/reftests/writing-mode/blue-yellow-165w-206h.png b/layout/reftests/writing-mode/blue-yellow-165w-206h.png new file mode 100644 index 000000000000..3d70889ee25e Binary files /dev/null and b/layout/reftests/writing-mode/blue-yellow-165w-206h.png differ diff --git a/layout/reftests/writing-mode/blue-yellow-206w-165h.png b/layout/reftests/writing-mode/blue-yellow-206w-165h.png new file mode 100644 index 000000000000..47ceb84d3fbd Binary files /dev/null and b/layout/reftests/writing-mode/blue-yellow-206w-165h.png differ diff --git a/layout/reftests/writing-mode/reftest.list b/layout/reftests/writing-mode/reftest.list index de7b241435cc..a8174a84863e 100644 --- a/layout/reftests/writing-mode/reftest.list +++ b/layout/reftests/writing-mode/reftest.list @@ -140,6 +140,14 @@ fails == 1147834-relative-overconstrained-vertical-rl-rtl.html 1147834-top-left- == 1157758-1-vertical-arabic.html 1157758-1-vertical-arabic-ref.html == 1158549-1-vertical-block-size-constraints.html 1158549-1-vertical-block-size-constraints-ref.html == 1163238-orthogonal-auto-margins.html 1163238-orthogonal-auto-margins-ref.html +== 1172774-percent-margin-1.html 1172774-percent-horizontal-ref.html +== 1172774-percent-margin-2.html 1172774-percent-horizontal-ref.html +== 1172774-percent-margin-3.html 1172774-percent-vertical-ref.html +== 1172774-percent-margin-4.html 1172774-percent-vertical-ref.html +== 1172774-percent-padding-1.html 1172774-percent-horizontal-ref.html +== 1172774-percent-padding-2.html 1172774-percent-horizontal-ref.html +== 1172774-percent-padding-3.html 1172774-percent-vertical-ref.html +== 1172774-percent-padding-4.html 1172774-percent-vertical-ref.html # Suite of tests from Gérard Talbot in bug 1079151 include abspos/reftest.list diff --git a/layout/reftests/writing-mode/swatch-yellow.png b/layout/reftests/writing-mode/swatch-yellow.png new file mode 100644 index 000000000000..1591aa0e2e27 Binary files /dev/null and b/layout/reftests/writing-mode/swatch-yellow.png differ diff --git a/layout/style/forms.css b/layout/style/forms.css index d3379999b3d4..df901f648c08 100644 --- a/layout/style/forms.css +++ b/layout/style/forms.css @@ -2,10 +2,10 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -/** +/** Styles for old GFX form widgets - **/ - + **/ + @namespace url(http://www.w3.org/1999/xhtml); /* set default namespace to HTML */ @namespace xul url(http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul); @@ -48,7 +48,7 @@ label { /* default inputs, text inputs, and selects */ -/* Note: Values in nsNativeTheme IsWidgetStyled function +/* Note: Values in nsNativeTheme IsWidgetStyled function need to match textfield background/border values here */ input { @@ -206,7 +206,7 @@ select { line-height: normal !important; white-space: nowrap !important; word-wrap: normal !important; - text-align: start; + text-align: start; cursor: default; box-sizing: border-box; -moz-user-select: none; @@ -224,7 +224,7 @@ select { /* Need the "select[size][multiple]" selector to override the settings on 'select[size="1"]', eg if one has