Merge inbound to mozilla-central. a=merge

Tiberius Oros 2018-02-27 00:19:49 +02:00
Parent 22d5a6c371 b16eed599e
Commit ba173eb9ee
125 changed files: 3404 additions and 1438 deletions

View file

@ -1 +1,2 @@
Andreas Tolfsen <ato@sny.no> <ato@mozilla.com>
Nika Layzell <nika@thelayzells.com> Michael Layzell <michael@thelayzells.com>

View file

@ -251,7 +251,7 @@ var BrowserPageActions = {
let popupSet = document.getElementById("mainPopupSet");
popupSet.appendChild(panelNode);
panelNode.addEventListener("popuphidden", () => {
panelNode.remove();
PanelMultiView.removePopup(panelNode);
}, { once: true });
if (iframeNode) {

View file

@ -77,18 +77,17 @@ toolbar[customizable="true"] {
panelmultiview {
-moz-box-align: start;
-moz-binding: url("chrome://browser/content/customizableui/panelUI.xml#panelmultiview");
}
panelmultiview[transitioning] {
pointer-events: none;
}
panelview {
-moz-box-orient: vertical;
}
panel[hidden] panelmultiview {
-moz-binding: none;
}
panelview:not([current]):not([in-transition]) {
panelview:not([visible]) {
visibility: collapse;
}
@ -97,6 +96,26 @@ panelview[mainview] > .panel-header {
display: none;
}
.panel-viewcontainer {
overflow: hidden;
}
.panel-viewcontainer[panelopen] {
transition-property: height;
transition-timing-function: var(--animation-easing-function);
transition-duration: var(--panelui-subview-transition-duration);
will-change: height;
}
.panel-viewcontainer.offscreen {
position: absolute;
}
.panel-viewstack {
overflow: visible;
transition: height var(--panelui-subview-transition-duration);
}
#navigator-toolbox {
-moz-binding: url("chrome://browser/content/tabbrowser.xml#empty");
}

View file

@ -100,21 +100,19 @@ add_task(async function() {
await BrowserTestUtils.waitForEvent(PanelUI.panel, "ViewShown");
// Workaround until bug 1363756 is fixed, then this can be removed.
let container = PanelUI.multiView.querySelector(".panel-viewcontainer");
await BrowserTestUtils.waitForCondition(() => {
return !PanelUI.multiView.instance._viewContainer.hasAttribute("width");
return !container.hasAttribute("width");
});
info("Shown " + PanelUI.multiView.instance._currentSubView.id);
// Unfortunately, I can't find a better accessor to the current
// subview, so I have to reach the PanelMultiView instance
// here.
await openSubViewsRecursively(PanelUI.multiView.instance._currentSubView);
info("Shown " + PanelUI.multiView.current.id);
await openSubViewsRecursively(PanelUI.multiView.current);
PanelUI.multiView.goBack();
await BrowserTestUtils.waitForEvent(PanelUI.panel, "ViewShown");
// Workaround until bug 1363756 is fixed, then this can be removed.
await BrowserTestUtils.waitForCondition(() => {
return !PanelUI.multiView.instance._viewContainer.hasAttribute("width");
return !container.hasAttribute("width");
});
}
}

View file

@ -55,6 +55,20 @@
* When navigating backwards, an open subview will first become invisible and
* then will be closed.
*
* -- Active or inactive
*
* This indicates whether the view is fully scrolled into the visible area
* and ready to receive mouse and keyboard events. An active view is always
* visible, but a visible view may be inactive. For example, during a scroll
* transition, both views will be inactive.
*
* When a view becomes active, the ViewShown event is fired synchronously.
* For the main view of the panel, this happens during the "popupshown"
* event, which means that other "popupshown" handlers may be called before
* the view is active. However, the main view can already receive mouse and
* keyboard events at this point, mainly to allow regression tests to use
* the "popupshown" event to simulate interaction.
*
* -- Navigating with the keyboard
*
* An open view may keep state related to keyboard navigation, even if it is
@ -96,11 +110,14 @@ ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
ChromeUtils.import("resource://gre/modules/Services.jsm");
ChromeUtils.defineModuleGetter(this, "AppConstants",
"resource://gre/modules/AppConstants.jsm");
ChromeUtils.defineModuleGetter(this, "BrowserUtils",
"resource://gre/modules/BrowserUtils.jsm");
ChromeUtils.defineModuleGetter(this, "CustomizableUI",
"resource:///modules/CustomizableUI.jsm");
XPCOMUtils.defineLazyGetter(this, "gBundle", function() {
return Services.strings.createBundle(
"chrome://browser/locale/browser.properties");
});
/**
* Safety timeout after which asynchronous events will be canceled if any of the
* registered blockers does not return.
@ -115,6 +132,7 @@ const TRANSITION_PHASES = Object.freeze({
});
let gNodeToObjectMap = new WeakMap();
let gWindowsWithUnloadHandler = new WeakSet();
let gMultiLineElementsMap = new WeakMap();
/**
@ -247,7 +265,7 @@ var AssociatedToNode = class {
};
/**
* This is associated to <panelmultiview> elements by the panelUI.xml binding.
* This is associated to <panelmultiview> elements.
*/
var PanelMultiView = class extends this.AssociatedToNode {
/**
@ -285,6 +303,44 @@ var PanelMultiView = class extends this.AssociatedToNode {
}
}
/**
* Removes the specified <panel> from the document, ensuring that any
* <panelmultiview> node it contains is destroyed properly.
*
* If the panel does not contain a <panelmultiview>, it is removed directly.
* This allows consumers like page actions to accept different panel types.
*/
static removePopup(panelNode) {
try {
let panelMultiViewNode = panelNode.querySelector("panelmultiview");
if (panelMultiViewNode) {
this.forNode(panelMultiViewNode).disconnect();
}
} finally {
// Make sure to remove the panel element even if disconnecting fails.
panelNode.remove();
}
}
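As a hedged usage sketch (the panel element and its id are illustrative, not taken from this patch), a consumer that creates a temporary panel can delegate its teardown to this helper from a "popuphidden" listener, mirroring the browser-pageActions.js and PanelUI.js call sites elsewhere in this diff:

// Illustrative teardown of a dynamically created panel.
let tempPanel = document.getElementById("my-temporary-panel"); // assumed id
tempPanel.addEventListener("popuphidden", () => {
  // Disconnects any contained <panelmultiview> before removing the <panel>.
  PanelMultiView.removePopup(tempPanel);
}, { once: true });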
/**
* Ensures that when the specified window is closed, all the <panelmultiview>
* nodes it contains are destroyed properly.
*/
static ensureUnloadHandlerRegistered(window) {
if (gWindowsWithUnloadHandler.has(window)) {
return;
}
window.addEventListener("unload", () => {
for (let panelMultiViewNode of
window.document.querySelectorAll("panelmultiview")) {
this.forNode(panelMultiViewNode).disconnect();
}
}, { once: true });
gWindowsWithUnloadHandler.add(window);
}
get _panel() {
return this.node.parentNode;
}
@ -349,28 +405,41 @@ var PanelMultiView = class extends this.AssociatedToNode {
connect() {
this.connected = true;
PanelMultiView.ensureUnloadHandlerRegistered(this.window);
let viewContainer = this._viewContainer =
this.document.createElement("box");
viewContainer.classList.add("panel-viewcontainer");
let viewStack = this._viewStack = this.document.createElement("box");
viewStack.classList.add("panel-viewstack");
viewContainer.append(viewStack);
let offscreenViewContainer = this.document.createElement("box");
offscreenViewContainer.classList.add("panel-viewcontainer", "offscreen");
let offscreenViewStack = this._offscreenViewStack =
this.document.createElement("box");
offscreenViewStack.classList.add("panel-viewstack");
offscreenViewContainer.append(offscreenViewStack);
this.node.prepend(offscreenViewContainer);
this.node.prepend(viewContainer);
this.openViews = [];
this.__transitioning = false;
const {document, window} = this;
this._viewContainer =
document.getAnonymousElementByAttribute(this.node, "anonid", "viewContainer");
this._viewStack =
document.getAnonymousElementByAttribute(this.node, "anonid", "viewStack");
this._offscreenViewStack =
document.getAnonymousElementByAttribute(this.node, "anonid", "offscreenViewStack");
XPCOMUtils.defineLazyGetter(this, "_panelViewCache", () => {
let viewCacheId = this.node.getAttribute("viewCacheId");
return viewCacheId ? document.getElementById(viewCacheId) : null;
return viewCacheId ? this.document.getElementById(viewCacheId) : null;
});
this._panel.addEventListener("popupshowing", this);
this._panel.addEventListener("popuppositioned", this);
this._panel.addEventListener("popuphidden", this);
this._panel.addEventListener("popupshown", this);
let cs = window.getComputedStyle(document.documentElement);
let cs = this.window.getComputedStyle(this.document.documentElement);
// Set CSS-determined attributes now to prevent a layout flush when we do
// it when transitioning between panels.
this._dir = cs.direction;
@ -391,9 +460,9 @@ var PanelMultiView = class extends this.AssociatedToNode {
});
}
destructor() {
disconnect() {
// Guard against re-entrancy.
if (!this.node)
if (!this.node || !this.connected)
return;
this._cleanupTransitionPhase();
@ -489,21 +558,8 @@ var PanelMultiView = class extends this.AssociatedToNode {
return true;
}
try {
// Most of the panel elements in the browser window have their display
// turned off for performance reasons, typically by setting the "hidden"
// attribute. If the caller has just turned on the display, the XBL
// binding for the <panelmultiview> element may still be disconnected.
// In this case, give the layout code a chance to run.
if (!this.connected) {
await this.window.promiseDocumentFlushed(() => {});
// The XBL binding must be connected at this point. If this is not the
// case, the calling code should be updated to unhide the panel.
if (!this.connected) {
throw new Error("The binding for the panelmultiview element isn't" +
" connected. The containing panel may still have" +
" its display turned off by the hidden attribute.");
}
this.connect();
}
// Allow any of the ViewShowing handlers to prevent showing the main view.
if (!(await this._showMainView())) {
@ -631,7 +687,7 @@ var PanelMultiView = class extends this.AssociatedToNode {
}
await this._transitionViews(prevPanelView.node, viewNode, false, anchor);
this._viewShown(nextPanelView);
this._activateView(nextPanelView);
}
/**
@ -655,7 +711,7 @@ var PanelMultiView = class extends this.AssociatedToNode {
this._closeLatestView();
this._viewShown(nextPanelView);
this._activateView(nextPanelView);
}
/**
@ -672,6 +728,10 @@ var PanelMultiView = class extends this.AssociatedToNode {
let oldPanelMultiViewNode = nextPanelView.node.panelMultiView;
if (oldPanelMultiViewNode) {
PanelMultiView.forNode(oldPanelMultiViewNode).hidePopup();
// Wait for a layout flush after hiding the popup, otherwise the view may
// not be displayed correctly for some time after the new panel is opened.
// This is filed as bug 1441015.
await this.window.promiseDocumentFlushed(() => {});
}
if (!(await this._openView(nextPanelView))) {
@ -688,7 +748,6 @@ var PanelMultiView = class extends this.AssociatedToNode {
nextPanelView.visible = true;
nextPanelView.descriptionHeightWorkaround();
this._viewShown(nextPanelView);
return true;
}
@ -728,10 +787,12 @@ var PanelMultiView = class extends this.AssociatedToNode {
}
/**
* Raises the ViewShown event if the specified view is still open.
* Activates the specified view and raises the ViewShown event, unless the
* view was closed in the meantime.
*/
_viewShown(panelView) {
_activateView(panelView) {
if (panelView.node.panelMultiView == this.node) {
panelView.active = true;
panelView.dispatchCustomEvent("ViewShown");
}
}
@ -804,10 +865,6 @@ var PanelMultiView = class extends this.AssociatedToNode {
if (anchor)
anchor.setAttribute("open", "true");
// Since we're going to show two subview at the same time, don't abuse the
// 'current' attribute, since it's needed for other state-keeping, but use
// a separate 'in-transition' attribute instead.
previousViewNode.setAttribute("in-transition", true);
// Set the viewContainer dimensions to make sure only the current view is
// visible.
let olderView = reverse ? nextPanelView : prevPanelView;
@ -825,7 +882,7 @@ var PanelMultiView = class extends this.AssociatedToNode {
// reopening a subview, because its contents may have changed.
viewRect = { width: nextPanelView.knownWidth,
height: nextPanelView.knownHeight };
viewNode.setAttribute("in-transition", true);
nextPanelView.visible = true;
} else if (viewNode.customRectGetter) {
// Can't use Object.assign directly with a DOM Rect object because its properties
// aren't enumerable.
@ -836,12 +893,13 @@ var PanelMultiView = class extends this.AssociatedToNode {
if (header && header.classList.contains("panel-header")) {
viewRect.height += this._dwu.getBoundsWithoutFlushing(header).height;
}
viewNode.setAttribute("in-transition", true);
nextPanelView.visible = true;
nextPanelView.descriptionHeightWorkaround();
} else {
let oldSibling = viewNode.nextSibling || null;
this._offscreenViewStack.style.minHeight = olderView.knownHeight + "px";
this._offscreenViewStack.appendChild(viewNode);
viewNode.setAttribute("in-transition", true);
nextPanelView.visible = true;
// Now that the subview is visible, we can check the height of the
// description elements it contains.
@ -884,6 +942,10 @@ var PanelMultiView = class extends this.AssociatedToNode {
// Use an outline instead of a border so that the size is not affected.
deepestNode.style.outline = "1px solid var(--panel-separator-color)";
// Now that all the elements are in place for the start of the transition,
// give the layout code a chance to set the initial values.
await window.promiseDocumentFlushed(() => {});
// Now set the viewContainer dimensions to that of the new view, which
// kicks off the height animation.
this._viewContainer.style.height = viewRect.height + "px";
@ -894,7 +956,13 @@ var PanelMultiView = class extends this.AssociatedToNode {
// sliding animation with smaller views.
viewNode.style.width = viewRect.width + "px";
await window.promiseDocumentFlushed(() => {});
// For proper bookkeeping, mark the view that is about to be scrolled out of
// the visible area as inactive, because it won't be possible to simulate
// mouse events on it properly. In practice this isn't important, because we
// use the separate "transitioning" attribute on the panel to suppress
// pointer events. This allows mouse events to be available for the main
// view in regression tests that wait for the "popupshown" event.
prevPanelView.active = false;
// Kick off the transition!
details.phase = TRANSITION_PHASES.TRANSITION;
@ -926,8 +994,6 @@ var PanelMultiView = class extends this.AssociatedToNode {
// Apply the final visibility, unless the view was closed in the meantime.
if (nextPanelView.node.panelMultiView == this.node) {
prevPanelView.visible = false;
nextPanelView.visible = true;
nextPanelView.descriptionHeightWorkaround();
}
// This will complete the operation by removing any transition properties.
@ -958,9 +1024,6 @@ var PanelMultiView = class extends this.AssociatedToNode {
// Do the things we _always_ need to do whenever the transition ends or is
// interrupted.
previousViewNode.removeAttribute("in-transition");
viewNode.removeAttribute("in-transition");
if (anchor)
anchor.removeAttribute("open");
@ -1060,7 +1123,7 @@ var PanelMultiView = class extends this.AssociatedToNode {
this.openViews.forEach(panelView => panelView.clearNavigation());
break;
case "popupshowing": {
this.node.setAttribute("panelopen", "true");
this._viewContainer.setAttribute("panelopen", "true");
if (!this.node.hasAttribute("disablekeynav")) {
this.window.addEventListener("keydown", this);
this._panel.addEventListener("mousemove", this);
@ -1085,15 +1148,17 @@ var PanelMultiView = class extends this.AssociatedToNode {
break;
}
case "popupshown":
let mainPanelView = PanelView.forNode(this._mainView);
// Now that the main view is visible, we can check the height of the
// description elements it contains.
PanelView.forNode(this._mainView).descriptionHeightWorkaround();
mainPanelView.descriptionHeightWorkaround();
this._activateView(mainPanelView);
break;
case "popuphidden": {
// WebExtensions consumers can hide the popup from viewshowing, or
// mid-transition, which disrupts our state:
this._transitioning = false;
this.node.removeAttribute("panelopen");
this._viewContainer.removeAttribute("panelopen");
this._cleanupTransitionPhase();
this.window.removeEventListener("keydown", this);
this._panel.removeEventListener("mousemove", this);
@ -1117,6 +1182,16 @@ var PanelMultiView = class extends this.AssociatedToNode {
* This is associated to <panelview> elements.
*/
var PanelView = class extends this.AssociatedToNode {
constructor(node) {
super(node);
/**
* Indicates whether the view is active. When this is false, consumers can
* wait for the ViewShown event to know when the view becomes active.
*/
this.active = false;
}
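A short sketch of the consumer-facing contract described in the constructor comment above (the view id is made up): a caller that needs an active view can check this flag through PanelView.forNode and otherwise wait for ViewShown, as the new browser_library_after_appMenu.js test does.

// Illustrative consumer, inside an async function.
let viewNode = document.getElementById("some-panelview"); // assumed id
if (!PanelView.forNode(viewNode).active) {
  await new Promise(resolve => {
    viewNode.addEventListener("ViewShown", resolve, { once: true });
  });
}
// The view is now fully scrolled in and accepts mouse and keyboard events.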
/**
* The "mainview" attribute is set before the panel is opened when this view
* is displayed as the main view, and is removed before the <panelview> is
@ -1131,11 +1206,16 @@ var PanelView = class extends this.AssociatedToNode {
}
}
/**
* Determines whether the view is visible. Setting this to false also resets
* the "active" property.
*/
set visible(value) {
if (value) {
this.node.setAttribute("current", true);
this.node.setAttribute("visible", true);
} else {
this.node.removeAttribute("current");
this.node.removeAttribute("visible");
this.active = false;
}
}
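Because the attribute name changes from "current" to "visible" here, the browser tests updated later in this diff assert on the new attribute. A minimal example of that pattern (the view id is made up):

// Illustrative test assertion against the attribute set by this setter.
let examplePanelView = document.getElementById("appMenu-exampleView"); // assumed id
ok(examplePanelView.hasAttribute("visible"), "The view is currently displayed");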
@ -1181,8 +1261,8 @@ var PanelView = class extends this.AssociatedToNode {
"subviewbutton subviewbutton-iconic subviewbutton-back";
backButton.setAttribute("closemenu", "none");
backButton.setAttribute("tabindex", "0");
backButton.setAttribute("tooltip",
this.node.getAttribute("data-subviewbutton-tooltip"));
backButton.setAttribute("aria-label",
gBundle.GetStringFromName("panel.back"));
backButton.addEventListener("command", () => {
// The panelmultiview element may change if the view is reused.
this.node.panelMultiView.goBack();

View file

@ -3,8 +3,6 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
browser.jar:
content/browser/customizableui/panelUI.css
content/browser/customizableui/panelUI.js
content/browser/customizableui/panelUI.xml
content/browser/customizableui/toolbar.xml

View file

@ -1,27 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
.panel-viewcontainer {
overflow: hidden;
}
.panel-viewstack {
overflow: visible;
transition: height var(--panelui-subview-transition-duration);
}
.panel-viewcontainer[panelopen] {
transition-property: height;
transition-timing-function: var(--animation-easing-function);
transition-duration: var(--panelui-subview-transition-duration);
will-change: height;
}
panelmultiview[transitioning] {
pointer-events: none;
}
.panel-viewcontainer.offscreen {
position: absolute;
}

View file

@ -423,10 +423,7 @@ const PanelUI = {
}
aAnchor.open = false;
// Ensure we run the destructor:
multiView.instance.destructor();
tempPanel.remove();
PanelMultiView.removePopup(tempPanel);
};
if (aAnchor.parentNode.id == "PersonalToolbar") {

View file

@ -1,42 +0,0 @@
<?xml version="1.0"?>
<!-- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
<!DOCTYPE bindings [
<!ENTITY % browserDTD SYSTEM "chrome://browser/locale/browser.dtd">
%browserDTD;
]>
<bindings id="browserPanelUIBindings"
xmlns="http://www.mozilla.org/xbl"
xmlns:xul="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
xmlns:xbl="http://www.mozilla.org/xbl">
<binding id="panelmultiview">
<resources>
<stylesheet src="chrome://browser/content/customizableui/panelUI.css"/>
</resources>
<content data-subviewbutton-tooltip="&backCmd.label;">
<xul:box anonid="viewContainer" class="panel-viewcontainer" xbl:inherits="panelopen,transitioning">
<xul:box anonid="viewStack" xbl:inherits="transitioning" class="panel-viewstack">
<children includes="panelview"/>
</xul:box>
</xul:box>
<xul:box class="panel-viewcontainer offscreen">
<xul:box anonid="offscreenViewStack" class="panel-viewstack"/>
</xul:box>
</content>
<implementation>
<constructor><![CDATA[
const {PanelMultiView} = ChromeUtils.import("resource:///modules/PanelMultiView.jsm", {});
this.instance = PanelMultiView.forNode(this);
this.instance.connect();
]]></constructor>
<destructor><![CDATA[
this.instance.destructor();
]]></destructor>
</implementation>
</binding>
</bindings>

View file

@ -141,6 +141,7 @@ skip-if = os == "linux" # linux doesn't get drag space (no tabsintitlebar)
[browser_drag_outside_palette.js]
[browser_exit_background_customize_mode.js]
[browser_insert_before_moved_node.js]
[browser_library_after_appMenu.js]
[browser_overflow_use_subviews.js]
[browser_panel_keyboard_navigation.js]
[browser_panel_toggle.js]
@ -164,3 +165,7 @@ subsuite = clipboard
[browser_sidebar_toggle.js]
[browser_remote_tabs_button.js]
[browser_widget_animation.js]
# Unit tests for the PanelMultiView module. These are independent from
# CustomizableUI, but are located here together with the module they're testing.
[browser_PanelMultiView.js]

View file

@ -28,7 +28,7 @@ add_task(async function() {
let promise = BrowserTestUtils.waitForEvent(historyPanel, "ViewShown");
historyButton.click();
await promise;
ok(historyPanel.getAttribute("current"), "History Panel is in view");
ok(historyPanel.getAttribute("visible"), "History Panel is in view");
let browserLoaded = BrowserTestUtils.browserLoaded(tab.linkedBrowser);
let panelHiddenPromise = promiseOverflowHidden(window);

View file

@ -37,7 +37,7 @@ add_task(async function() {
charEncodingButton.click();
await subviewShownPromise;
ok(characterEncodingView.hasAttribute("current"), "The Character encoding panel is displayed");
ok(characterEncodingView.hasAttribute("visible"), "The Character encoding panel is displayed");
let pinnedEncodings = document.getElementById("PanelUI-characterEncodingView-pinned");
let charsetsList = document.getElementById("PanelUI-characterEncodingView-charsets");

View file

@ -31,21 +31,12 @@ add_task(async function testAddOnBeforeCreatedWidget() {
ok(widgetNode, "Widget should exist");
ok(viewNode, "Panelview should exist");
let widgetPanel;
let panelShownPromise;
let viewShownPromise = new Promise(resolve => {
viewNode.addEventListener("ViewShown", () => {
widgetPanel = document.getElementById("customizationui-widget-panel");
ok(widgetPanel, "Widget panel should exist");
// Add the popupshown event listener directly inside the ViewShown event
// listener to avoid missing the event.
panelShownPromise = promisePanelElementShown(window, widgetPanel);
resolve();
}, { once: true });
});
let viewShownPromise = BrowserTestUtils.waitForEvent(viewNode, "ViewShown");
widgetNode.click();
await viewShownPromise;
await panelShownPromise;
let widgetPanel = document.getElementById("customizationui-widget-panel");
ok(widgetPanel, "Widget panel should exist");
let panelHiddenPromise = promisePanelElementHidden(window, widgetPanel);
widgetPanel.hidePopup();
@ -55,9 +46,9 @@ add_task(async function testAddOnBeforeCreatedWidget() {
await waitForOverflowButtonShown();
await document.getElementById("nav-bar").overflowable.show();
viewShownPromise = BrowserTestUtils.waitForEvent(viewNode, "ViewShown");
widgetNode.click();
await BrowserTestUtils.waitForEvent(viewNode, "ViewShown");
await viewShownPromise;
let panelHidden = promiseOverflowHidden(window);
PanelUI.overflowPanel.hidePopup();

View file

@ -0,0 +1,478 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
/**
* Unit tests for the PanelMultiView module.
*/
ChromeUtils.import("resource:///modules/PanelMultiView.jsm");
const PANELS_COUNT = 2;
let gPanelAnchors = [];
let gPanels = [];
let gPanelMultiViews = [];
const PANELVIEWS_COUNT = 4;
let gPanelViews = [];
let gPanelViewLabels = [];
const EVENT_TYPES = ["popupshown", "popuphidden", "PanelMultiViewHidden",
"ViewShowing", "ViewShown", "ViewHiding"];
/**
* Checks that the element is displayed, including the state of the popup where
* the element is located. This can trigger a synchronous reflow if necessary,
* because even though the code under test is designed to avoid synchronous
* reflows, it can raise completion events while a layout flush is still needed.
*
* In production code, event handlers for ViewShown have to wait for a flush if
* they need to read style or layout information, like other code normally does.
*/
function is_visible(element) {
var style = element.ownerGlobal.getComputedStyle(element);
if (style.display == "none")
return false;
if (style.visibility != "visible")
return false;
if (style.display == "-moz-popup" && element.state != "open")
return false;
// Hiding a parent element will hide all its children
if (element.parentNode != element.ownerDocument)
return is_visible(element.parentNode);
return true;
}
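As the comment above points out, ViewShown can fire while a layout flush is still pending, so a production handler that needs geometry should wait for a flush first. A rough sketch under that assumption (the handler and the measured property are illustrative):

// Illustrative ViewShown handler that needs layout information.
gPanelViews[0].addEventListener("ViewShown", async event => {
  // Read sizes inside promiseDocumentFlushed so the handler does not force
  // a synchronous reflow of its own.
  let height = await window.promiseDocumentFlushed(
    () => event.originalTarget.getBoundingClientRect().height);
  info("View height after flush: " + height);
}, { once: true });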
/**
* Checks whether the label in the specified view is visible.
*/
function assertLabelVisible(viewIndex, expectedVisible) {
Assert.equal(is_visible(gPanelViewLabels[viewIndex]), expectedVisible,
`Visibility of label in view ${viewIndex}`);
}
/**
* Opens the specified view as the main view in the specified panel.
*/
async function openPopup(panelIndex, viewIndex) {
gPanelMultiViews[panelIndex].setAttribute("mainViewId",
gPanelViews[viewIndex].id);
let promiseShown = BrowserTestUtils.waitForEvent(gPanels[panelIndex],
"popupshown");
PanelMultiView.openPopup(gPanels[panelIndex], gPanelAnchors[panelIndex],
"bottomcenter topright");
await promiseShown;
assertLabelVisible(viewIndex, true);
}
/**
* Closes the specified panel.
*/
async function hidePopup(panelIndex) {
gPanelMultiViews[panelIndex].setAttribute("mainViewId",
gPanelViews[panelIndex].id);
let promiseHidden = BrowserTestUtils.waitForEvent(gPanels[panelIndex],
"popuphidden");
PanelMultiView.hidePopup(gPanels[panelIndex]);
await promiseHidden;
}
/**
* Opens the specified subview in the specified panel.
*/
async function showSubView(panelIndex, viewIndex) {
let promiseShown = BrowserTestUtils.waitForEvent(gPanelViews[viewIndex],
"ViewShown");
gPanelMultiViews[panelIndex].showSubView(gPanelViews[viewIndex]);
await promiseShown;
assertLabelVisible(viewIndex, true);
}
/**
* Navigates backwards to the specified view, which is displayed as a result.
*/
async function goBack(panelIndex, viewIndex) {
let promiseShown = BrowserTestUtils.waitForEvent(gPanelViews[viewIndex],
"ViewShown");
gPanelMultiViews[panelIndex].goBack();
await promiseShown;
assertLabelVisible(viewIndex, true);
}
/**
* Records the specified events on an element into the specified array. An
* optional callback can be used to respond to events and trigger nested events.
*/
function recordEvents(element, eventTypes, recordArray,
eventCallback = () => {}) {
let nestedEvents = [];
element.recorders = eventTypes.map(eventType => {
let recorder = {
eventType,
listener(event) {
let eventString = nestedEvents.join("") +
`${event.originalTarget.id}: ${event.type}`;
info(`Event on ${eventString}`);
recordArray.push(eventString);
// Any synchronous event triggered from within the given callback will
// include information about the current event.
nestedEvents.unshift(`${eventString} > `);
eventCallback(event);
nestedEvents.shift();
},
};
element.addEventListener(recorder.eventType, recorder.listener);
return recorder;
});
}
/**
* Stops recording events on an element.
*/
function stopRecordingEvents(element) {
for (let recorder of element.recorders) {
element.removeEventListener(recorder.eventType, recorder.listener);
}
delete element.recorders;
}
/**
* Sets up the elements in the browser window that will be used by all the other
* regression tests. Since the panel and view elements can live anywhere in the
* document, they are simply added to the same toolbar as the panel anchors.
*
* <toolbar id="nav-bar">
* <toolbarbutton/> -> gPanelAnchors[panelIndex]
* <panel> -> gPanels[panelIndex]
* <panelmultiview/> -> gPanelMultiViews[panelIndex]
* </panel>
* <panelview> -> gPanelViews[viewIndex]
* <label/> -> gPanelViewLabels[viewIndex]
* </panelview>
* </toolbar>
*/
add_task(async function test_setup() {
let navBar = document.getElementById("nav-bar");
for (let i = 0; i < PANELS_COUNT; i++) {
gPanelAnchors[i] = document.createElement("toolbarbutton");
gPanelAnchors[i].classList.add("toolbarbutton-1",
"chromeclass-toolbar-additional");
navBar.appendChild(gPanelAnchors[i]);
gPanels[i] = document.createElement("panel");
gPanels[i].id = "panel-" + i;
gPanels[i].setAttribute("type", "arrow");
gPanels[i].setAttribute("photon", true);
navBar.appendChild(gPanels[i]);
gPanelMultiViews[i] = document.createElement("panelmultiview");
gPanelMultiViews[i].id = "panelmultiview-" + i;
gPanels[i].appendChild(gPanelMultiViews[i]);
}
for (let i = 0; i < PANELVIEWS_COUNT; i++) {
gPanelViews[i] = document.createElement("panelview");
gPanelViews[i].id = "panelview-" + i;
navBar.appendChild(gPanelViews[i]);
gPanelViewLabels[i] = document.createElement("label");
gPanelViewLabels[i].setAttribute("value", "PanelView " + i);
gPanelViews[i].appendChild(gPanelViewLabels[i]);
}
registerCleanupFunction(() => {
[...gPanelAnchors, ...gPanels, ...gPanelViews].forEach(e => e.remove());
});
});
/**
* Shows and hides all views in a panel with this static structure:
*
* - Panel 0
* - View 0
* - View 1
* - View 3
* - View 2
*/
add_task(async function test_simple() {
// Show main view 0.
await openPopup(0, 0);
// Show and hide subview 1.
await showSubView(0, 1);
assertLabelVisible(0, false);
await goBack(0, 0);
assertLabelVisible(1, false);
// Show subview 3.
await showSubView(0, 3);
assertLabelVisible(0, false);
// Show and hide subview 2.
await showSubView(0, 2);
assertLabelVisible(3, false);
await goBack(0, 3);
assertLabelVisible(2, false);
// Hide subview 3.
await goBack(0, 0);
assertLabelVisible(3, false);
// Hide main view 0.
await hidePopup(0);
assertLabelVisible(0, false);
});
/**
* Tests the event sequence in a panel with this static structure:
*
* - Panel 0
* - View 0
* - View 1
* - View 3
* - View 2
*/
add_task(async function test_simple_event_sequence() {
let recordArray = [];
recordEvents(gPanels[0], EVENT_TYPES, recordArray);
await openPopup(0, 0);
await showSubView(0, 1);
await goBack(0, 0);
await showSubView(0, 3);
await showSubView(0, 2);
await goBack(0, 3);
await goBack(0, 0);
await hidePopup(0);
stopRecordingEvents(gPanels[0]);
Assert.deepEqual(recordArray, [
"panelview-0: ViewShowing",
"panelview-0: ViewShown",
"panel-0: popupshown",
"panelview-1: ViewShowing",
"panelview-1: ViewShown",
"panelview-1: ViewHiding",
"panelview-0: ViewShown",
"panelview-3: ViewShowing",
"panelview-3: ViewShown",
"panelview-2: ViewShowing",
"panelview-2: ViewShown",
"panelview-2: ViewHiding",
"panelview-3: ViewShown",
"panelview-3: ViewHiding",
"panelview-0: ViewShown",
"panelview-0: ViewHiding",
"panelmultiview-0: PanelMultiViewHidden",
"panel-0: popuphidden",
]);
});
/**
* Tests reusing views that are already open in another panel. In this test, the
* structure of the first panel will change dynamically:
*
* - Panel 0
* - View 0
* - View 1
* - Panel 1
* - View 1
* - View 2
* - Panel 0
* - View 1
* - View 0
*/
add_task(async function test_switch_event_sequence() {
let recordArray = [];
recordEvents(gPanels[0], EVENT_TYPES, recordArray);
recordEvents(gPanels[1], EVENT_TYPES, recordArray);
// Show panel 0.
await openPopup(0, 0);
await showSubView(0, 1);
// Show panel 1 with the view that is already open and visible in panel 0.
// This will close panel 0 automatically.
await openPopup(1, 1);
await showSubView(1, 2);
// Show panel 0 with a view that is already open but invisible in panel 1.
// This will close panel 1 automatically.
await openPopup(0, 1);
await showSubView(0, 0);
// Hide panel 0.
await hidePopup(0);
stopRecordingEvents(gPanels[0]);
stopRecordingEvents(gPanels[1]);
Assert.deepEqual(recordArray, [
"panelview-0: ViewShowing",
"panelview-0: ViewShown",
"panel-0: popupshown",
"panelview-1: ViewShowing",
"panelview-1: ViewShown",
"panelview-1: ViewHiding",
"panelview-0: ViewHiding",
"panelmultiview-0: PanelMultiViewHidden",
"panel-0: popuphidden",
"panelview-1: ViewShowing",
"panel-1: popupshown",
"panelview-1: ViewShown",
"panelview-2: ViewShowing",
"panelview-2: ViewShown",
"panel-1: popuphidden",
"panelview-2: ViewHiding",
"panelview-1: ViewHiding",
"panelmultiview-1: PanelMultiViewHidden",
"panelview-1: ViewShowing",
"panelview-1: ViewShown",
"panel-0: popupshown",
"panelview-0: ViewShowing",
"panelview-0: ViewShown",
"panelview-0: ViewHiding",
"panelview-1: ViewHiding",
"panelmultiview-0: PanelMultiViewHidden",
"panel-0: popuphidden",
]);
});
/**
* Tests the event sequence when opening the main view is canceled.
*/
add_task(async function test_cancel_mainview_event_sequence() {
let recordArray = [];
recordEvents(gPanels[0], EVENT_TYPES, recordArray, event => {
if (event.type == "ViewShowing") {
event.preventDefault();
}
});
gPanelMultiViews[0].setAttribute("mainViewId", gPanelViews[0].id);
let promiseHidden = BrowserTestUtils.waitForEvent(gPanels[0], "popuphidden");
PanelMultiView.openPopup(gPanels[0], gPanelAnchors[0],
"bottomcenter topright");
await promiseHidden;
stopRecordingEvents(gPanels[0]);
Assert.deepEqual(recordArray, [
"panelview-0: ViewShowing",
"panelview-0: ViewHiding",
"panelmultiview-0: popuphidden",
]);
});
/**
* Tests the event sequence when opening a subview is canceled.
*/
add_task(async function test_cancel_subview_event_sequence() {
let recordArray = [];
recordEvents(gPanels[0], EVENT_TYPES, recordArray, event => {
if (event.type == "ViewShowing" &&
event.originalTarget.id == gPanelViews[1].id) {
event.preventDefault();
}
});
await openPopup(0, 0);
let promiseHiding = BrowserTestUtils.waitForEvent(gPanelViews[1],
"ViewHiding");
gPanelMultiViews[0].showSubView(gPanelViews[1]);
await promiseHiding;
// Only the subview should have received the hidden event at this point.
Assert.deepEqual(recordArray, [
"panelview-0: ViewShowing",
"panelview-0: ViewShown",
"panel-0: popupshown",
"panelview-1: ViewShowing",
"panelview-1: ViewHiding",
]);
recordArray.length = 0;
await hidePopup(0);
stopRecordingEvents(gPanels[0]);
Assert.deepEqual(recordArray, [
"panelview-0: ViewHiding",
"panelmultiview-0: PanelMultiViewHidden",
"panel-0: popuphidden",
]);
});
/**
* Tests the event sequence when closing the panel while opening the main view.
*/
add_task(async function test_close_while_showing_mainview_event_sequence() {
let recordArray = [];
recordEvents(gPanels[0], EVENT_TYPES, recordArray, event => {
if (event.type == "ViewShowing") {
PanelMultiView.hidePopup(gPanels[0]);
}
});
gPanelMultiViews[0].setAttribute("mainViewId", gPanelViews[0].id);
let promiseHidden = BrowserTestUtils.waitForEvent(gPanels[0], "popuphidden");
let promiseHiding = BrowserTestUtils.waitForEvent(gPanelViews[0],
"ViewHiding");
PanelMultiView.openPopup(gPanels[0], gPanelAnchors[0],
"bottomcenter topright");
await promiseHiding;
await promiseHidden;
stopRecordingEvents(gPanels[0]);
Assert.deepEqual(recordArray, [
"panelview-0: ViewShowing",
"panelview-0: ViewShowing > panelmultiview-0: popuphidden",
"panelview-0: ViewShowing > panelview-0: ViewHiding",
]);
});
/**
* Tests the event sequence when closing the panel while opening a subview.
*/
add_task(async function test_close_while_showing_subview_event_sequence() {
let recordArray = [];
recordEvents(gPanels[0], EVENT_TYPES, recordArray, event => {
if (event.type == "ViewShowing" &&
event.originalTarget.id == gPanelViews[1].id) {
PanelMultiView.hidePopup(gPanels[0]);
}
});
await openPopup(0, 0);
let promiseHidden = BrowserTestUtils.waitForEvent(gPanels[0], "popuphidden");
gPanelMultiViews[0].showSubView(gPanelViews[1]);
await promiseHidden;
stopRecordingEvents(gPanels[0]);
Assert.deepEqual(recordArray, [
"panelview-0: ViewShowing",
"panelview-0: ViewShown",
"panel-0: popupshown",
"panelview-1: ViewShowing",
"panelview-1: ViewShowing > panelview-1: ViewHiding",
"panelview-1: ViewShowing > panelview-0: ViewHiding",
"panelview-1: ViewShowing > panelmultiview-0: PanelMultiViewHidden",
"panelview-1: ViewShowing > panel-0: popuphidden",
]);
});

View file

@ -0,0 +1,37 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
/**
* Checks that opening the Library view using the default toolbar button also
* works while the view is displayed in the main menu.
*/
add_task(async function test_library_after_appMenu() {
await PanelUI.show();
// Show the Library view as a subview of the main menu.
let libraryView = document.getElementById("appMenu-libraryView");
let promise = BrowserTestUtils.waitForEvent(libraryView, "ViewShown");
document.getElementById("appMenu-library-button").click();
await promise;
// Show the Library view as the main view of the Library panel.
promise = BrowserTestUtils.waitForEvent(libraryView, "ViewShown");
document.getElementById("library-button").click();
await promise;
// Navigate to the History subview.
let historyView = document.getElementById("PanelUI-history");
promise = BrowserTestUtils.waitForEvent(historyView, "ViewShown");
document.getElementById("appMenu-library-history-button").click();
await promise;
Assert.ok(PanelView.forNode(historyView).active);
// Close the Library panel.
let historyPanel = historyView.closest("panel");
promise = BrowserTestUtils.waitForEvent(historyPanel, "popuphidden");
historyPanel.hidePopup();
await promise;
});

View file

@ -38,7 +38,7 @@ add_task(async function testSyncRemoteTabsButtonFunctionality() {
// click the button - the panel should open.
syncRemoteTabsBtn.click();
await viewShown;
ok(remoteTabsPanel.getAttribute("current"), "Sync Panel is in view");
ok(remoteTabsPanel.getAttribute("visible"), "Sync Panel is in view");
// Find and click the "setup" button.
let syncNowButton = document.getElementById("PanelUI-remotetabs-syncnow");

View file

@ -86,7 +86,7 @@ async function openPrefsFromMenuPanel(expectedPanelId, entryPoint) {
let viewShownPromise = BrowserTestUtils.waitForEvent(syncPanel, "ViewShown");
syncButton.click();
await Promise.all([tabsUpdatedPromise, viewShownPromise]);
ok(syncPanel.getAttribute("current"), "Sync Panel is in view");
ok(syncPanel.getAttribute("visible"), "Sync Panel is in view");
// Sync is not configured - verify that state is reflected.
let subpanel = document.getElementById(expectedPanelId);
@ -187,7 +187,7 @@ add_task(async function() {
let syncButton = document.getElementById("sync-button");
syncButton.click();
await Promise.all([tabsUpdatedPromise, viewShownPromise]);
ok(syncPanel.getAttribute("current"), "Sync Panel is in view");
ok(syncPanel.getAttribute("visible"), "Sync Panel is in view");
let subpanel = document.getElementById("PanelUI-remotetabs-main");
ok(!subpanel.hidden, "main pane is visible");
@ -352,7 +352,7 @@ add_task(async function() {
await Promise.all([tabsUpdatedPromise, viewShownPromise]);
// Check pre-conditions
ok(syncPanel.getAttribute("current"), "Sync Panel is in view");
ok(syncPanel.getAttribute("visible"), "Sync Panel is in view");
let subpanel = document.getElementById("PanelUI-remotetabs-main");
ok(!subpanel.hidden, "main pane is visible");
let deck = document.getElementById("PanelUI-remotetabs-deck");

View file

@ -102,6 +102,7 @@ skip-if = (os == 'win' && ccov) # Bug 1423667
[browser_ext_menus_events.js]
[browser_ext_menus_refresh.js]
[browser_ext_omnibox.js]
skip-if = debug && (os == 'linux' || os == 'mac') # Bug 1417052
[browser_ext_openPanel.js]
[browser_ext_optionsPage_browser_style.js]
[browser_ext_optionsPage_modals.js]

View file

@ -35,6 +35,7 @@ skip-if = artifact # bug 1315953
[browser_oneOffContextMenu.js]
[browser_oneOffContextMenu_setDefault.js]
[browser_oneOffHeader.js]
skip-if = os == "mac" #1421238
[browser_private_search_perwindowpb.js]
[browser_yahoo.js]
[browser_abouthome_behavior.js]

View file

@ -941,3 +941,8 @@ midi.shareWithSite.message = Will you allow %S to access your MIDI Devices?
midi.shareSysexWithFile.message = Will you allow this local file to access your MIDI devices and send/receive SysEx messages?
# LOCALIZATION NOTE (midi.shareSysexWithFile.message): %S is the name of the site URL (https://...) requesting MIDI access
midi.shareSysexWithSite.message = Will you allow %S to access your MIDI devices and send/receive SysEx messages?
# LOCALIZATION NOTE (panel.back):
# This is used by screen readers to label the "back" button in various browser
# popup panels, including the sliding subviews of the main menu.
panel.back = Back
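For context, a small sketch of how this string is consumed when labeling the back button for screen readers; the bundle and backButton variables are illustrative, and PanelMultiView.jsm above uses an equivalent lazy gBundle getter:

// Illustrative lookup of the new "panel.back" string.
let bundle = Services.strings.createBundle(
  "chrome://browser/locale/browser.properties");
backButton.setAttribute("aria-label", bundle.GetStringFromName("panel.back"));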

View file

@ -51,7 +51,7 @@ class AutocompletePopup extends Component {
computeState({ autocompleteProvider, filter }) {
let list = autocompleteProvider(filter);
let selectedIndex = list.length == 1 ? 0 : -1;
let selectedIndex = list.length > 0 ? 0 : -1;
return { list, selectedIndex };
}
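The practical effect is that the first match is now preselected whenever the provider returns any results, rather than only when it returns exactly one. A tiny illustration with a made-up result list:

// Illustrative only: three matches returned for some filter.
const list = ["a1", "a2", "a3"];
const before = list.length == 1 ? 0 : -1; // -1: nothing preselected
const after = list.length > 0 ? 0 : -1;   //  0: first item preselected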

View file

@ -26,15 +26,19 @@ window.onload = async function () {
* compared with the reference item's value as a test
*
* @params {Node} - Node to be compared
* @reference {array} - Reference array for comparison
@reference {array} - Reference array for comparison. The selected item is
highlighted by wrapping it in brackets, i.e. ["[abc]", "ab", "abcPQR"]
marks the element "abc" as selected
*/
function compareAutocompleteList(list, reference) {
let items = [...list.children].map(el => el.textContent);
for (let i = 0; i < items.length; i++) {
let item = items[i];
let ref = reference[i];
is(item, ref, `Item ${i} in list is correct`);
}
const delimiter = " - ";
const observedList = [...list.children].map(el => {
return el.classList.contains("autocomplete-selected")
? `[${el.textContent}]`
: el.textContent
});
is(observedList.join(delimiter), reference.join(delimiter),
"Autocomplete items are rendered as expected");
}
let React = browserRequire("devtools/client/shared/vendor/react");
@ -96,7 +100,7 @@ window.onload = async function () {
await forceRender(component);
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"ABC",
"[ABC]",
"a1",
"a2",
"a3",
@ -105,8 +109,6 @@ window.onload = async function () {
"abc",
]);
is(refs.autocomplete.state.selectedIndex, -1, "Initialised selectedIndex is -1");
// Blur event
$(".devtools-searchinput").blur();
await forceRender(component);
@ -121,35 +123,84 @@ window.onload = async function () {
// ArrowDown
synthesizeKey("KEY_ArrowDown");
await forceRender(component);
is(refs.autocomplete.state.selectedIndex, 0, "selectedIndex is 0");
ok($(".devtools-autocomplete-listbox .autocomplete-item:nth-child(1)")
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"ABC",
"[a1]",
"a2",
"a3",
"a4",
"a5",
"abc",
]);
ok($(".devtools-autocomplete-listbox .autocomplete-item:nth-child(2)")
.className.includes("autocomplete-selected"),
"Selection class applied");
// ArrowUp should roll back to the bottom of the list
// A double ArrowUp should roll back to the bottom of the list
synthesizeKey("KEY_ArrowUp");
synthesizeKey("KEY_ArrowUp");
await forceRender(component);
is(refs.autocomplete.state.selectedIndex, 6, "ArrowUp works");
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"ABC",
"a1",
"a2",
"a3",
"a4",
"a5",
"[abc]",
]);
// PageUp should take 5 places up
synthesizeKey("KEY_PageUp");
await forceRender(component);
is(refs.autocomplete.state.selectedIndex, 1, "PageUp works");
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"ABC",
"[a1]",
"a2",
"a3",
"a4",
"a5",
"abc",
]);
// PageDown should take +5 places down
synthesizeKey("KEY_PageDown");
await forceRender(component);
is(refs.autocomplete.state.selectedIndex, 6, "PageDown works");
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"ABC",
"a1",
"a2",
"a3",
"a4",
"a5",
"[abc]",
]);
// Home should take to the top of the list
synthesizeKey("KEY_Home");
await forceRender(component);
is(refs.autocomplete.state.selectedIndex, 0, "Home works");
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"[ABC]",
"a1",
"a2",
"a3",
"a4",
"a5",
"abc",
]);
// End should take to the bottom of the list
synthesizeKey("KEY_End");
await forceRender(component);
is(refs.autocomplete.state.selectedIndex, 6, "End works");
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"ABC",
"a1",
"a2",
"a3",
"a4",
"a5",
"[abc]",
]);
// Key down in existing state should roll over to the top
synthesizeKey("KEY_ArrowDown");
@ -164,7 +215,10 @@ window.onload = async function () {
synthesizeKey("KEY_Backspace");
await forceRender(component);
ok($(".devtools-autocomplete-popup"), "Popup is up");
compareAutocompleteList($(".devtools-autocomplete-listbox"), ["ABC", "abc"]);
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"[ABC]",
"abc"
]);
// Enter key selection
synthesizeKey("KEY_ArrowUp");
@ -212,7 +266,10 @@ window.onload = async function () {
// Test for string "pqr ab" which should show list of ABC, abc
sendString(" ab");
await forceRender(component);
compareAutocompleteList($(".devtools-autocomplete-listbox"), ["ABC", "abc"]);
compareAutocompleteList($(".devtools-autocomplete-listbox"), [
"[ABC]",
"abc"
]);
// Select the first element, value now should be "pqr ABC"
synthesizeMouseAtCenter(

View file

@ -693,8 +693,10 @@ Animation::Tick()
// during the *previous* tick of the refresh driver, it can still be
// ahead of the *current* timeline time when we are using the
// vsync timer so we need to clamp it to the timeline time.
mPendingReadyTime.SetValue(std::min(mTimeline->GetCurrentTime().Value(),
mPendingReadyTime.Value()));
TimeDuration currentTime = mTimeline->GetCurrentTime().Value();
if (currentTime < mPendingReadyTime.Value()) {
mPendingReadyTime.SetValue(currentTime);
}
FinishPendingAt(mPendingReadyTime.Value());
mPendingReadyTime.SetNull();
}

View file

@ -27,3 +27,4 @@ support-files =
[test_bug1412775.xul]
[test_eventctors.xul]
[test_DataTransferItemList.html]
skip-if = !debug && (os == "linux") #Bug 1421150

View file

@ -129,6 +129,7 @@ const char* mozilla::dom::ContentPrefs::gEarlyPrefs[] = {
"javascript.options.parallel_parsing",
"javascript.options.shared_memory",
"javascript.options.spectre.index_masking",
"javascript.options.spectre.object_mitigations.barriers",
"javascript.options.spectre.string_mitigations",
"javascript.options.spectre.value_masking",
"javascript.options.streams",

View file

@ -292,11 +292,10 @@ CamerasChild::NumberOfCapabilities(CaptureEngine aCapEngine,
LOG((__PRETTY_FUNCTION__));
LOG(("NumberOfCapabilities for %s", deviceUniqueIdUTF8));
nsCString unique_id(deviceUniqueIdUTF8);
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine, nsCString>(
mozilla::NewRunnableMethod<CaptureEngine, nsCString>(
"camera::PCamerasChild::SendNumberOfCapabilities",
self,
this,
&CamerasChild::SendNumberOfCapabilities,
aCapEngine,
unique_id);
@ -309,11 +308,10 @@ int
CamerasChild::NumberOfCaptureDevices(CaptureEngine aCapEngine)
{
LOG((__PRETTY_FUNCTION__));
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine>(
mozilla::NewRunnableMethod<CaptureEngine>(
"camera::PCamerasChild::SendNumberOfCaptureDevices",
self,
this,
&CamerasChild::SendNumberOfCaptureDevices,
aCapEngine);
LockAndDispatch<> dispatcher(this, __func__, runnable, 0, mReplyInteger);
@ -337,11 +335,10 @@ int
CamerasChild::EnsureInitialized(CaptureEngine aCapEngine)
{
LOG((__PRETTY_FUNCTION__));
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine>(
mozilla::NewRunnableMethod<CaptureEngine>(
"camera::PCamerasChild::SendEnsureInitialized",
self,
this,
&CamerasChild::SendEnsureInitialized,
aCapEngine);
LockAndDispatch<> dispatcher(this, __func__, runnable, 0, mReplyInteger);
@ -357,11 +354,10 @@ CamerasChild::GetCaptureCapability(CaptureEngine aCapEngine,
{
LOG(("GetCaptureCapability: %s %d", unique_idUTF8, capability_number));
nsCString unique_id(unique_idUTF8);
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine, nsCString, unsigned int>(
mozilla::NewRunnableMethod<CaptureEngine, nsCString, unsigned int>(
"camera::PCamerasChild::SendGetCaptureCapability",
self,
this,
&CamerasChild::SendGetCaptureCapability,
aCapEngine,
unique_id,
@ -400,11 +396,10 @@ CamerasChild::GetCaptureDevice(CaptureEngine aCapEngine,
bool* scary)
{
LOG((__PRETTY_FUNCTION__));
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine, unsigned int>(
mozilla::NewRunnableMethod<CaptureEngine, unsigned int>(
"camera::PCamerasChild::SendGetCaptureDevice",
self,
this,
&CamerasChild::SendGetCaptureDevice,
aCapEngine,
list_number);
@ -445,13 +440,12 @@ CamerasChild::AllocateCaptureDevice(CaptureEngine aCapEngine,
{
LOG((__PRETTY_FUNCTION__));
nsCString unique_id(unique_idUTF8);
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine,
nsCString,
const mozilla::ipc::PrincipalInfo&>(
mozilla::NewRunnableMethod<CaptureEngine,
nsCString,
const mozilla::ipc::PrincipalInfo&>(
"camera::PCamerasChild::SendAllocateCaptureDevice",
self,
this,
&CamerasChild::SendAllocateCaptureDevice,
aCapEngine,
unique_id,
@ -482,11 +476,10 @@ CamerasChild::ReleaseCaptureDevice(CaptureEngine aCapEngine,
const int capture_id)
{
LOG((__PRETTY_FUNCTION__));
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine, int>(
mozilla::NewRunnableMethod<CaptureEngine, int>(
"camera::PCamerasChild::SendReleaseCaptureDevice",
self,
this,
&CamerasChild::SendReleaseCaptureDevice,
aCapEngine,
capture_id);
@ -534,11 +527,10 @@ CamerasChild::StartCapture(CaptureEngine aCapEngine,
webrtcCaps.rawType,
webrtcCaps.codecType,
webrtcCaps.interlaced);
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable = mozilla::
NewNonOwningRunnableMethod<CaptureEngine, int, VideoCaptureCapability>(
NewRunnableMethod<CaptureEngine, int, VideoCaptureCapability>(
"camera::PCamerasChild::SendStartCapture",
self,
this,
&CamerasChild::SendStartCapture,
aCapEngine,
capture_id,
@ -551,11 +543,10 @@ int
CamerasChild::StopCapture(CaptureEngine aCapEngine, const int capture_id)
{
LOG((__PRETTY_FUNCTION__));
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> runnable =
mozilla::NewNonOwningRunnableMethod<CaptureEngine, int>(
mozilla::NewRunnableMethod<CaptureEngine, int>(
"camera::PCamerasChild::SendStopCapture",
self,
this,
&CamerasChild::SendStopCapture,
aCapEngine,
capture_id);
@ -624,9 +615,8 @@ CamerasChild::ShutdownParent()
// Delete the parent actor.
// CamerasChild (this) will remain alive and is only deleted by the
// IPC layer when SendAllDone returns.
RefPtr<CamerasChild> self(this);
nsCOMPtr<nsIRunnable> deleteRunnable = mozilla::NewNonOwningRunnableMethod(
"camera::PCamerasChild::SendAllDone", self, &CamerasChild::SendAllDone);
nsCOMPtr<nsIRunnable> deleteRunnable = mozilla::NewRunnableMethod(
"camera::PCamerasChild::SendAllDone", this, &CamerasChild::SendAllDone);
CamerasSingleton::Thread()->Dispatch(deleteRunnable, NS_DISPATCH_NORMAL);
} else {
LOG(("ShutdownParent called without PBackground thread"));

View file

@ -862,7 +862,7 @@ ModuleObject::functionDeclarations()
void
ModuleObject::init(HandleScript script)
{
initReservedSlot(ScriptSlot, PrivateValue(script));
initReservedSlot(ScriptSlot, PrivateGCThingValue(script));
initReservedSlot(StatusSlot, Int32Value(MODULE_STATUS_UNINSTANTIATED));
}
@ -973,7 +973,7 @@ ModuleObject::hasScript() const
JSScript*
ModuleObject::script() const
{
return static_cast<JSScript*>(getReservedSlot(ScriptSlot).toPrivate());
return getReservedSlot(ScriptSlot).toGCThing()->as<JSScript>();
}
static inline void
@ -1026,11 +1026,6 @@ ModuleObject::enclosingScope() const
ModuleObject::trace(JSTracer* trc, JSObject* obj)
{
ModuleObject& module = obj->as<ModuleObject>();
if (module.hasScript()) {
JSScript* script = module.script();
TraceManuallyBarrieredEdge(trc, &script, "Module script");
module.setReservedSlot(ScriptSlot, PrivateValue(script));
}
if (module.hasImportBindings())
module.importBindings().trace(trc);

View file

@ -903,9 +903,8 @@ IsTrailSurrogateWithLeadSurrogate(HandleLinearString input, int32_t index)
* steps 3, 9-14, except 12.a.i, 12.c.i.1.
*/
static RegExpRunStatus
ExecuteRegExp(JSContext* cx, HandleObject regexp, HandleString string,
int32_t lastIndex,
MatchPairs* matches, size_t* endIndex, RegExpStaticsUpdate staticsUpdate)
ExecuteRegExp(JSContext* cx, HandleObject regexp, HandleString string, int32_t lastIndex,
MatchPairs* matches, size_t* endIndex)
{
/*
* WARNING: Despite the presence of spec step comment numbers, this
@ -920,14 +919,9 @@ ExecuteRegExp(JSContext* cx, HandleObject regexp, HandleString string,
if (!re)
return RegExpRunStatus_Error;
RegExpStatics* res;
if (staticsUpdate == UpdateRegExpStatics) {
res = GlobalObject::getRegExpStatics(cx, cx->global());
if (!res)
return RegExpRunStatus_Error;
} else {
res = nullptr;
}
RegExpStatics* res = GlobalObject::getRegExpStatics(cx, cx->global());
if (!res)
return RegExpRunStatus_Error;
RootedLinearString input(cx, string->ensureLinear(cx));
if (!input)
@ -981,15 +975,14 @@ ExecuteRegExp(JSContext* cx, HandleObject regexp, HandleString string,
* steps 3, 9-25, except 12.a.i, 12.c.i.1, 15.
*/
static bool
RegExpMatcherImpl(JSContext* cx, HandleObject regexp, HandleString string,
int32_t lastIndex, RegExpStaticsUpdate staticsUpdate, MutableHandleValue rval)
RegExpMatcherImpl(JSContext* cx, HandleObject regexp, HandleString string, int32_t lastIndex,
MutableHandleValue rval)
{
/* Execute regular expression and gather matches. */
ScopedMatchPairs matches(&cx->tempLifoAlloc());
/* Steps 3, 9-14, except 12.a.i, 12.c.i.1. */
RegExpRunStatus status = ExecuteRegExp(cx, regexp, string, lastIndex,
&matches, nullptr, staticsUpdate);
RegExpRunStatus status = ExecuteRegExp(cx, regexp, string, lastIndex, &matches, nullptr);
if (status == RegExpRunStatus_Error)
return false;
@ -1023,8 +1016,7 @@ js::RegExpMatcher(JSContext* cx, unsigned argc, Value* vp)
MOZ_ALWAYS_TRUE(ToInt32(cx, args[2], &lastIndex));
/* Steps 3, 9-25, except 12.a.i, 12.c.i.1, 15. */
return RegExpMatcherImpl(cx, regexp, string, lastIndex,
UpdateRegExpStatics, args.rval());
return RegExpMatcherImpl(cx, regexp, string, lastIndex, args.rval());
}
/*
@ -1042,8 +1034,7 @@ js::RegExpMatcherRaw(JSContext* cx, HandleObject regexp, HandleString input,
// successful only if the pairs have actually been filled in.
if (maybeMatches && maybeMatches->pairsRaw()[0] >= 0)
return CreateRegExpMatchResult(cx, input, *maybeMatches, output);
return RegExpMatcherImpl(cx, regexp, input, lastIndex,
UpdateRegExpStatics, output);
return RegExpMatcherImpl(cx, regexp, input, lastIndex, output);
}
/*
@ -1053,15 +1044,14 @@ js::RegExpMatcherRaw(JSContext* cx, HandleObject regexp, HandleString input,
* changes to this code need to get reflected in there too.
*/
static bool
RegExpSearcherImpl(JSContext* cx, HandleObject regexp, HandleString string,
int32_t lastIndex, RegExpStaticsUpdate staticsUpdate, int32_t* result)
RegExpSearcherImpl(JSContext* cx, HandleObject regexp, HandleString string, int32_t lastIndex,
int32_t* result)
{
/* Execute regular expression and gather matches. */
ScopedMatchPairs matches(&cx->tempLifoAlloc());
/* Steps 3, 9-14, except 12.a.i, 12.c.i.1. */
RegExpRunStatus status = ExecuteRegExp(cx, regexp, string, lastIndex,
&matches, nullptr, staticsUpdate);
RegExpRunStatus status = ExecuteRegExp(cx, regexp, string, lastIndex, &matches, nullptr);
if (status == RegExpRunStatus_Error)
return false;
@ -1097,7 +1087,7 @@ js::RegExpSearcher(JSContext* cx, unsigned argc, Value* vp)
/* Steps 3, 9-25, except 12.a.i, 12.c.i.1, 15. */
int32_t result = 0;
if (!RegExpSearcherImpl(cx, regexp, string, lastIndex, UpdateRegExpStatics, &result))
if (!RegExpSearcherImpl(cx, regexp, string, lastIndex, &result))
return false;
args.rval().setInt32(result);
@ -1120,23 +1110,7 @@ js::RegExpSearcherRaw(JSContext* cx, HandleObject regexp, HandleString input,
*result = CreateRegExpSearchResult(*maybeMatches);
return true;
}
return RegExpSearcherImpl(cx, regexp, input, lastIndex,
UpdateRegExpStatics, result);
}
bool
js::regexp_exec_no_statics(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
MOZ_ASSERT(args.length() == 2);
MOZ_ASSERT(IsRegExpObject(args[0]));
MOZ_ASSERT(args[1].isString());
RootedObject regexp(cx, &args[0].toObject());
RootedString string(cx, args[1].toString());
return RegExpMatcherImpl(cx, regexp, string, 0,
DontUpdateRegExpStatics, args.rval());
return RegExpSearcherImpl(cx, regexp, input, lastIndex, result);
}
/*
@ -1160,8 +1134,7 @@ js::RegExpTester(JSContext* cx, unsigned argc, Value* vp)
/* Steps 3, 9-14, except 12.a.i, 12.c.i.1. */
size_t endIndex = 0;
RegExpRunStatus status = ExecuteRegExp(cx, regexp, string, lastIndex,
nullptr, &endIndex, UpdateRegExpStatics);
RegExpRunStatus status = ExecuteRegExp(cx, regexp, string, lastIndex, nullptr, &endIndex);
if (status == RegExpRunStatus_Error)
return false;
@ -1186,8 +1159,7 @@ js::RegExpTesterRaw(JSContext* cx, HandleObject regexp, HandleString input,
MOZ_ASSERT(lastIndex >= 0);
size_t endIndexTmp = 0;
RegExpRunStatus status = ExecuteRegExp(cx, regexp, input, lastIndex,
nullptr, &endIndexTmp, UpdateRegExpStatics);
RegExpRunStatus status = ExecuteRegExp(cx, regexp, input, lastIndex, nullptr, &endIndexTmp);
if (status == RegExpRunStatus_Success) {
MOZ_ASSERT(endIndexTmp <= INT32_MAX);
@ -1202,24 +1174,6 @@ js::RegExpTesterRaw(JSContext* cx, HandleObject regexp, HandleString input,
return false;
}
bool
js::regexp_test_no_statics(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
MOZ_ASSERT(args.length() == 2);
MOZ_ASSERT(IsRegExpObject(args[0]));
MOZ_ASSERT(args[1].isString());
RootedObject regexp(cx, &args[0].toObject());
RootedString string(cx, args[1].toString());
size_t ignored = 0;
RegExpRunStatus status = ExecuteRegExp(cx, regexp, string, 0,
nullptr, &ignored, DontUpdateRegExpStatics);
args.rval().setBoolean(status == RegExpRunStatus_Success);
return status != RegExpRunStatus_Error;
}
using CapturesVector = GCVector<Value, 4>;
struct JSSubString


@ -19,10 +19,6 @@ namespace js {
JSObject*
InitRegExpClass(JSContext* cx, HandleObject obj);
// Whether RegExp statics should be updated with the input and results of a
// regular expression execution.
enum RegExpStaticsUpdate { UpdateRegExpStatics, DontUpdateRegExpStatics };
/*
* Legacy behavior of ExecuteRegExp(), which is baked into the JSAPI.
*
@ -71,22 +67,6 @@ intrinsic_GetStringDataProperty(JSContext* cx, unsigned argc, Value* vp);
* The following functions are for use by self-hosted code.
*/
/*
* Behaves like regexp.exec(string), but doesn't set RegExp statics.
*
* Usage: match = regexp_exec_no_statics(regexp, string)
*/
extern MOZ_MUST_USE bool
regexp_exec_no_statics(JSContext* cx, unsigned argc, Value* vp);
/*
* Behaves like regexp.test(string), but doesn't set RegExp statics.
*
* Usage: does_match = regexp_test_no_statics(regexp, string)
*/
extern MOZ_MUST_USE bool
regexp_test_no_statics(JSContext* cx, unsigned argc, Value* vp);
/*
* Behaves like RegExp(source, flags).
* |source| must be a valid regular expression pattern, |flags| is a raw


@ -5,16 +5,7 @@
/* Portions Copyright Norbert Lindenberg 2011-2012. */
/**
* Holder object for encapsulating regexp instances.
*
* Regular expression instances should be created after the initialization of
* self-hosted global.
*/
var internalIntlRegExps = std_Object_create(null);
internalIntlRegExps.unicodeLocaleExtensionSequenceRE = null;
/**
* Regular expression matching a "Unicode locale extension sequence", which the
* Returns the start index of a "Unicode locale extension sequence", which the
* specification defines as: "any substring of a language tag that starts with
* a separator '-' and the singleton 'u' and includes the maximum sequence of
* following non-singleton subtags and their preceding '-' separators."
@ -25,50 +16,110 @@ internalIntlRegExps.unicodeLocaleExtensionSequenceRE = null;
*
* Spec: ECMAScript Internationalization API Specification, 6.2.1.
*/
function getUnicodeLocaleExtensionSequenceRE() {
return internalIntlRegExps.unicodeLocaleExtensionSequenceRE ||
(internalIntlRegExps.unicodeLocaleExtensionSequenceRE =
RegExpCreate("-u(?:-[a-z0-9]{2,8})+"));
function startOfUnicodeExtensions(locale) {
assert(typeof locale === "string", "locale is a string");
assert(IsStructurallyValidLanguageTag(locale), "locale is a language tag");
assert(CanonicalizeLanguageTag(locale) === locale, "locale is a canonicalized language tag");
#define HYPHEN 0x2D
assert(std_String_fromCharCode(HYPHEN) === "-",
"code unit constant should match the expected character");
// A wholly-privateuse or grandfathered locale has no extension sequences.
if (callFunction(std_String_charCodeAt, locale, 1) === HYPHEN) {
assert(locale[0] === "x" || locale[0] === "i",
"locale[1] === '-' implies a privateuse-only or grandfathered locale");
return -1;
}
#undef HYPHEN
// Search for "-u-" marking the start of a Unicode extension sequence.
var start = callFunction(std_String_indexOf, locale, "-u-");
if (start < 0)
return -1;
// And search for "-x-" marking the start of any privateuse component to
// handle the case when "-u-" was only found within a privateuse subtag.
var privateExt = callFunction(std_String_indexOf, locale, "-x-");
if (privateExt >= 0 && privateExt < start)
return -1;
return start;
}
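For readers following the self-hosted code, a minimal C++ sketch of the same scan, assuming a plain std::string and an already-canonicalized tag (illustrative only, not SpiderMonkey API):

#include <string>

// Mirrors startOfUnicodeExtensions above.
int startOfUnicodeExtensionsSketch(const std::string& locale) {
    // A wholly-privateuse ("x-...") or grandfathered ("i-...") tag has no
    // extension sequences.
    if (locale.size() > 1 && locale[1] == '-')
        return -1;
    size_t start = locale.find("-u-");
    if (start == std::string::npos)
        return -1;
    // Ignore a "-u-" that only occurs inside the privateuse component.
    size_t privateExt = locale.find("-x-");
    if (privateExt != std::string::npos && privateExt < start)
        return -1;
    return int(start);
}

For example, the sketch returns 2 for "de-u-co-phonebk" and -1 for "en-x-u-foo", matching the function above.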
/**
* Returns the end index of a Unicode locale extension sequence.
*/
function endOfUnicodeExtensions(locale, start) {
assert(typeof locale === "string", "locale is a string");
assert(IsStructurallyValidLanguageTag(locale), "locale is a language tag");
assert(CanonicalizeLanguageTag(locale) === locale, "locale is a canonicalized language tag");
assert(0 <= start && start < locale.length, "start is an index into locale");
assert(Substring(locale, start, 3) === "-u-", "start points to Unicode extension sequence");
#define HYPHEN 0x2D
assert(std_String_fromCharCode(HYPHEN) === "-",
"code unit constant should match the expected character");
// Search for the start of the next singleton or privateuse subtag.
//
// Begin searching after the smallest possible Unicode locale extension
// sequence, namely |"-u-" 2alphanum|. End searching once the remaining
// characters can't fit the smallest possible singleton or privateuse
// subtag, namely |"-x-" alphanum|. Note the reduced end-limit means
// indexing inside the loop is always in-range.
for (var i = start + 5, end = locale.length - 4; i <= end; i++) {
if (callFunction(std_String_charCodeAt, locale, i) !== HYPHEN)
continue;
if (callFunction(std_String_charCodeAt, locale, i + 2) === HYPHEN)
return i;
// Skip over (i + 1) and (i + 2) because we've just verified they
// aren't "-", so the next possible delimiter can only be at (i + 3).
i += 2;
}
#undef HYPHEN
// If no singleton or privateuse subtag was found, the Unicode extension
// sequence extends until the end of the string.
return locale.length;
}
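Continuing the sketch above, the scan loop translates directly; the bounds keep every access in range because the smallest extension is "-u-" plus a two-character keyword, and the smallest later delimiter is "-x-" plus one alphanumeric character (again illustrative only):

// Mirrors endOfUnicodeExtensions above.
int endOfUnicodeExtensionsSketch(const std::string& locale, int start) {
    for (int i = start + 5, end = int(locale.size()) - 4; i <= end; i++) {
        if (locale[i] != '-')
            continue;
        if (locale[i + 2] == '-')
            return i;            // found the next "-<singleton>-" delimiter
        i += 2;                  // next possible delimiter is at i + 3 or later
    }
    // No later singleton or privateuse subtag: the extension sequence runs to
    // the end of the string.
    return int(locale.size());
}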
/**
* Removes Unicode locale extension sequences from the given language tag.
*/
function removeUnicodeExtensions(locale) {
// A wholly-privateuse locale has no extension sequences.
if (callFunction(std_String_startsWith, locale, "x-"))
var start = startOfUnicodeExtensions(locale);
if (start < 0)
return locale;
// Otherwise, split on "-x-" marking the start of any privateuse component.
// Replace Unicode locale extension sequences in the left half, and return
// the concatenation.
var pos = callFunction(std_String_indexOf, locale, "-x-");
if (pos < 0)
pos = locale.length;
var left = callFunction(String_substring, locale, 0, pos);
var right = callFunction(String_substring, locale, pos);
var unicodeLocaleExtensionSequenceRE = getUnicodeLocaleExtensionSequenceRE();
var extensions = regexp_exec_no_statics(unicodeLocaleExtensionSequenceRE, left);
if (extensions !== null) {
left = callFunction(String_substring, left, 0, extensions.index) +
callFunction(String_substring, left, extensions.index + extensions[0].length);
}
var end = endOfUnicodeExtensions(locale, start);
var left = Substring(locale, 0, start);
var right = Substring(locale, end, locale.length - end);
var combined = left + right;
assert(IsStructurallyValidLanguageTag(combined), "recombination produced an invalid language tag");
assert(function() {
var uindex = callFunction(std_String_indexOf, combined, "-u-");
if (uindex < 0)
return true;
var xindex = callFunction(std_String_indexOf, combined, "-x-");
return xindex > 0 && xindex < uindex;
}(), "recombination failed to remove all Unicode locale extension sequences");
assert(IsStructurallyValidLanguageTag(combined),
"recombination produced an invalid language tag");
assert(startOfUnicodeExtensions(combined) < 0,
"recombination failed to remove all Unicode locale extension sequences");
return combined;
}
/**
* Returns Unicode locale extension sequences from the given language tag.
*/
function getUnicodeExtensions(locale) {
var start = startOfUnicodeExtensions(locale);
assert(start >= 0, "start of Unicode extension sequence not found");
var end = endOfUnicodeExtensions(locale, start);
return Substring(locale, start, end - start);
}
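Putting the helpers together on an illustrative tag, with values worked out by hand against the code above (the Sketch helpers are the ones from the previous two snippets):

#include <cassert>

int main() {
    // "de-u-co-phonebk-x-linux": the extension sequence spans [2, 15).
    assert(startOfUnicodeExtensionsSketch("de-u-co-phonebk-x-linux") == 2);
    assert(endOfUnicodeExtensionsSketch("de-u-co-phonebk-x-linux", 2) == 15);
    // For that tag, getUnicodeExtensions returns "-u-co-phonebk" and
    // removeUnicodeExtensions returns "de-x-linux".
    assert(startOfUnicodeExtensionsSketch("x-private") == -1);  // wholly privateuse
    return 0;
}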
/* eslint-disable complexity */
/**
* Parser for BCP 47 language tags.
@ -702,23 +753,6 @@ function ValidateAndCanonicalizeLanguageTag(locale) {
return CanonicalizeLanguageTagFromObject(localeObj);
}
function localeContainsNoUnicodeExtensions(locale) {
// No "-u-", no possible Unicode extension.
if (callFunction(std_String_indexOf, locale, "-u-") === -1)
return true;
// "-u-" within privateuse also isn't one.
if (callFunction(std_String_indexOf, locale, "-u-") > callFunction(std_String_indexOf, locale, "-x-"))
return true;
// An entirely-privateuse tag doesn't contain extensions.
if (callFunction(std_String_startsWith, locale, "x-"))
return true;
// Otherwise, we have a Unicode extension sequence.
return false;
}
// The last-ditch locale is used if none of the available locales satisfies a
// request. "en-GB" is used based on the assumptions that English is the most
// common second language, that both en-GB and en-US are normally available in
@ -785,7 +819,7 @@ function DefaultLocaleIgnoringAvailableLocales() {
assert(IsStructurallyValidLanguageTag(candidate),
"the candidate must be structurally valid");
assert(localeContainsNoUnicodeExtensions(candidate),
assert(startOfUnicodeExtensions(candidate) < 0,
"the candidate must not contain a Unicode extension sequence");
return candidate;
@ -825,7 +859,7 @@ function DefaultLocale() {
"the computed default locale must be structurally valid");
assert(locale === CanonicalizeLanguageTag(locale),
"the computed default locale must be canonical");
assert(localeContainsNoUnicodeExtensions(locale),
assert(startOfUnicodeExtensions(locale) < 0,
"the computed default locale must not contain a Unicode extension sequence");
localeCache.defaultLocale = locale;
@ -917,7 +951,7 @@ function CanonicalizeLocaleList(locales) {
function BestAvailableLocaleHelper(availableLocales, locale, considerDefaultLocale) {
assert(IsStructurallyValidLanguageTag(locale), "invalid BestAvailableLocale locale structure");
assert(locale === CanonicalizeLanguageTag(locale), "non-canonical BestAvailableLocale locale");
assert(localeContainsNoUnicodeExtensions(locale), "locale must contain no Unicode extensions");
assert(startOfUnicodeExtensions(locale) < 0, "locale must contain no Unicode extensions");
// In the spec, [[availableLocales]] is formally a list of all available
// locales. But in our implementation, it's an *incomplete* list, not
@ -1009,12 +1043,8 @@ function LookupMatcher(availableLocales, requestedLocales) {
result.locale = availableLocale;
// Step 2.c.ii.
if (locale !== noExtensionsLocale) {
var unicodeLocaleExtensionSequenceRE = getUnicodeLocaleExtensionSequenceRE();
var extensionMatch = regexp_exec_no_statics(unicodeLocaleExtensionSequenceRE,
locale);
result.extension = extensionMatch[0];
}
if (locale !== noExtensionsLocale)
result.extension = getUnicodeExtensions(locale);
// Step 2.c.iii.
return result;
@ -1049,11 +1079,9 @@ function BestFitMatcher(availableLocales, requestedLocales) {
*/
function UnicodeExtensionValue(extension, key) {
assert(typeof extension === "string", "extension is a string value");
assert(function() {
var unicodeLocaleExtensionSequenceRE = getUnicodeLocaleExtensionSequenceRE();
var extensionMatch = regexp_exec_no_statics(unicodeLocaleExtensionSequenceRE, extension);
return extensionMatch !== null && extensionMatch[0] === extension;
}(), "extension is a Unicode extension subtag");
assert(callFunction(std_String_startsWith, extension, "-u-") &&
getUnicodeExtensions("und" + extension) === extension,
"extension is a Unicode extension subtag");
assert(typeof key === "string", "key is a string value");
// Step 1.


@ -14,6 +14,7 @@
#include "mozilla/EnumeratedRange.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Unused.h"
#include <type_traits>
@ -442,22 +443,25 @@ CodeGenerator::visitValueToDouble(LValueToDouble* lir)
ValueOperand operand = ToValue(lir, LValueToDouble::Input);
FloatRegister output = ToFloatRegister(lir->output());
Register tag = masm.splitTagForTest(operand);
Label isDouble, isInt32, isBool, isNull, isUndefined, done;
bool hasBoolean = false, hasNull = false, hasUndefined = false;
masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
{
ScratchTagScope tag(masm, operand);
masm.splitTagForTest(operand, tag);
if (mir->conversion() != MToFPInstruction::NumbersOnly) {
masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
hasBoolean = true;
hasUndefined = true;
if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
masm.branchTestNull(Assembler::Equal, tag, &isNull);
hasNull = true;
masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
if (mir->conversion() != MToFPInstruction::NumbersOnly) {
masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
hasBoolean = true;
hasUndefined = true;
if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
masm.branchTestNull(Assembler::Equal, tag, &isNull);
hasNull = true;
}
}
}
@ -497,22 +501,25 @@ CodeGenerator::visitValueToFloat32(LValueToFloat32* lir)
ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
FloatRegister output = ToFloatRegister(lir->output());
Register tag = masm.splitTagForTest(operand);
Label isDouble, isInt32, isBool, isNull, isUndefined, done;
bool hasBoolean = false, hasNull = false, hasUndefined = false;
masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
{
ScratchTagScope tag(masm, operand);
masm.splitTagForTest(operand, tag);
if (mir->conversion() != MToFPInstruction::NumbersOnly) {
masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
hasBoolean = true;
hasUndefined = true;
if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
masm.branchTestNull(Assembler::Equal, tag, &isNull);
hasNull = true;
masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
if (mir->conversion() != MToFPInstruction::NumbersOnly) {
masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
hasBoolean = true;
hasUndefined = true;
if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
masm.branchTestNull(Assembler::Equal, tag, &isNull);
hasNull = true;
}
}
}
@ -751,7 +758,8 @@ CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
return;
}
Register tag = masm.splitTagForTest(value);
ScratchTagScope tag(masm, value);
masm.splitTagForTest(value, tag);
if (mightBeUndefined) {
MOZ_ASSERT(tagCount > 1);
@ -770,7 +778,10 @@ CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
Label notBoolean;
if (tagCount != 1)
masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
masm.branchTestBooleanTruthy(false, value, ifFalsy);
{
ScratchTagScopeRelease _(&tag);
masm.branchTestBooleanTruthy(false, value, ifFalsy);
}
if (tagCount != 1)
masm.jump(ifTruthy);
// Else just fall through to truthiness.
@ -783,7 +794,10 @@ CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
Label notInt32;
if (tagCount != 1)
masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
masm.branchTestInt32Truthy(false, value, ifFalsy);
{
ScratchTagScopeRelease _(&tag);
masm.branchTestInt32Truthy(false, value, ifFalsy);
}
if (tagCount != 1)
masm.jump(ifTruthy);
// Else just fall through to truthiness.
@ -799,8 +813,11 @@ CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
if (tagCount != 1)
masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
Register objreg = masm.extractObject(value, ToRegister(scratch1));
testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, ToRegister(scratch2), ool);
{
ScratchTagScopeRelease _(&tag);
Register objreg = masm.extractObject(value, ToRegister(scratch1));
testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, ToRegister(scratch2), ool);
}
masm.bind(&notObject);
} else {
@ -822,7 +839,10 @@ CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
Label notString;
if (tagCount != 1)
masm.branchTestString(Assembler::NotEqual, tag, &notString);
masm.branchTestStringTruthy(false, value, ifFalsy);
{
ScratchTagScopeRelease _(&tag);
masm.branchTestStringTruthy(false, value, ifFalsy);
}
if (tagCount != 1)
masm.jump(ifTruthy);
// Else just fall through to truthiness.
@ -842,8 +862,11 @@ CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
if (mightBeDouble) {
MOZ_ASSERT(tagCount == 1);
// If we reach here the value is a double.
masm.unboxDouble(value, fr);
masm.branchTestDoubleTruthy(false, fr, ifFalsy);
{
ScratchTagScopeRelease _(&tag);
masm.unboxDouble(value, fr);
masm.branchTestDoubleTruthy(false, fr, ifFalsy);
}
--tagCount;
}
@ -1095,7 +1118,7 @@ CodeGenerator::visitValueToString(LValueToString* lir)
StoreRegisterTo(output));
Label done;
Register tag = masm.splitTagForTest(input);
Register tag = masm.extractTag(input, output);
const JSAtomState& names = gen->runtime->names();
// String
@ -2148,7 +2171,7 @@ CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir)
MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
MOZ_ASSERT(GetValueOutput(lir) == JSReturnOperand);
MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
#if defined(JS_NUNBOX32)
MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Type);
@ -3518,7 +3541,7 @@ void
CodeGenerator::visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins)
{
Register obj = ToRegister(ins->obj());
ValueOperand output = GetValueOutput(ins);
ValueOperand output = ToOutValue(ins);
emitGetPropertyPolymorphic(ins, obj, output.scratchReg(), output);
}
@ -3835,9 +3858,13 @@ CodeGenerator::visitTypeBarrierV(LTypeBarrierV* lir)
Register unboxScratch = ToTempRegisterOrInvalid(lir->unboxTemp());
Register objScratch = ToTempRegisterOrInvalid(lir->objTemp());
// guardObjectType may zero the payload/Value register on speculative paths
// (we should have a defineReuseInput allocation in this case).
Register spectreRegToZero = operand.payloadOrValueReg();
Label miss;
masm.guardTypeSet(operand, lir->mir()->resultTypeSet(), lir->mir()->barrierKind(),
unboxScratch, objScratch, &miss);
unboxScratch, objScratch, spectreRegToZero, &miss);
bailoutFrom(&miss, lir->snapshot());
}
@ -3859,26 +3886,16 @@ CodeGenerator::visitTypeBarrierO(LTypeBarrierO* lir)
if (lir->mir()->barrierKind() != BarrierKind::TypeTagOnly) {
masm.comment("Type tag only");
masm.guardObjectType(obj, lir->mir()->resultTypeSet(), scratch, &miss);
// guardObjectType may zero the object register on speculative paths
// (we should have a defineReuseInput allocation in this case).
Register spectreRegToZero = obj;
masm.guardObjectType(obj, lir->mir()->resultTypeSet(), scratch, spectreRegToZero, &miss);
}
bailoutFrom(&miss, lir->snapshot());
masm.bind(&ok);
}
void
CodeGenerator::visitMonitorTypes(LMonitorTypes* lir)
{
ValueOperand operand = ToValue(lir, LMonitorTypes::Input);
Register unboxScratch = ToTempRegisterOrInvalid(lir->unboxTemp());
Register objScratch = ToTempRegisterOrInvalid(lir->objTemp());
Label matched, miss;
masm.guardTypeSet(operand, lir->mir()->typeSet(), lir->mir()->barrierKind(), unboxScratch,
objScratch, &miss);
bailoutFrom(&miss, lir->snapshot());
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator>
{
@ -5103,13 +5120,26 @@ CodeGenerator::generateArgumentsChecks(bool assert)
if (!types || types->unknown())
continue;
#ifndef JS_CODEGEN_ARM64
// Calculate the offset on the stack of the argument.
// (i - info.startArgSlot()) - Compute index of arg within arg vector.
// ... * sizeof(Value) - Scale by value size.
// ArgToStackOffset(...) - Compute displacement within arg vector.
int32_t offset = ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value));
Address argAddr(masm.getStackPointer(), offset);
masm.guardTypeSet(argAddr, types, BarrierKind::TypeSet, temp1, temp2, &miss);
// guardObjectType will zero the stack pointer register on speculative
// paths.
Register spectreRegToZero = masm.getStackPointer();
masm.guardTypeSet(argAddr, types, BarrierKind::TypeSet, temp1, temp2,
spectreRegToZero, &miss);
#else
// On ARM64, the stack pointer situation is more complicated. When we
// enable Ion, we should figure out how to mitigate Spectre there.
mozilla::Unused << temp1;
mozilla::Unused << temp2;
MOZ_CRASH("NYI");
#endif
}
if (miss.used()) {
@ -5409,7 +5439,7 @@ CodeGenerator::emitAssertObjectOrStringResult(Register input, MIRType type, cons
if (type == MIRType::ObjectOrNull)
masm.branchPtr(Assembler::Equal, input, ImmWord(0), &ok);
if (typeset->getObjectCount() > 0)
masm.guardObjectType(input, typeset, temp, &miss);
masm.guardObjectType(input, typeset, temp, input, &miss);
else
masm.jump(&miss);
masm.jump(&ok);
@ -5475,7 +5505,8 @@ CodeGenerator::emitAssertResultV(const ValueOperand input, const TemporaryTypeSe
if (typeset && !typeset->unknown()) {
// We have a result TypeSet, assert this value is in it.
Label miss, ok;
masm.guardTypeSet(input, typeset, BarrierKind::TypeSet, temp1, temp2, &miss);
masm.guardTypeSet(input, typeset, BarrierKind::TypeSet, temp1, temp2,
input.payloadOrValueReg(), &miss);
masm.jump(&ok);
masm.bind(&miss);
@ -5521,8 +5552,10 @@ CodeGenerator::emitObjectOrStringResultChecks(LInstruction* lir, MDefinition* mi
return;
MOZ_ASSERT(lir->numDefs() == 1);
Register output = ToRegister(lir->getDef(0));
if (lir->getDef(0)->isBogusTemp())
return;
Register output = ToRegister(lir->getDef(0));
emitAssertObjectOrStringResult(output, mir->type(), mir->resultTypeSet());
}
@ -7576,22 +7609,28 @@ CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir)
notNullOrLikeUndefined = label2.ptr();
}
Register tag = masm.splitTagForTest(value);
MDefinition* input = lir->mir()->lhs();
if (input->mightBeType(MIRType::Null))
masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
if (input->mightBeType(MIRType::Undefined))
masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
{
ScratchTagScope tag(masm, value);
masm.splitTagForTest(value, tag);
if (ool) {
// Check whether it's a truthy object or a falsy object that emulates
// undefined.
masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
MDefinition* input = lir->mir()->lhs();
if (input->mightBeType(MIRType::Null))
masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
if (input->mightBeType(MIRType::Undefined))
masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
Register objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined, notNullOrLikeUndefined,
ToRegister(lir->temp()), ool);
// fall through
if (ool) {
// Check whether it's a truthy object or a falsy object that emulates
// undefined.
masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
ScratchTagScopeRelease _(&tag);
Register objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined, notNullOrLikeUndefined,
ToRegister(lir->temp()), ool);
// fall through
}
}
Label done;
@ -7652,28 +7691,33 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(LIsNullOrLikeUndefinedAndBra
addOutOfLineCode(ool, lir->cmpMir());
}
Register tag = masm.splitTagForTest(value);
{
ScratchTagScope tag(masm, value);
masm.splitTagForTest(value, tag);
Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
MDefinition* input = lir->cmpMir()->lhs();
if (input->mightBeType(MIRType::Null))
masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
if (input->mightBeType(MIRType::Undefined))
masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
MDefinition* input = lir->cmpMir()->lhs();
if (input->mightBeType(MIRType::Null))
masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
if (input->mightBeType(MIRType::Undefined))
masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
if (ool) {
masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
if (ool) {
masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
// Objects that emulate undefined are loosely equal to null/undefined.
Register objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
Register scratch = ToRegister(lir->temp());
testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
} else {
masm.jump(ifFalseLabel);
ScratchTagScopeRelease _(&tag);
// Objects that emulate undefined are loosely equal to null/undefined.
Register objreg = masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
Register scratch = ToRegister(lir->temp());
testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch, ool);
} else {
masm.jump(ifFalseLabel);
}
return;
}
return;
}
MOZ_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE);
@ -9677,7 +9721,7 @@ CodeGenerator::visitArgumentsLength(LArgumentsLength* lir)
void
CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir)
{
ValueOperand result = GetValueOutput(lir);
ValueOperand result = ToOutValue(lir);
const LAllocation* index = lir->index();
size_t argvOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
@ -10432,7 +10476,7 @@ CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins)
{
const Register obj = ToRegister(ins->getOperand(0));
size_t slot = ins->mir()->slot();
ValueOperand result = GetValueOutput(ins);
ValueOperand result = ToOutValue(ins);
masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
}
@ -10526,7 +10570,7 @@ CodeGenerator::visitGetNameCache(LGetNameCache* ins)
{
LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
Register envChain = ToRegister(ins->envObj());
ValueOperand output(GetValueOutput(ins));
ValueOperand output = ToOutValue(ins);
Register temp = ToRegister(ins->temp());
IonGetNameIC ic(liveRegs, envChain, output, temp);
@ -10620,7 +10664,7 @@ CodeGenerator::visitGetPropertyCacheV(LGetPropertyCacheV* ins)
TypedOrValueRegister value =
toConstantOrRegister(ins, LGetPropertyCacheV::Value, ins->mir()->value()->type()).reg();
ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCacheV::Id, ins->mir()->idval()->type());
TypedOrValueRegister output = TypedOrValueRegister(GetValueOutput(ins));
TypedOrValueRegister output(ToOutValue(ins));
Register maybeTemp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
addGetPropertyCache(ins, liveRegs, value, id, output, maybeTemp,
@ -10649,7 +10693,7 @@ CodeGenerator::visitGetPropSuperCacheV(LGetPropSuperCacheV* ins)
TypedOrValueRegister receiver =
toConstantOrRegister(ins, LGetPropSuperCacheV::Receiver, ins->mir()->receiver()->type()).reg();
ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCacheV::Id, ins->mir()->idval()->type());
TypedOrValueRegister output = TypedOrValueRegister(GetValueOutput(ins));
TypedOrValueRegister output(ToOutValue(ins));
CacheKind kind = CacheKind::GetElemSuper;
if (id.constant() && id.value().isString()) {
@ -10842,7 +10886,7 @@ CodeGenerator::visitTypeOfV(LTypeOfV* lir)
{
const ValueOperand value = ToValue(lir, LTypeOfV::Input);
Register output = ToRegister(lir->output());
Register tag = masm.splitTagForTest(value);
Register tag = masm.extractTag(value, output);
const JSAtomState& names = gen->runtime->names();
Label done;
@ -11050,7 +11094,7 @@ CodeGenerator::visitToIdV(LToIdV* lir)
ArgList(ToValue(lir, LToIdV::Input)),
StoreValueTo(out));
Register tag = masm.splitTagForTest(input);
Register tag = masm.extractTag(input, out.scratchReg());
masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
masm.moveValue(input, out);
@ -12007,7 +12051,7 @@ CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins)
// proxies in IonBuilder.
Register object = ToRegister(ins->object());
size_t slot = ins->mir()->domMemberSlotIndex();
ValueOperand result = GetValueOutput(ins);
ValueOperand result = ToOutValue(ins);
masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)), result);
}
@ -12622,28 +12666,38 @@ CodeGenerator::visitAssertRangeV(LAssertRangeV* ins)
{
const Range* r = ins->range();
const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
Register tag = masm.splitTagForTest(value);
Label done;
{
Label isNotInt32;
masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
Register unboxInt32 = ToTempUnboxRegister(ins->temp());
Register input = masm.extractInt32(value, unboxInt32);
emitAssertRangeI(r, input);
masm.jump(&done);
masm.bind(&isNotInt32);
}
ScratchTagScope tag(masm, value);
masm.splitTagForTest(value, tag);
{
Label isNotDouble;
masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
FloatRegister input = ToFloatRegister(ins->floatTemp1());
FloatRegister temp = ToFloatRegister(ins->floatTemp2());
masm.unboxDouble(value, input);
emitAssertRangeD(r, input, temp);
masm.jump(&done);
masm.bind(&isNotDouble);
{
Label isNotInt32;
masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
{
ScratchTagScopeRelease _(&tag);
Register unboxInt32 = ToTempUnboxRegister(ins->temp());
Register input = masm.extractInt32(value, unboxInt32);
emitAssertRangeI(r, input);
masm.jump(&done);
}
masm.bind(&isNotInt32);
}
{
Label isNotDouble;
masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
{
ScratchTagScopeRelease _(&tag);
FloatRegister input = ToFloatRegister(ins->floatTemp1());
FloatRegister temp = ToFloatRegister(ins->floatTemp2());
masm.unboxDouble(value, input);
emitAssertRangeD(r, input, temp);
masm.jump(&done);
}
masm.bind(&isNotDouble);
}
}
masm.assumeUnreachable("Incorrect range for Value.");
@ -12814,7 +12868,7 @@ CodeGenerator::visitDebugger(LDebugger* ins)
void
CodeGenerator::visitNewTarget(LNewTarget *ins)
{
ValueOperand output = GetValueOutput(ins);
ValueOperand output = ToOutValue(ins);
// if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
Label notConstructing, done;


@ -161,7 +161,6 @@ class CodeGenerator final : public CodeGeneratorSpecific
void visitLoadUnboxedExpando(LLoadUnboxedExpando* lir);
void visitTypeBarrierV(LTypeBarrierV* lir);
void visitTypeBarrierO(LTypeBarrierO* lir);
void visitMonitorTypes(LMonitorTypes* lir);
void emitPostWriteBarrier(const LAllocation* obj);
void emitPostWriteBarrier(Register objreg);
void emitPostWriteBarrierS(Address address, Register prev, Register next);


@ -3156,7 +3156,10 @@ AutoFlushICache::setRange(uintptr_t start, size_t len)
void
AutoFlushICache::flush(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_NONE)
// Nothing
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
JSContext* cx = TlsContext.get();
AutoFlushICache* afc = cx ? cx->autoFlushICache() : nullptr;
if (!afc) {
@ -3175,6 +3178,8 @@ AutoFlushICache::flush(uintptr_t start, size_t len)
JitSpewCont(JitSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
ExecutableAllocator::cacheFlush((void*)start, len);
#else
MOZ_CRASH("Unresolved porting API - AutoFlushICache::flush");
#endif
}
@ -3183,12 +3188,17 @@ AutoFlushICache::flush(uintptr_t start, size_t len)
void
AutoFlushICache::setInhibit()
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_NONE)
// Nothing
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
AutoFlushICache* afc = TlsContext.get()->autoFlushICache();
MOZ_ASSERT(afc);
MOZ_ASSERT(afc->start_);
JitSpewCont(JitSpew_CacheFlush, "I");
afc->inhibit_ = true;
#else
MOZ_CRASH("Unresolved porting API - AutoFlushICache::setInhibit");
#endif
}
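The net effect of the #if reshuffle above: instruction-cache maintenance is a no-op on x86/x64, mandatory on ARM/ARM64/MIPS after patching code in place, and a deliberate MOZ_CRASH on unported targets. The ARM64 ToggleToJmp/ToggleToCmp/ToggleCall hunks later in this patch are the new callers; the call shape is simply:

// After rewriting instructions in executable memory (taken from the ARM64
// ToggleCall hunk below):
AutoFlushICache::flush(uintptr_t(first), 4);   // one 4-byte A64 instruction
AutoFlushICache::flush(uintptr_t(call), 8);    // the patched ldr/blr pair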


@ -1439,16 +1439,22 @@ EmitCheckPropertyTypes(MacroAssembler& masm, const PropertyTypeCheckInfo* typeCh
masm.Push(obj);
Register scratch1 = obj;
// We may also need a scratch register for guardTypeSet.
// We may also need a scratch register for guardTypeSet. Additionally,
// spectreRegToZero is the register that may be zeroed on speculatively
// executed paths.
Register objScratch = InvalidReg;
Register spectreRegToZero = InvalidReg;
if (propTypes && !propTypes->unknownObject() && propTypes->getObjectCount() > 0) {
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
if (!val.constant()) {
TypedOrValueRegister valReg = val.reg();
if (valReg.hasValue())
if (valReg.hasValue()) {
regs.take(valReg.valueReg());
else if (!valReg.typedReg().isFloat())
spectreRegToZero = valReg.valueReg().payloadOrValueReg();
} else if (!valReg.typedReg().isFloat()) {
regs.take(valReg.typedReg().gpr());
spectreRegToZero = valReg.typedReg().gpr();
}
}
regs.take(scratch1);
objScratch = regs.takeAny();
@ -1485,7 +1491,7 @@ EmitCheckPropertyTypes(MacroAssembler& masm, const PropertyTypeCheckInfo* typeCh
// guardTypeSet can read from type sets without triggering read barriers.
TypeSet::readBarrier(propTypes);
masm.guardTypeSet(valReg, propTypes, BarrierKind::TypeSet, scratch1, objScratch,
&failedFastPath);
spectreRegToZero, &failedFastPath);
masm.jump(&done);
} else {
masm.jump(&failedFastPath);


@ -235,6 +235,7 @@ DefaultJitOptions::DefaultJitOptions()
}
SET_DEFAULT(spectreIndexMasking, true);
SET_DEFAULT(spectreObjectMitigationsBarriers, false);
SET_DEFAULT(spectreStringMitigations, true);
SET_DEFAULT(spectreValueMasking, true);


@ -98,6 +98,7 @@ struct DefaultJitOptions
// measure the effectiveness of each mitigation with various proof of
// concept.
bool spectreIndexMasking;
bool spectreObjectMitigationsBarriers;
bool spectreStringMitigations;
bool spectreValueMasking;


@ -2806,16 +2806,23 @@ LIRGenerator::visitTypeBarrier(MTypeBarrier* ins)
return;
}
bool needObjTemp = !types->unknownObject() && types->getObjectCount() > 0;
bool hasSpecificObjects = !types->unknownObject() && types->getObjectCount() > 0;
// Handle typebarrier with Value as input.
if (inputType == MIRType::Value) {
LDefinition objTemp = needObjTemp ? temp() : LDefinition::BogusTemp();
LTypeBarrierV* barrier = new(alloc()) LTypeBarrierV(useBox(ins->input()), tempToUnbox(),
objTemp);
assignSnapshot(barrier, Bailout_TypeBarrierV);
add(barrier, ins);
redefine(ins, ins->input());
LDefinition objTemp = hasSpecificObjects ? temp() : LDefinition::BogusTemp();
if (ins->canRedefineInput()) {
LTypeBarrierV* barrier =
new(alloc()) LTypeBarrierV(useBox(ins->input()), tempToUnbox(), objTemp);
assignSnapshot(barrier, Bailout_TypeBarrierV);
add(barrier, ins);
redefine(ins, ins->input());
} else {
LTypeBarrierV* barrier =
new(alloc()) LTypeBarrierV(useBoxAtStart(ins->input()), tempToUnbox(), objTemp);
assignSnapshot(barrier, Bailout_TypeBarrierV);
defineBoxReuseInput(barrier, ins, 0);
}
return;
}
@ -2831,11 +2838,19 @@ LIRGenerator::visitTypeBarrier(MTypeBarrier* ins)
}
if (needsObjectBarrier) {
LDefinition tmp = needObjTemp ? temp() : LDefinition::BogusTemp();
LTypeBarrierO* barrier = new(alloc()) LTypeBarrierO(useRegister(ins->getOperand(0)), tmp);
assignSnapshot(barrier, Bailout_TypeBarrierO);
add(barrier, ins);
redefine(ins, ins->getOperand(0));
LDefinition tmp = hasSpecificObjects ? temp() : LDefinition::BogusTemp();
if (ins->canRedefineInput()) {
LTypeBarrierO* barrier =
new(alloc()) LTypeBarrierO(useRegister(ins->input()), tmp);
assignSnapshot(barrier, Bailout_TypeBarrierO);
add(barrier, ins);
redefine(ins, ins->getOperand(0));
} else {
LTypeBarrierO* barrier =
new(alloc()) LTypeBarrierO(useRegisterAtStart(ins->input()), tmp);
assignSnapshot(barrier, Bailout_TypeBarrierO);
defineReuseInput(barrier, ins, 0);
}
return;
}
@ -2843,22 +2858,6 @@ LIRGenerator::visitTypeBarrier(MTypeBarrier* ins)
redefine(ins, ins->getOperand(0));
}
void
LIRGenerator::visitMonitorTypes(MMonitorTypes* ins)
{
// Requesting a non-GC pointer is safe here since we never re-enter C++
// from inside a type check.
const TemporaryTypeSet* types = ins->typeSet();
bool needObjTemp = !types->unknownObject() && types->getObjectCount() > 0;
LDefinition objTemp = needObjTemp ? temp() : LDefinition::BogusTemp();
LMonitorTypes* lir = new(alloc()) LMonitorTypes(useBox(ins->input()), tempToUnbox(), objTemp);
assignSnapshot(lir, Bailout_MonitorTypes);
add(lir, ins);
}
// Returns true iff |def| is a constant that's either not a GC thing or is not
// allocated in the nursery.
static bool


@ -216,7 +216,6 @@ class LIRGenerator : public LIRGeneratorSpecific
void visitStoreSlot(MStoreSlot* ins) override;
void visitFilterTypeSet(MFilterTypeSet* ins) override;
void visitTypeBarrier(MTypeBarrier* ins) override;
void visitMonitorTypes(MMonitorTypes* ins) override;
void visitPostWriteBarrier(MPostWriteBarrier* ins) override;
void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins) override;
void visitArrayLength(MArrayLength* ins) override;


@ -4340,7 +4340,7 @@ IonBuilder::prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType,
// in bounds while the actual index isn't, so we need two bounds checks
// here.
if (byteLoadSize > 1) {
indexLoadEnd = addBoundsCheck(indexLoadEnd, length, BoundsCheckKind::UnusedIndex);
indexLoadEnd = addBoundsCheck(indexLoadEnd, length, BoundsCheckKind::IsLoad);
auto* sub = MSub::New(alloc(), indexLoadEnd, constant(Int32Value(byteLoadSize - 1)));
sub->setInt32Specialization();
current->add(sub);


@ -2361,6 +2361,29 @@ MTypeBarrier::foldsTo(TempAllocator& alloc)
return input();
}
bool
MTypeBarrier::canRedefineInput()
{
// LTypeBarrier does not need its own def usually, because we can use the
// input's allocation (LIRGenerator::redefineInput). However, if Spectre
// mitigations are enabled, guardObjectType may zero the object register on
// speculatively executed paths, so LTypeBarrier needs to have its own def
// then to guarantee all uses will see this potentially-zeroed value.
if (!JitOptions.spectreObjectMitigationsBarriers)
return true;
if (barrierKind() == BarrierKind::TypeTagOnly)
return true;
TemporaryTypeSet* types = resultTypeSet();
bool hasSpecificObjects = !types->unknownObject() && types->getObjectCount() > 0;
if (!hasSpecificObjects)
return true;
return false;
}
#ifdef DEBUG
void
MPhi::assertLoopPhi() const
@ -6636,8 +6659,17 @@ TryAddTypeBarrierForWrite(TempAllocator& alloc, CompilerConstraintList* constrai
if ((*pvalue)->resultTypeSet() && (*pvalue)->resultTypeSet()->objectsAreSubset(types))
kind = BarrierKind::TypeTagOnly;
MInstruction* ins = MMonitorTypes::New(alloc, *pvalue, types, kind);
MInstruction* ins = MTypeBarrier::New(alloc, *pvalue, types, kind);
current->add(ins);
ins->setNotMovable();
if (ins->type() == MIRType::Undefined) {
ins = MConstant::New(alloc, UndefinedValue());
current->add(ins);
} else if (ins->type() == MIRType::Null) {
ins = MConstant::New(alloc, NullValue());
current->add(ins);
}
*pvalue = ins;
return true;
}


@ -13223,6 +13223,8 @@ class MTypeBarrier
}
MDefinition* foldsTo(TempAllocator& alloc) override;
bool canRedefineInput();
bool alwaysBails() const {
// If mirtype of input doesn't agree with mirtype of barrier,
// we will definitely bail.
@ -13243,43 +13245,6 @@ class MTypeBarrier
ALLOW_CLONE(MTypeBarrier)
};
// Like MTypeBarrier, guard that the value is in the given type set. This is
// used before property writes to ensure the value being written is represented
// in the property types for the object.
class MMonitorTypes
: public MUnaryInstruction,
public BoxInputsPolicy::Data
{
const TemporaryTypeSet* typeSet_;
BarrierKind barrierKind_;
MMonitorTypes(MDefinition* def, const TemporaryTypeSet* types, BarrierKind kind)
: MUnaryInstruction(classOpcode, def),
typeSet_(types),
barrierKind_(kind)
{
MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
setGuard();
MOZ_ASSERT(!types->unknown());
}
public:
INSTRUCTION_HEADER(MonitorTypes)
TRIVIAL_NEW_WRAPPERS
const TemporaryTypeSet* typeSet() const {
return typeSet_;
}
BarrierKind barrierKind() const {
return barrierKind_;
}
AliasSet getAliasSet() const override {
return AliasSet::None();
}
};
// Given a value being written to another object, update the generational store
// buffer if the value is in the nursery and object is in the tenured heap.
class MPostWriteBarrier : public MBinaryInstruction, public ObjectPolicy<0>::Data


@ -186,7 +186,6 @@ namespace jit {
_(HomeObjectSuperBase) \
_(FilterTypeSet) \
_(TypeBarrier) \
_(MonitorTypes) \
_(PostWriteBarrier) \
_(PostWriteElementBarrier) \
_(GetPropSuperCache) \


@ -80,13 +80,17 @@ EmitTypeCheck(MacroAssembler& masm, Assembler::Condition cond, const T& src, Typ
template <typename Source> void
MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind,
Register unboxScratch, Register objScratch, Label* miss)
Register unboxScratch, Register objScratch,
Register spectreRegToZero, Label* miss)
{
// unboxScratch may be InvalidReg on 32-bit platforms. It should only be
// used for extracting the Value tag or payload.
//
// objScratch may be InvalidReg if the TypeSet does not contain specific
// objects to guard on. It should only be used for guardObjectType.
//
// spectreRegToZero is a register that will be zeroed by guardObjectType on
// speculatively executed paths.
MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
MOZ_ASSERT(!types->unknown());
@ -154,12 +158,12 @@ MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types, Barrie
if (kind != BarrierKind::TypeTagOnly) {
Register obj = extractObject(address, unboxScratch);
guardObjectType(obj, types, objScratch, miss);
guardObjectType(obj, types, objScratch, spectreRegToZero, miss);
} else {
#ifdef DEBUG
Label fail;
Register obj = extractObject(address, unboxScratch);
guardObjectType(obj, types, objScratch, &fail);
guardObjectType(obj, types, objScratch, spectreRegToZero, &fail);
jump(&matched);
bind(&fail);
@ -210,8 +214,8 @@ MacroAssembler::guardTypeSetMightBeIncomplete(const TypeSet* types, Register obj
#endif
void
MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
Register scratch, Label* miss)
MacroAssembler::guardObjectType(Register obj, const TypeSet* types, Register scratch,
Register spectreRegToZero, Label* miss)
{
MOZ_ASSERT(obj != scratch);
MOZ_ASSERT(!types->unknown());
@ -246,33 +250,66 @@ MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
return;
}
if (JitOptions.spectreObjectMitigationsBarriers)
move32(Imm32(0), scratch);
if (hasSingletons) {
for (unsigned i = 0; i < count; i++) {
JSObject* singleton = types->getSingletonNoBarrier(i);
if (!singleton)
continue;
if (--numBranches > 0)
branchPtr(Equal, obj, ImmGCPtr(singleton), &matched);
else
branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
if (JitOptions.spectreObjectMitigationsBarriers) {
if (--numBranches > 0) {
Label next;
branchPtr(NotEqual, obj, ImmGCPtr(singleton), &next);
spectreMovePtr(NotEqual, scratch, spectreRegToZero);
jump(&matched);
bind(&next);
} else {
branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
spectreMovePtr(NotEqual, scratch, spectreRegToZero);
}
} else {
if (--numBranches > 0)
branchPtr(Equal, obj, ImmGCPtr(singleton), &matched);
else
branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
}
}
}
if (hasObjectGroups) {
comment("has object groups");
loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
// If Spectre mitigations are enabled, we use the scratch register as
// zero register. Without mitigations we can use it to store the group.
Address groupAddr(obj, JSObject::offsetOfGroup());
if (!JitOptions.spectreObjectMitigationsBarriers)
loadPtr(groupAddr, scratch);
for (unsigned i = 0; i < count; i++) {
ObjectGroup* group = types->getGroupNoBarrier(i);
if (!group)
continue;
if (--numBranches > 0)
branchPtr(Equal, scratch, ImmGCPtr(group), &matched);
else
branchPtr(NotEqual, scratch, ImmGCPtr(group), miss);
if (JitOptions.spectreObjectMitigationsBarriers) {
if (--numBranches > 0) {
Label next;
branchPtr(NotEqual, groupAddr, ImmGCPtr(group), &next);
spectreMovePtr(NotEqual, scratch, spectreRegToZero);
jump(&matched);
bind(&next);
} else {
branchPtr(NotEqual, groupAddr, ImmGCPtr(group), miss);
spectreMovePtr(NotEqual, scratch, spectreRegToZero);
}
} else {
if (--numBranches > 0)
branchPtr(Equal, scratch, ImmGCPtr(group), &matched);
else
branchPtr(NotEqual, scratch, ImmGCPtr(group), miss);
}
}
}
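A note on the conditional-move trick above: scratch is pre-loaded with zero once, and spectreMovePtr only takes effect when the comparison flags say the identity check failed. A condensed sketch of the sequence emitted per candidate group when the mitigation is enabled (register names are illustrative, the calls are the ones used above):

masm.move32(Imm32(0), scratch);                    // zero source, set up once
// ... for each candidate group ...
Label next;
masm.branchPtr(Assembler::NotEqual, groupAddr, ImmGCPtr(group), &next);
masm.spectreMovePtr(Assembler::NotEqual, scratch, spectreRegToZero);
// Architecturally the cmov is only reached when the compare was Equal, so it
// does nothing. If the CPU mispredicts the branch and speculates down this
// path with a mismatched group, the NotEqual flags make the cmov zero the
// object register, so dependent speculative loads cannot be steered by a
// wrongly-typed object.
masm.jump(&matched);
masm.bind(&next);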
@ -283,13 +320,16 @@ MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
template void MacroAssembler::guardTypeSet(const Address& address, const TypeSet* types,
BarrierKind kind, Register unboxScratch,
Register objScratch, Label* miss);
Register objScratch, Register spectreRegToZero,
Label* miss);
template void MacroAssembler::guardTypeSet(const ValueOperand& value, const TypeSet* types,
BarrierKind kind, Register unboxScratch,
Register objScratch, Label* miss);
Register objScratch, Register spectreRegToZero,
Label* miss);
template void MacroAssembler::guardTypeSet(const TypedOrValueRegister& value, const TypeSet* types,
BarrierKind kind, Register unboxScratch,
Register objScratch, Label* miss);
Register objScratch, Register spectreRegToZero,
Label* miss);
template<typename S, typename T>
static void
@ -2212,15 +2252,18 @@ void
MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
Label* fail, MIRType outputType)
{
Register tag = splitTagForTest(value);
Label isDouble, isInt32, isBool, isNull, done;
branchTestDouble(Assembler::Equal, tag, &isDouble);
branchTestInt32(Assembler::Equal, tag, &isInt32);
branchTestBoolean(Assembler::Equal, tag, &isBool);
branchTestNull(Assembler::Equal, tag, &isNull);
branchTestUndefined(Assembler::NotEqual, tag, fail);
{
ScratchTagScope tag(*this, value);
splitTagForTest(value, tag);
branchTestDouble(Assembler::Equal, tag, &isDouble);
branchTestInt32(Assembler::Equal, tag, &isInt32);
branchTestBoolean(Assembler::Equal, tag, &isBool);
branchTestNull(Assembler::Equal, tag, &isNull);
branchTestUndefined(Assembler::NotEqual, tag, fail);
}
// fall-through: undefined
loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
@ -2428,7 +2471,8 @@ MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
Label* fail, IntConversionBehavior behavior,
IntConversionInputKind conversion)
{
Register tag = splitTagForTest(value);
Label done, isInt32, isBool, isDouble, isNull, isString;
bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
behavior == IntConversionBehavior::ClampToUint8) &&
handleStringEntry &&
@ -2436,33 +2480,36 @@ MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);
Label done, isInt32, isBool, isDouble, isNull, isString;
{
ScratchTagScope tag(*this, value);
splitTagForTest(value, tag);
maybeBranchTestType(MIRType::Int32, maybeInput, tag, &isInt32);
if (conversion == IntConversionInputKind::Any || conversion == IntConversionInputKind::NumbersOrBoolsOnly)
maybeBranchTestType(MIRType::Boolean, maybeInput, tag, &isBool);
maybeBranchTestType(MIRType::Double, maybeInput, tag, &isDouble);
maybeBranchTestType(MIRType::Int32, maybeInput, tag, &isInt32);
if (conversion == IntConversionInputKind::Any || conversion == IntConversionInputKind::NumbersOrBoolsOnly)
maybeBranchTestType(MIRType::Boolean, maybeInput, tag, &isBool);
maybeBranchTestType(MIRType::Double, maybeInput, tag, &isDouble);
if (conversion == IntConversionInputKind::Any) {
// If we are not truncating, we fail for anything that's not
// null. Otherwise we might be able to handle strings and objects.
switch (behavior) {
case IntConversionBehavior::Normal:
case IntConversionBehavior::NegativeZeroCheck:
branchTestNull(Assembler::NotEqual, tag, fail);
break;
if (conversion == IntConversionInputKind::Any) {
// If we are not truncating, we fail for anything that's not
// null. Otherwise we might be able to handle strings and objects.
switch (behavior) {
case IntConversionBehavior::Normal:
case IntConversionBehavior::NegativeZeroCheck:
branchTestNull(Assembler::NotEqual, tag, fail);
break;
case IntConversionBehavior::Truncate:
case IntConversionBehavior::ClampToUint8:
maybeBranchTestType(MIRType::Null, maybeInput, tag, &isNull);
if (handleStrings)
maybeBranchTestType(MIRType::String, maybeInput, tag, &isString);
maybeBranchTestType(MIRType::Object, maybeInput, tag, fail);
branchTestUndefined(Assembler::NotEqual, tag, fail);
break;
case IntConversionBehavior::Truncate:
case IntConversionBehavior::ClampToUint8:
maybeBranchTestType(MIRType::Null, maybeInput, tag, &isNull);
if (handleStrings)
maybeBranchTestType(MIRType::String, maybeInput, tag, &isString);
maybeBranchTestType(MIRType::Object, maybeInput, tag, fail);
branchTestUndefined(Assembler::NotEqual, tag, fail);
break;
}
} else {
jump(fail);
}
} else {
jump(fail);
}
// The value is null or undefined in truncation contexts - just emit 0.
@ -3581,7 +3628,7 @@ namespace jit {
#ifdef DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
: RegisterType(reg), masm_(masm)
: RegisterType(reg), masm_(masm), released_(false)
{
masm.debugTrackedRegisters_.add(reg);
}
@ -3594,12 +3641,39 @@ template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(Macro
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope()
{
const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
masm_.debugTrackedRegisters_.take(reg);
if (!released_)
release();
}
template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
template <class RegisterType>
void
AutoGenericRegisterScope<RegisterType>::release()
{
MOZ_ASSERT(!released_);
released_ = true;
const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
masm_.debugTrackedRegisters_.take(reg);
}
template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();
template <class RegisterType>
void
AutoGenericRegisterScope<RegisterType>::reacquire()
{
MOZ_ASSERT(released_);
released_ = false;
const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
masm_.debugTrackedRegisters_.add(reg);
}
template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();
#endif // DEBUG
} // namespace jit


@ -1367,6 +1367,10 @@ class MacroAssembler : public MacroAssemblerSpecific
Register dest)
DEFINED_ON(arm, arm64, mips_shared, x86, x64);
// Conditional move for Spectre mitigations.
inline void spectreMovePtr(Condition cond, Register src, Register dest)
DEFINED_ON(arm, arm64, x86, x64);
// Performs a bounds check and zeroes the index register if out-of-bounds
// (to mitigate Spectre).
inline void boundsCheck32ForLoad(Register index, Register length, Register scratch,
@ -1485,23 +1489,23 @@ class MacroAssembler : public MacroAssemblerSpecific
void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
// For all the ARM wasmLoad and wasmStore functions, `ptr` MUST equal
// `ptrScratch`, and that register will be updated based on conditions
// For all the ARM and ARM64 wasmLoad and wasmStore functions, `ptr` MUST
// equal `ptrScratch`, and that register will be updated based on conditions
// listed below (where it is only mentioned as `ptr`).
// `ptr` will be updated if access.offset() != 0 or access.type() == Scalar::Int64.
void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
Register ptrScratch, AnyRegister output)
DEFINED_ON(arm, mips_shared);
DEFINED_ON(arm, arm64, mips_shared);
void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
Register ptrScratch, Register64 output)
DEFINED_ON(arm, mips32, mips64);
DEFINED_ON(arm, arm64, mips32, mips64);
void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register memoryBase,
Register ptr, Register ptrScratch)
DEFINED_ON(arm, mips_shared);
DEFINED_ON(arm, arm64, mips_shared);
void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
Register ptr, Register ptrScratch)
DEFINED_ON(arm, mips32, mips64);
DEFINED_ON(arm, arm64, mips32, mips64);
// `ptr` will always be updated.
void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
@ -1934,9 +1938,11 @@ class MacroAssembler : public MacroAssemblerSpecific
// register is required.
template <typename Source>
void guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind,
Register unboxScratch, Register objScratch, Label* miss);
Register unboxScratch, Register objScratch, Register spectreRegToZero,
Label* miss);
void guardObjectType(Register obj, const TypeSet* types, Register scratch, Label* miss);
void guardObjectType(Register obj, const TypeSet* types, Register scratch,
Register spectreRegToZero, Label* miss);
#ifdef DEBUG
void guardTypeSetMightBeIncomplete(const TypeSet* types, Register obj, Register scratch,


@ -130,7 +130,7 @@ class ValueOperand
bool aliases(Register reg) const {
return type_ == reg || payload_ == reg;
}
Register scratchReg() const {
Register payloadOrValueReg() const {
return payloadReg();
}
constexpr bool operator==(const ValueOperand& o) const {
@ -154,7 +154,7 @@ class ValueOperand
bool aliases(Register reg) const {
return value_ == reg;
}
Register scratchReg() const {
Register payloadOrValueReg() const {
return valueReg();
}
constexpr bool operator==(const ValueOperand& o) const {
@ -165,6 +165,10 @@ class ValueOperand
}
#endif
Register scratchReg() const {
return payloadOrValueReg();
}
ValueOperand() = default;
};


@ -345,12 +345,17 @@ struct AutoGenericRegisterScope : public RegisterType
#ifdef DEBUG
MacroAssembler& masm_;
bool released_;
explicit AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg);
~AutoGenericRegisterScope();
void release();
void reacquire();
#else
constexpr explicit AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
: RegisterType(reg)
{ }
void release() {}
void reacquire() {}
#endif
};
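release() and reacquire() let a scope temporarily hand its register back while nested code runs; in DEBUG builds debugTrackedRegisters_ catches a scope that is still live being taken twice. A hypothetical usage sketch, where ScratchRegisterScope stands for an AutoGenericRegisterScope<Register> typedef and emitNestedCode is a stand-in helper (neither is introduced by this patch):

{
    ScratchRegisterScope scratch(masm);
    masm.loadPtr(srcAddr, scratch);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &isNull);

    scratch.release();              // value no longer needed; stop tracking it
    emitNestedCode(masm);           // hypothetical: may claim the scratch register itself
    scratch.reacquire();            // track it again before the next use

    masm.loadPtr(otherAddr, scratch);   // reload, the old value may be gone
}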


@ -1385,14 +1385,6 @@ CodeGeneratorARM::ToValue(LInstruction* ins, size_t pos)
return ValueOperand(typeReg, payloadReg);
}
ValueOperand
CodeGeneratorARM::ToOutValue(LInstruction* ins)
{
Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
return ValueOperand(typeReg, payloadReg);
}
ValueOperand
CodeGeneratorARM::ToTempValue(LInstruction* ins, size_t pos)
{
@ -1472,10 +1464,10 @@ CodeGeneratorARM::visitFloat32(LFloat32* ins)
masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
}
Register
CodeGeneratorARM::splitTagForTest(const ValueOperand& value)
void
CodeGeneratorARM::splitTagForTest(const ValueOperand& value, ScratchTagScope& tag)
{
return value.typeReg();
MOZ_ASSERT(value.typeReg() == tag);
}
void


@ -200,13 +200,12 @@ class CodeGeneratorARM : public CodeGeneratorShared
protected:
ValueOperand ToValue(LInstruction* ins, size_t pos);
ValueOperand ToOutValue(LInstruction* ins);
ValueOperand ToTempValue(LInstruction* ins, size_t pos);
Register64 ToOperandOrRegister64(const LInt64Allocation input);
// Functions for LTestVAndBranch.
Register splitTagForTest(const ValueOperand& value);
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
void divICommon(MDiv* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot,
Label& done);


@ -2181,6 +2181,12 @@ MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, R
ma_mov(src, dest, LeaveCC, cond);
}
void
MacroAssembler::spectreMovePtr(Condition cond, Register src, Register dest)
{
ma_mov(src, dest, LeaveCC, cond);
}
void
MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
Label* failure)


@ -23,6 +23,25 @@ static Register CallReg = ip;
static const int defaultShift = 3;
JS_STATIC_ASSERT(1 << defaultShift == sizeof(JS::Value));
// See documentation for ScratchTagScope and ScratchTagScopeRelease in
// MacroAssembler-x64.h.
class ScratchTagScope
{
const ValueOperand& v_;
public:
ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
operator Register() { return v_.typeReg(); }
void release() {}
void reacquire() {}
};
class ScratchTagScopeRelease
{
public:
explicit ScratchTagScopeRelease(ScratchTagScope*) {}
};
// MacroAssemblerARM inherits from Assembler, defined in
// Assembler-arm.{h,cpp}
class MacroAssemblerARM : public Assembler
@ -700,9 +719,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
test32(lhs, rhs);
}
// Returns the register containing the type tag.
Register splitTagForTest(const ValueOperand& value) {
return value.typeReg();
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
MOZ_ASSERT(value.typeReg() == tag);
}
// Higher level tag testing code.

View file

@ -35,6 +35,7 @@ ABIArgGenerator::next(MIRType type)
{
switch (type) {
case MIRType::Int32:
case MIRType::Int64:
case MIRType::Pointer:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
@ -87,6 +88,34 @@ Assembler::finish()
}
}
bool
Assembler::appendRawCode(const uint8_t* code, size_t numBytes)
{
flush();
return armbuffer_.appendRawCode(code, numBytes);
}
bool
Assembler::reserve(size_t size)
{
// This buffer uses fixed-size chunks so there's no point in reserving
// now vs. on-demand.
return !oom();
}
bool
Assembler::swapBuffer(wasm::Bytes& bytes)
{
// For now, specialize to the one use case. As long as wasm::Bytes is a
// Vector, not a linked-list of chunks, there's not much we can do other
// than copy.
MOZ_ASSERT(bytes.empty());
if (!bytes.resize(bytesNeeded()))
return false;
armbuffer_.executableCopy(bytes.begin());
return true;
}
BufferOffset
Assembler::emitExtendedJumpTable()
{
@ -285,6 +314,19 @@ Assembler::bind(RepatchLabel* label)
inst->SetImmPCOffsetTarget(inst + nextOffset().getOffset() - branchOffset);
}
void
Assembler::bindLater(Label* label, wasm::OldTrapDesc target)
{
if (label->used()) {
BufferOffset b(label);
do {
append(wasm::OldTrapSite(target, b.getOffset()));
b = NextLink(b);
} while (b.assigned());
}
label->reset();
}
void
Assembler::trace(JSTracer* trc)
{
@ -381,6 +423,8 @@ Assembler::ToggleToJmp(CodeLocationLabel inst_)
MOZ_ASSERT(vixl::is_int19(imm19));
b(i, imm19, Always);
AutoFlushICache::flush(uintptr_t(i), 4);
}
void
@ -405,6 +449,8 @@ Assembler::ToggleToCmp(CodeLocationLabel inst_)
// From the above, there is a safe 19-bit contiguous region from 5:23.
Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB | Flags(vixl::SetFlags) |
Rd(vixl::xzr) | (imm19 << vixl::Rn_offset));
AutoFlushICache::flush(uintptr_t(i), 4);
}
void
@ -431,7 +477,7 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
return;
if (call->IsBLR()) {
// If the second instruction is blr(), then wehave:
// If the second instruction is blr(), then we have:
// ldr x17, [pc, offset]
// blr x17
MOZ_ASSERT(load->IsLDR());
@ -455,6 +501,9 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
ldr(load, ScratchReg2_64, int32_t(offset));
blr(call, ScratchReg2_64);
}
AutoFlushICache::flush(uintptr_t(first), 4);
AutoFlushICache::flush(uintptr_t(call), 8);
}
class RelocationIterator

View file

@ -192,15 +192,9 @@ class Assembler : public vixl::Assembler
typedef vixl::Condition Condition;
void finish();
bool appendRawCode(const uint8_t* code, size_t numBytes) {
MOZ_CRASH("NYI");
}
bool reserve(size_t size) {
MOZ_CRASH("NYI");
}
bool swapBuffer(wasm::Bytes& bytes) {
MOZ_CRASH("NYI");
}
bool appendRawCode(const uint8_t* code, size_t numBytes);
bool reserve(size_t size);
bool swapBuffer(wasm::Bytes& bytes);
void trace(JSTracer* trc);
// Emit the jump table, returning the BufferOffset to the first entry in the table.
@ -220,9 +214,7 @@ class Assembler : public vixl::Assembler
void bind(Label* label) { bind(label, nextOffset()); }
void bind(Label* label, BufferOffset boff);
void bind(RepatchLabel* label);
void bindLater(Label* label, wasm::OldTrapDesc target) {
MOZ_CRASH("NYI");
}
void bindLater(Label* label, wasm::OldTrapDesc target);
bool oom() const {
return AssemblerShared::oom() ||
@ -473,8 +465,8 @@ static constexpr Register ABINonArgReturnVolatileReg = lr;
// TLS pointer argument register for WebAssembly functions. This must not alias
// any other register used for passing function arguments or return values.
// Preserved by WebAssembly functions.
static constexpr Register WasmTlsReg { Registers::x17 };
// Preserved by WebAssembly functions. Must be nonvolatile.
static constexpr Register WasmTlsReg { Registers::x23 };
// Registers used for wasm table calls. These registers must be disjoint
// from the ABI argument registers, WasmTlsReg and each other.

View file

@ -401,13 +401,6 @@ CodeGeneratorARM64::ToValue(LInstruction* ins, size_t pos)
return ValueOperand(ToRegister(ins->getOperand(pos)));
}
ValueOperand
CodeGeneratorARM64::ToOutValue(LInstruction* ins)
{
Register payloadReg = ToRegister(ins->getDef(0));
return ValueOperand(payloadReg);
}
ValueOperand
CodeGeneratorARM64::ToTempValue(LInstruction* ins, size_t pos)
{
@ -444,8 +437,8 @@ CodeGeneratorARM64::visitFloat32(LFloat32* ins)
MOZ_CRASH("visitFloat32");
}
Register
CodeGeneratorARM64::splitTagForTest(const ValueOperand& value)
void
CodeGeneratorARM64::splitTagForTest(const ValueOperand& value, ScratchTagScope& tag)
{
MOZ_CRASH("splitTagForTest");
}

View file

@ -158,11 +158,10 @@ class CodeGeneratorARM64 : public CodeGeneratorShared
protected:
ValueOperand ToValue(LInstruction* ins, size_t pos);
ValueOperand ToOutValue(LInstruction* ins);
ValueOperand ToTempValue(LInstruction* ins, size_t pos);
// Functions for LTestVAndBranch.
Register splitTagForTest(const ValueOperand& value);
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
void storeElementTyped(const LAllocation* value, MIRType valueType, MIRType elementType,
Register elements, const LAllocation* index);

View file

@ -17,79 +17,79 @@ namespace jit {
void
MacroAssembler::move64(Register64 src, Register64 dest)
{
movePtr(src.reg, dest.reg);
Mov(ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
}
void
MacroAssembler::move64(Imm64 imm, Register64 dest)
{
movePtr(ImmWord(imm.value), dest.reg);
Mov(ARMRegister(dest.reg, 64), imm.value);
}
void
MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest)
{
MOZ_CRASH("NYI: moveFloat32ToGPR");
Fmov(ARMRegister(dest, 32), ARMFPRegister(src, 32));
}
void
MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest)
{
MOZ_CRASH("NYI: moveGPRToFloat32");
Fmov(ARMFPRegister(dest, 32), ARMRegister(src, 32));
}
void
MacroAssembler::move8SignExtend(Register src, Register dest)
{
MOZ_CRASH("NYI: move8SignExtend");
Sxtb(ARMRegister(dest, 32), ARMRegister(src, 32));
}
void
MacroAssembler::move16SignExtend(Register src, Register dest)
{
MOZ_CRASH("NYI: move16SignExtend");
Sxth(ARMRegister(dest, 32), ARMRegister(src, 32));
}
void
MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest)
{
MOZ_CRASH("NYI: moveDoubleToGPR64");
Fmov(ARMRegister(dest.reg, 64), ARMFPRegister(src, 64));
}
void
MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest)
{
MOZ_CRASH("NYI: moveGPR64ToDouble");
Fmov(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
}
void
MacroAssembler::move64To32(Register64 src, Register dest)
{
MOZ_CRASH("NYI: move64To32");
Mov(ARMRegister(dest, 32), ARMRegister(src.reg, 32));
}
void
MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest)
{
MOZ_CRASH("NYI: move32To64ZeroExtend");
Mov(ARMRegister(dest.reg, 32), ARMRegister(src, 32));
}
void
MacroAssembler::move8To64SignExtend(Register src, Register64 dest)
{
MOZ_CRASH("NYI: move8To64SignExtend");
Sxtb(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
}
void
MacroAssembler::move16To64SignExtend(Register src, Register64 dest)
{
MOZ_CRASH("NYI: move16To64SignExtend");
Sxth(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
}
void
MacroAssembler::move32To64SignExtend(Register src, Register64 dest)
{
MOZ_CRASH("NYI: move32To64SignExtend");
Sxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
}
// ===============================================================
@ -164,7 +164,7 @@ MacroAssembler::and64(Imm64 imm, Register64 dest)
void
MacroAssembler::and64(Register64 src, Register64 dest)
{
MOZ_CRASH("NYI: and64");
And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
}
void
@ -357,13 +357,25 @@ MacroAssembler::add64(Imm64 imm, Register64 dest)
CodeOffset
MacroAssembler::sub32FromStackPtrWithPatch(Register dest)
{
MOZ_CRASH("NYI - sub32FromStackPtrWithPatch");
vixl::UseScratchRegisterScope temps(this);
const ARMRegister scratch = temps.AcquireX();
CodeOffset offs = CodeOffset(currentOffset());
movz(scratch, 0, 0);
movk(scratch, 0, 16);
Sub(ARMRegister(dest, 64), sp, scratch);
return offs;
}
void
MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm)
{
MOZ_CRASH("NYI - patchSub32FromStackPtr");
Instruction* i1 = getInstructionAt(BufferOffset(offset.offset()));
MOZ_ASSERT(i1->IsMovz());
i1->SetInstructionBits(i1->InstructionBits() | ImmMoveWide(uint16_t(imm.value)));
Instruction* i2 = getInstructionAt(BufferOffset(offset.offset() + 4));
MOZ_ASSERT(i2->IsMovk());
i2->SetInstructionBits(i2->InstructionBits() | ImmMoveWide(uint16_t(imm.value >> 16)));
}
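The movz/movk pair above is emitted with zero immediates and fixed up once the final frame size is known. A small standalone C++ sketch (not part of the patch) of how patchSub32FromStackPtr splits the 32-bit value across the two 16-bit immediate fields:

#include <cstdint>

// Low half is patched into the movz (shift 0), high half into the movk
// (LSL #16), mirroring the ImmMoveWide() fix-ups above.
static void splitImm32(uint32_t imm, uint16_t* movzImm, uint16_t* movkImm)
{
    *movzImm = uint16_t(imm);        // movz scratch, #lo16
    *movkImm = uint16_t(imm >> 16);  // movk scratch, #hi16, lsl #16
}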
void
@ -438,13 +450,13 @@ MacroAssembler::subPtr(const Address& addr, Register dest)
void
MacroAssembler::sub64(Register64 src, Register64 dest)
{
MOZ_CRASH("NYI: sub64");
Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
}
void
MacroAssembler::sub64(Imm64 imm, Register64 dest)
{
MOZ_CRASH("NYI: sub64");
Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
}
void
@ -462,7 +474,7 @@ MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest)
void
MacroAssembler::mul32(Register rhs, Register srcDest)
{
MOZ_CRASH("NYI - mul32");
mul32(srcDest, rhs, srcDest, nullptr, nullptr);
}
void
@ -493,7 +505,8 @@ MacroAssembler::mul64(Imm64 imm, const Register64& dest)
void
MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
{
MOZ_CRASH("NYI: mul64");
MOZ_ASSERT(temp == Register::Invalid());
Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
}
void
@ -531,13 +544,26 @@ MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
void
MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned)
{
MOZ_CRASH("NYI - quotient32");
if (isUnsigned)
Udiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
else
Sdiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
}
// This does not deal with x % 0 or INT_MIN % -1; the caller needs to filter
// out those cases when they may occur.
void
MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
{
MOZ_CRASH("NYI - remainder32");
vixl::UseScratchRegisterScope temps(this);
ARMRegister scratch = temps.AcquireW();
if (isUnsigned)
Udiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
else
Sdiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
Mul(scratch, scratch, ARMRegister(rhs, 32));
Sub(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), scratch);
}
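A standalone C++ sketch of the divide, multiply and subtract sequence remainder32 emits, under the same preconditions noted above (no division by zero, no INT_MIN % -1 in the signed case):

static uint32_t unsignedRemainder32(uint32_t a, uint32_t b)
{
    uint32_t q = a / b;   // Udiv
    return a - q * b;     // Mul + Sub
}

static int32_t signedRemainder32(int32_t a, int32_t b)
{
    int32_t q = a / b;    // Sdiv; caller has filtered b == 0 and INT_MIN / -1
    return a - q * b;     // Mul + Sub
}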
void
@ -586,49 +612,53 @@ MacroAssembler::negateDouble(FloatRegister reg)
void
MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest)
{
MOZ_CRASH("NYI - absFloat32");
fabs(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
}
void
MacroAssembler::absDouble(FloatRegister src, FloatRegister dest)
{
MOZ_CRASH("NYI - absDouble");
fabs(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
}
void
MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest)
{
MOZ_CRASH("NYI - sqrtFloat32");
fsqrt(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
}
void
MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest)
{
MOZ_CRASH("NYI - sqrtDouble");
fsqrt(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
}
void
MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
{
MOZ_CRASH("NYI - minFloat32");
MOZ_ASSERT(handleNaN); // Always true for wasm
fmin(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32), ARMFPRegister(other, 32));
}
void
MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
{
MOZ_CRASH("NYI - minDouble");
MOZ_ASSERT(handleNaN); // Always true for wasm
fmin(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64), ARMFPRegister(other, 64));
}
void
MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
{
MOZ_CRASH("NYI - maxFloat32");
MOZ_ASSERT(handleNaN); // Always true for wasm
fmax(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32), ARMFPRegister(other, 32));
}
void
MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
{
MOZ_CRASH("NYI - maxDouble");
MOZ_ASSERT(handleNaN); // Always true for wasm
fmax(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64), ARMFPRegister(other, 64));
}
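The handleNaN assertions above hold because these helpers are only reached from wasm, whose min/max propagate NaN and order -0 below +0, which is what ARM64 FMIN/FMAX provide. A scalar C++ model of the min rule (a sketch of the intended semantics, not part of the patch):

#include <cmath>
#include <limits>

static double wasmMin(double a, double b)
{
    if (std::isnan(a) || std::isnan(b))
        return std::numeric_limits<double>::quiet_NaN();  // NaN wins
    if (a == 0.0 && b == 0.0)
        return std::signbit(a) ? a : b;                   // -0 beats +0
    return a < b ? a : b;
}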
// ===============================================================
@ -651,7 +681,7 @@ MacroAssembler::lshift64(Imm32 imm, Register64 dest)
void
MacroAssembler::lshift64(Register shift, Register64 srcDest)
{
MOZ_CRASH("NYI: lshift64");
Lsl(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64), ARMRegister(shift, 64));
}
void
@ -724,19 +754,19 @@ MacroAssembler::rshift64(Imm32 imm, Register64 dest)
void
MacroAssembler::rshift64(Register shift, Register64 srcDest)
{
MOZ_CRASH("NYI: rshift64");
Lsr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64), ARMRegister(shift, 64));
}
void
MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
{
MOZ_CRASH("NYI: rshift64Arithmetic");
Asr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), imm.value);
}
void
MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest)
{
MOZ_CRASH("NYI: rshift64Arithmetic");
Asr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64), ARMRegister(shift, 64));
}
// ===============================================================
@ -764,49 +794,65 @@ MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
void
MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
{
MOZ_CRASH("NYI: rotateLeft by immediate");
Ror(ARMRegister(dest, 32), ARMRegister(input, 32), (32 - count.value) & 31);
}
void
MacroAssembler::rotateLeft(Register count, Register input, Register dest)
{
MOZ_CRASH("NYI: rotateLeft by register");
vixl::UseScratchRegisterScope temps(this);
const ARMRegister scratch = temps.AcquireW();
// Really 32 - count, but the upper bits of the result are ignored.
Neg(scratch, ARMRegister(count, 32));
Ror(ARMRegister(dest, 32), ARMRegister(input, 32), scratch);
}
void
MacroAssembler::rotateRight(Imm32 count, Register input, Register dest)
{
MOZ_CRASH("NYI: rotateRight by immediate");
Ror(ARMRegister(dest, 32), ARMRegister(input, 32), count.value & 31);
}
void
MacroAssembler::rotateRight(Register count, Register input, Register dest)
{
MOZ_CRASH("NYI: rotateRight by register");
Ror(ARMRegister(dest, 32), ARMRegister(input, 32), ARMRegister(count, 32));
}
void
MacroAssembler::rotateLeft64(Register count, Register64 input, Register64 dest, Register temp)
{
MOZ_CRASH("NYI: rotateLeft64");
MOZ_ASSERT(temp == Register::Invalid());
vixl::UseScratchRegisterScope temps(this);
const ARMRegister scratch = temps.AcquireX();
// Really 64 - count, but the upper bits of the result are ignored.
Neg(scratch, ARMRegister(count, 64));
Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), scratch);
}
void
MacroAssembler::rotateLeft64(Imm32 count, Register64 input, Register64 dest, Register temp)
{
MOZ_CRASH("NYI: rotateLeft64");
MOZ_ASSERT(temp == Register::Invalid());
Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), (64 - count.value) & 63);
}
void
MacroAssembler::rotateRight64(Register count, Register64 input, Register64 dest, Register temp)
{
MOZ_CRASH("NYI: rotateRight64");
MOZ_ASSERT(temp == Register::Invalid());
Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), ARMRegister(count, 64));
}
void
MacroAssembler::rotateRight64(Imm32 count, Register64 input, Register64 dest, Register temp)
{
MOZ_CRASH("NYI: rotateRight64");
MOZ_ASSERT(temp == Register::Invalid());
Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), count.value & 63);
}
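The rotate-left paths above rely on the identity rotl(x, n) == rotr(x, (width - n) mod width), which is why negating the count and issuing Ror is enough; the extra high bits produced by the negation are masked off by the ROR count field. A small standalone C++ sketch of the 32-bit case:

#include <cstdint>

static uint32_t rotl32(uint32_t x, uint32_t n)
{
    n &= 31;
    return (x << n) | (x >> ((32 - n) & 31));
}

// Same result expressed as a rotate right by (32 - n) & 31, i.e. -n mod 32.
static uint32_t rotl32ViaRor(uint32_t x, uint32_t n)
{
    uint32_t m = (32 - n) & 31;
    return (x >> m) | (x << ((32 - m) & 31));
}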
// ===============================================================
@ -815,37 +861,84 @@ MacroAssembler::rotateRight64(Imm32 count, Register64 input, Register64 dest, Re
void
MacroAssembler::clz32(Register src, Register dest, bool knownNotZero)
{
MOZ_CRASH("NYI: clz32");
Clz(ARMRegister(dest, 32), ARMRegister(src, 32));
}
void
MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero)
{
MOZ_CRASH("NYI: ctz32");
Rbit(ARMRegister(dest, 32), ARMRegister(src, 32));
Clz(ARMRegister(dest, 32), ARMRegister(dest, 32));
}
void
MacroAssembler::clz64(Register64 src, Register dest)
{
MOZ_CRASH("NYI: clz64");
Clz(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
}
void
MacroAssembler::ctz64(Register64 src, Register dest)
{
MOZ_CRASH("NYI: ctz64");
Rbit(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
Clz(ARMRegister(dest, 64), ARMRegister(dest, 64));
}
void
MacroAssembler::popcnt32(Register src, Register dest, Register temp)
MacroAssembler::popcnt32(Register src_, Register dest_, Register tmp_)
{
MOZ_CRASH("NYI: popcnt32");
MOZ_ASSERT(tmp_ != Register::Invalid());
// Equivalent to mozilla::CountPopulation32().
ARMRegister src(src_, 32);
ARMRegister dest(dest_, 32);
ARMRegister tmp(tmp_, 32);
Mov(tmp, src);
if (src_ != dest_)
Mov(dest, src);
Lsr(dest, dest, 1);
And(dest, dest, 0x55555555);
Sub(dest, tmp, dest);
Lsr(tmp, dest, 2);
And(tmp, tmp, 0x33333333);
And(dest, dest, 0x33333333);
Add(dest, tmp, dest);
Add(dest, dest, Operand(dest, vixl::LSR, 4));
And(dest, dest, 0x0F0F0F0F);
Add(dest, dest, Operand(dest, vixl::LSL, 8));
Add(dest, dest, Operand(dest, vixl::LSL, 16));
Lsr(dest, dest, 24);
}
void
MacroAssembler::popcnt64(Register64 src, Register64 dest, Register temp)
MacroAssembler::popcnt64(Register64 src_, Register64 dest_, Register tmp_)
{
MOZ_CRASH("NYI: popcnt64");
MOZ_ASSERT(tmp_ != Register::Invalid());
// Equivalent to mozilla::CountPopulation64(), though likely more efficient.
ARMRegister src(src_.reg, 64);
ARMRegister dest(dest_.reg, 64);
ARMRegister tmp(tmp_, 64);
Mov(tmp, src);
if (src_ != dest_)
Mov(dest, src);
Lsr(dest, dest, 1);
And(dest, dest, 0x5555555555555555);
Sub(dest, tmp, dest);
Lsr(tmp, dest, 2);
And(tmp, tmp, 0x3333333333333333);
And(dest, dest, 0x3333333333333333);
Add(dest, tmp, dest);
Add(dest, dest, Operand(dest, vixl::LSR, 4));
And(dest, dest, 0x0F0F0F0F0F0F0F0F);
Add(dest, dest, Operand(dest, vixl::LSL, 8));
Add(dest, dest, Operand(dest, vixl::LSL, 16));
Add(dest, dest, Operand(dest, vixl::LSL, 32));
Lsr(dest, dest, 56);
}
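The popcnt32/popcnt64 sequences above are the classic SWAR (parallel bit-count) reduction. A standalone C++ sketch of the 32-bit variant, useful for checking the masks and shifts against a naive count:

#include <cassert>
#include <cstdint>

static uint32_t swarPopcount32(uint32_t x)
{
    x = x - ((x >> 1) & 0x55555555u);                  // 2-bit sums
    x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // 4-bit sums
    x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // per-byte sums
    x += x << 8;                                       // fold bytes into
    x += x << 16;                                      // the top byte
    return x >> 24;
}

static void checkSwarPopcount32()
{
    for (uint32_t v : {0u, 1u, 0xFFu, 0x80000000u, 0xFFFFFFFFu}) {
        uint32_t naive = 0;
        for (uint32_t t = v; t; t >>= 1)
            naive += t & 1;
        assert(swarPopcount32(v) == naive);
    }
}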
// ===============================================================
@ -929,13 +1022,19 @@ MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, L
void
MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
{
MOZ_CRASH("NYI: branch64 reg-imm");
Cmp(ARMRegister(lhs.reg, 64), val.value);
B(success, cond);
if (fail)
B(fail);
}
void
MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
{
MOZ_CRASH("NYI: branch64 reg-reg");
Cmp(ARMRegister(lhs.reg, 64), ARMRegister(rhs.reg, 64));
B(success, cond);
if (fail)
B(fail);
}
void
@ -1707,7 +1806,8 @@ MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMag
void
MacroAssembler::branchToComputedAddress(const BaseIndex& addr)
{
MOZ_CRASH("branchToComputedAddress");
// Not used by Rabaldr.
MOZ_CRASH("NYI - branchToComputedAddress");
}
void
@ -1760,6 +1860,12 @@ MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, R
Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64), cond);
}
void
MacroAssembler::spectreMovePtr(Condition cond, Register src, Register dest)
{
Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64), cond);
}
void
MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
Label* failure)
@ -1826,7 +1932,12 @@ MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
void
MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
{
MOZ_CRASH("NYI");
if (barrier == MembarStoreStore)
Dmb(vixl::InnerShareable, vixl::BarrierWrites);
else if (barrier == MembarLoadLoad)
Dmb(vixl::InnerShareable, vixl::BarrierReads);
else if (barrier)
Dmb(vixl::InnerShareable, vixl::BarrierAll);
}
// ===============================================================
@ -1853,14 +1964,16 @@ template <class L>
void
MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, L label)
{
MOZ_CRASH("NYI");
// Not used on ARM64; we rely on signal handling instead.
MOZ_CRASH("NYI - wasmBoundsCheck");
}
template <class L>
void
MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, L label)
{
MOZ_CRASH("NYI");
// Not used on ARM64; we rely on signal handling instead.
MOZ_CRASH("NYI - wasmBoundsCheck");
}
//}}} check_macroassembler_style
@ -1950,14 +2063,29 @@ MacroAssemblerCompat::moveStackPtrTo(Register dest)
void
MacroAssemblerCompat::loadStackPtr(const Address& src)
{
Ldr(GetStackPointer64(), toMemOperand(src));
syncStackPtr();
if (sp.Is(GetStackPointer64())) {
vixl::UseScratchRegisterScope temps(this);
const ARMRegister scratch = temps.AcquireX();
Ldr(scratch, toMemOperand(src));
Mov(sp, scratch);
// syncStackPtr() not needed since our SP is the real SP.
} else {
Ldr(GetStackPointer64(), toMemOperand(src));
syncStackPtr();
}
}
void
MacroAssemblerCompat::storeStackPtr(const Address& dest)
{
Str(GetStackPointer64(), toMemOperand(dest));
if (sp.Is(GetStackPointer64())) {
vixl::UseScratchRegisterScope temps(this);
const ARMRegister scratch = temps.AcquireX();
Mov(scratch, sp);
Str(scratch, toMemOperand(dest));
} else {
Str(GetStackPointer64(), toMemOperand(dest));
}
}
void
@ -2009,12 +2137,9 @@ MacroAssemblerCompat::ensureDouble(const ValueOperand& source, FloatRegister des
{
Label isDouble, done;
// TODO: splitTagForTest really should not leak a scratch register.
Register tag = splitTagForTest(source);
{
vixl::UseScratchRegisterScope temps(this);
temps.Exclude(ARMRegister(tag, 64));
ScratchTagScope tag(asMasm(), source);
splitTagForTest(source, tag);
asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
}

Diff not shown because of its large size.

View file

@ -40,6 +40,8 @@ struct ImmTag : public Imm32
{ }
};
class ScratchTagScope;
class MacroAssemblerCompat : public vixl::MacroAssembler
{
public:
@ -516,12 +518,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
}
using vixl::MacroAssembler::B;
void B(wasm::OldTrapDesc) {
MOZ_CRASH("NYI");
}
void B(wasm::OldTrapDesc, Condition cond) {
MOZ_CRASH("NYI");
}
void B(wasm::OldTrapDesc, Condition cond = Always);
void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
bool negativeZeroCheck = true)
@ -697,7 +694,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
Br(vixl::ip0);
}
void jump(wasm::OldTrapDesc target) {
MOZ_CRASH("NYI");
B(target);
}
void align(int alignment) {
@ -710,7 +707,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
armbuffer_.align(alignment);
}
void nopAlign(int alignment) {
MOZ_CRASH("NYI");
armbuffer_.align(alignment);
}
void movePtr(Register src, Register dest) {
@ -1183,14 +1180,8 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
splitTag(dest, dest);
}
// Extracts the tag of a value and places it in ScratchReg.
Register splitTagForTest(const ValueOperand& value) {
vixl::UseScratchRegisterScope temps(this);
const ARMRegister scratch64 = temps.AcquireX();
MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
Lsr(scratch64, ARMRegister(value.valueReg(), 64), JSVAL_TAG_SHIFT);
return scratch64.asUnsized(); // FIXME: Surely we can make a better interface.
}
// Extracts the tag of a value and places it in |tag|.
inline void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
void cmpTag(const ValueOperand& operand, ImmTag tag) {
MOZ_CRASH("cmpTag");
}
@ -1400,7 +1391,7 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
}
void unboxPrivate(const ValueOperand& src, Register dest) {
ubfx(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64), 1, JSVAL_TAG_SHIFT - 1);
Lsl(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64), 1);
}
void notBoolean(const ValueOperand& val) {
@ -1930,6 +1921,13 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
return value;
}
void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
Register ptr, Register ptrScratch, AnyRegister outany,
Register64 out64);
void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valany,
Register64 val64, Register memoryBase, Register ptr,
Register ptrScratch);
// Emit a BLR or NOP instruction. ToggleCall can be used to patch
// this instruction.
CodeOffset toggledCall(JitCode* target, bool enabled) {
@ -2101,6 +2099,64 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
}
};
// See documentation for ScratchTagScope and ScratchTagScopeRelease in
// MacroAssembler-x64.h.
class ScratchTagScope
{
vixl::UseScratchRegisterScope temps_;
ARMRegister scratch64_;
bool owned_;
mozilla::DebugOnly<bool> released_;
public:
ScratchTagScope(MacroAssemblerCompat& masm, const ValueOperand&)
: temps_(&masm),
owned_(true),
released_(false)
{
scratch64_ = temps_.AcquireX();
}
operator Register() {
MOZ_ASSERT(!released_);
return scratch64_.asUnsized();
}
void release() {
MOZ_ASSERT(!released_);
released_ = true;
if (owned_) {
temps_.Release(scratch64_);
owned_ = false;
}
}
void reacquire() {
MOZ_ASSERT(released_);
released_ = false;
}
};
class ScratchTagScopeRelease
{
ScratchTagScope* ts_;
public:
explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
ts_->release();
}
~ScratchTagScopeRelease() {
ts_->reacquire();
}
};
inline void
MacroAssemblerCompat::splitTagForTest(const ValueOperand& value, ScratchTagScope& tag)
{
splitTag(value, tag);
}
typedef MacroAssemblerCompat MacroAssemblerSpecific;
} // namespace jit

View file

@ -951,7 +951,38 @@ class Assembler : public MozBaseAssembler {
}
static inline DoubleCondition InvertCondition(DoubleCondition cond) {
MOZ_CRASH("Not yet implemented: InvertCondition(DoubleCondition)");
switch (cond) {
case DoubleOrdered:
return DoubleUnordered;
case DoubleEqual:
return DoubleNotEqualOrUnordered;
case DoubleNotEqual:
return DoubleEqualOrUnordered;
case DoubleGreaterThan:
return DoubleLessThanOrEqualOrUnordered;
case DoubleGreaterThanOrEqual:
return DoubleLessThanOrUnordered;
case DoubleLessThan:
return DoubleGreaterThanOrEqualOrUnordered;
case DoubleLessThanOrEqual:
return DoubleGreaterThanOrUnordered;
case DoubleUnordered:
return DoubleOrdered;
case DoubleEqualOrUnordered:
return DoubleNotEqual;
case DoubleNotEqualOrUnordered:
return DoubleEqual;
case DoubleGreaterThanOrUnordered:
return DoubleLessThanOrEqual;
case DoubleGreaterThanOrEqualOrUnordered:
return DoubleLessThan;
case DoubleLessThanOrUnordered:
return DoubleGreaterThanOrEqual;
case DoubleLessThanOrEqualOrUnordered:
return DoubleGreaterThan;
default:
MOZ_CRASH("Bad condition");
}
}
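A small C++ illustration of why each ordered condition above inverts to its ...OrUnordered counterpart: with a NaN operand every ordered comparison is false, so the logical negation has to accept the unordered case (standalone sketch, not part of the patch):

static bool doubleLessThan(double a, double b)
{
    return a < b;        // false whenever a or b is NaN
}

static bool doubleGreaterThanOrEqualOrUnordered(double a, double b)
{
    return !(a < b);     // true for NaN operands as well
}

// With a NaN argument, doubleLessThan(nan, 1.0) is false and
// doubleGreaterThanOrEqualOrUnordered(nan, 1.0) is true, so DoubleLessThan
// inverts to DoubleGreaterThanOrEqualOrUnordered.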
static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {

View file

@ -311,6 +311,8 @@ class Instruction {
bool IsNOP() const;
bool IsADR() const;
bool IsADRP() const;
bool IsMovz() const;
bool IsMovk() const;
bool IsBranchLinkImm() const;
bool IsTargetReachable(Instruction* target) const;
ptrdiff_t ImmPCRawOffset() const;

View file

@ -1480,9 +1480,25 @@ class MacroAssembler : public js::jit::Assembler {
#ifdef JS_SIMULATOR_ARM64
hlt(kUnreachableOpcode);
#else
// Branch to 0 to generate a segfault.
// lr - kInstructionSize is the address of the offending instruction.
blr(xzr);
// There are a couple of strategies we can use here. No unencoded
// instruction in the instruction set is guaranteed to remain unencoded.
// However, there are some currently (as of 2018) unencoded instructions
// that are good candidates.
//
// Ideally, unencoded instructions should be non-destructive to the register
// state, and should be unencoded at all exception levels.
//
// At the trap the pc will hold the address of the offending instruction.
// Some candidates for unencoded instructions:
//
// 0xd4a00000 (essentially dcps0, a good one since it is nonsensical and may
// remain unencoded in the future for that reason)
// 0x33000000 (bfm variant)
// 0xd67f0000 (br variant)
// 0x5ac00c00 (rbit variant)
Emit(0xd4a00000); // "dcps0", also has 16-bit payload if needed
#endif
}
void Uxtb(const Register& rd, const Register& rn) {

View file

@ -389,12 +389,12 @@ void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) {
BufferOffset Assembler::hint(SystemHint code) {
return Emit(HINT | ImmHint(code) | Rt(xzr));
return Emit(HINT | ImmHint(code));
}
void Assembler::hint(Instruction* at, SystemHint code) {
Emit(at, HINT | ImmHint(code) | Rt(xzr));
Emit(at, HINT | ImmHint(code));
}

View file

@ -59,8 +59,8 @@ class MozBaseAssembler : public js::jit::AssemblerShared {
static const size_t BufferCodeAlignment = 8;
static const size_t BufferMaxPoolOffset = 1024;
static const unsigned BufferPCBias = 0;
static const uint32_t BufferAlignmentFillInstruction = BRK | (0xdead << ImmException_offset);
static const uint32_t BufferNopFillInstruction = HINT | (31 << Rt_offset);
static const uint32_t BufferAlignmentFillInstruction = HINT | (NOP << ImmHint_offset);
static const uint32_t BufferNopFillInstruction = HINT | (NOP << ImmHint_offset);
static const unsigned BufferNumDebugNopsToInsert = 0;
#ifdef JS_DISASM_ARM64

View file

@ -95,6 +95,17 @@ bool Instruction::IsADRP() const {
}
bool Instruction::IsMovz() const {
return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
(Mask(MoveWideImmediateMask) == MOVZ_w);
}
bool Instruction::IsMovk() const {
return (Mask(MoveWideImmediateMask) == MOVK_x) ||
(Mask(MoveWideImmediateMask) == MOVK_w);
}
bool Instruction::IsBranchLinkImm() const {
return Mask(UnconditionalBranchFMask) == (UnconditionalBranchFixed | BL);
}

View file

@ -33,15 +33,16 @@
#include "threading/LockGuard.h"
#include "vm/Runtime.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmProcess.h"
#include "wasm/WasmSignalHandlers.h"
js::jit::SimulatorProcess* js::jit::SimulatorProcess::singleton_ = nullptr;
namespace vixl {
using mozilla::DebugOnly;
using js::jit::ABIFunctionType;
using js::jit::JitActivation;
using js::jit::SimulatorProcess;
Simulator::Simulator(JSContext* cx, Decoder* decoder, FILE* stream)
@ -218,13 +219,13 @@ uintptr_t* Simulator::addressOfStackLimit() {
bool Simulator::overRecursed(uintptr_t newsp) const {
if (newsp)
newsp = xreg(31, Reg31IsStackPointer);
newsp = get_sp();
return newsp <= stackLimit();
}
bool Simulator::overRecursedWithExtra(uint32_t extra) const {
uintptr_t newsp = xreg(31, Reg31IsStackPointer) - extra;
uintptr_t newsp = get_sp() - extra;
return newsp <= stackLimit();
}
@ -235,31 +236,91 @@ void Simulator::trigger_wasm_interrupt() {
}
static inline JitActivation*
GetJitActivation(JSContext* cx)
{
if (!js::wasm::CodeExists)
return nullptr;
if (!cx->activation() || !cx->activation()->isJit())
return nullptr;
return cx->activation()->asJit();
}
JS::ProfilingFrameIterator::RegisterState
Simulator::registerState()
{
JS::ProfilingFrameIterator::RegisterState state;
state.pc = (uint8_t*) get_pc();
state.fp = (uint8_t*) get_fp();
state.lr = (uint8_t*) get_lr();
state.sp = (uint8_t*) get_sp();
return state;
}
// The signal handler only redirects the PC to the interrupt stub when the PC is
// in function code. However, this guard is racy for the ARM simulator since the
// signal handler samples PC in the middle of simulating an instruction and thus
// the current PC may have advanced once since the signal handler's guard. So we
// re-check here.
void Simulator::handle_wasm_interrupt() {
void Simulator::handle_wasm_interrupt()
{
if (!js::wasm::CodeExists)
return;
uint8_t* pc = (uint8_t*)get_pc();
uint8_t* fp = (uint8_t*)xreg(30);
const js::wasm::ModuleSegment* ms = nullptr;
if (!js::wasm::InInterruptibleCode(cx_, pc, &ms))
return;
JS::ProfilingFrameIterator::RegisterState state;
state.pc = pc;
state.fp = fp;
state.lr = (uint8_t*) xreg(30);
state.sp = (uint8_t*) xreg(31);
JitActivation* act = GetJitActivation(cx_);
if (!act)
return;
if (!cx_->activation_->asJit()->startWasmInterrupt(state))
if (!act->startWasmInterrupt(registerState()))
return;
set_pc((Instruction*)ms->interruptCode());
}
bool
Simulator::handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes)
{
JitActivation* act = GetJitActivation(cx_);
if (!act)
return false;
uint8_t* pc = (uint8_t*)get_pc();
uint8_t* fp = (uint8_t*)get_fp();
const js::wasm::CodeSegment* segment = js::wasm::LookupCodeSegment(pc);
if (!segment)
return false;
const js::wasm::ModuleSegment* moduleSegment = segment->asModule();
js::wasm::Instance* instance = js::wasm::LookupFaultingInstance(*moduleSegment, pc, fp);
if (!instance)
return false;
MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code());
if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
return false;
const js::wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess) {
if (!act->startWasmInterrupt(registerState()))
MOZ_CRASH("Cannot start interrupt");
if (!instance->code().containsCodePC(pc))
MOZ_CRASH("Cannot map PC to trap handler");
set_pc((Instruction*)moduleSegment->outOfBoundsCode());
return true;
}
MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
set_pc((Instruction*)memoryAccess->trapOutOfLineCode(moduleSegment->base()));
return true;
}
int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
va_list parameters;
@ -303,12 +364,12 @@ int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
va_end(parameters);
// Call must transition back to native code on exit.
VIXL_ASSERT(xreg(30) == int64_t(kEndOfSimAddress));
VIXL_ASSERT(get_lr() == int64_t(kEndOfSimAddress));
// Execute the simulation.
DebugOnly<int64_t> entryStack = xreg(31, Reg31IsStackPointer);
DebugOnly<int64_t> entryStack = get_sp();
RunFrom((Instruction*)entry);
DebugOnly<int64_t> exitStack = xreg(31, Reg31IsStackPointer);
DebugOnly<int64_t> exitStack = get_sp();
VIXL_ASSERT(entryStack == exitStack);
int64_t result = xreg(0);
@ -403,6 +464,29 @@ void* Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType ty
return redirection->addressOfSvcInstruction();
}
bool
Simulator::handle_wasm_ill_fault()
{
JitActivation* act = GetJitActivation(cx_);
if (!act)
return false;
uint8_t* pc = (uint8_t*)get_pc();
const js::wasm::CodeSegment* segment = js::wasm::LookupCodeSegment(pc);
if (!segment || !segment->isModule())
return false;
const js::wasm::ModuleSegment* moduleSegment = segment->asModule();
js::wasm::Trap trap;
js::wasm::BytecodeOffset bytecode;
if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
return false;
act->startWasmTrap(trap, bytecode.offset, registerState());
set_pc((Instruction*)moduleSegment->trapCode());
return true;
}
void Simulator::VisitException(const Instruction* instr) {
switch (instr->Mask(ExceptionMask)) {
@ -415,7 +499,8 @@ void Simulator::VisitException(const Instruction* instr) {
case HLT:
switch (instr->ImmException()) {
case kUnreachableOpcode:
DoUnreachable(instr);
if (!handle_wasm_ill_fault())
DoUnreachable(instr);
return;
case kTraceOpcode:
DoTrace(instr);
@ -439,12 +524,12 @@ void Simulator::VisitException(const Instruction* instr) {
return;
case kMarkStackPointer: {
js::AutoEnterOOMUnsafeRegion oomUnsafe;
if (!spStack_.append(xreg(31, Reg31IsStackPointer)))
if (!spStack_.append(get_sp()))
oomUnsafe.crash("tracking stack for ARM64 simulator");
return;
}
case kCheckStackPointer: {
int64_t current = xreg(31, Reg31IsStackPointer);
int64_t current = get_sp();
int64_t expected = spStack_.popCopy();
VIXL_ASSERT(current == expected);
return;
@ -492,6 +577,10 @@ typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2,
int64_t arg4, int64_t arg5, int64_t arg6);
typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);
typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0, int32_t arg1, int32_t arg2,
int64_t arg3);
typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0, int32_t arg1, int64_t arg2,
int64_t arg3);
typedef int64_t (*Prototype_Int_Double)(double arg0);
typedef int64_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
@ -500,6 +589,7 @@ typedef int64_t (*Prototype_Int_IntDoubleIntInt)(uint64_t arg0, double arg1,
uint64_t arg2, uint64_t arg3);
typedef float (*Prototype_Float32_Float32)(float arg0);
typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
typedef double (*Prototype_Double_None)();
typedef double (*Prototype_Double_Double)(double arg0);
@ -538,7 +628,7 @@ Simulator::VisitCallRedirection(const Instruction* instr)
DebugOnly<int64_t> x27 = xreg(27);
DebugOnly<int64_t> x28 = xreg(28);
DebugOnly<int64_t> x29 = xreg(29);
DebugOnly<int64_t> savedSP = xreg(31, Reg31IsStackPointer);
DebugOnly<int64_t> savedSP = get_sp();
// Remember LR for returning from the "call".
int64_t savedLR = xreg(30);
@ -561,6 +651,7 @@ Simulator::VisitCallRedirection(const Instruction* instr)
double d2 = dreg(2);
double d3 = dreg(3);
float s0 = sreg(0);
float s1 = sreg(1);
// Dispatch the call and set the return value.
switch (redir->type()) {
@ -610,6 +701,16 @@ Simulator::VisitCallRedirection(const Instruction* instr)
setGPR64Result(ret);
break;
}
case js::jit::Args_Int_GeneralGeneralGeneralInt64: {
int64_t ret = reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(nativeFn)(x0, x1, x2, x3);
setGPR64Result(ret);
break;
}
case js::jit::Args_Int_GeneralGeneralInt64Int64: {
int64_t ret = reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(nativeFn)(x0, x1, x2, x3);
setGPR64Result(ret);
break;
}
// Cases with GPR return type. This can be int32 or int64, but int64 is a safer assumption.
case js::jit::Args_Int_Double: {
@ -641,6 +742,11 @@ Simulator::VisitCallRedirection(const Instruction* instr)
setFP32Result(ret);
break;
}
case js::jit::Args_Float32_Float32Float32: {
float ret = reinterpret_cast<Prototype_Float32_Float32Float32>(nativeFn)(s0, s1);
setFP32Result(ret);
break;
}
// Cases with double return type.
case js::jit::Args_Double_None: {
@ -705,7 +811,7 @@ Simulator::VisitCallRedirection(const Instruction* instr)
VIXL_ASSERT(xreg(29) == x29);
// Assert that the stack is unchanged.
VIXL_ASSERT(savedSP == xreg(31, Reg31IsStackPointer));
VIXL_ASSERT(savedSP == get_sp());
// Simulate a return.
set_lr(savedLR);

View file

@ -1012,7 +1012,38 @@ void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
LoadStoreHelper(instr, offset, Offset);
}
template<typename T>
static T Faulted() {
return ~0;
}
template<>
Simulator::qreg_t Faulted() {
static_assert(kQRegSizeInBytes == 16, "Known constraint");
static Simulator::qreg_t dummy = { {
255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255
} };
return dummy;
}
template<typename T> T
Simulator::Read(uintptr_t address)
{
address = Memory::AddressUntag(address);
if (handle_wasm_seg_fault(address, sizeof(T)))
return Faulted<T>();
return Memory::Read<T>(address);
}
template <typename T> void
Simulator::Write(uintptr_t address, T value)
{
address = Memory::AddressUntag(address);
if (handle_wasm_seg_fault(address, sizeof(T)))
return;
Memory::Write<T>(address, value);
}
void Simulator::LoadStoreHelper(const Instruction* instr,
int64_t offset,
@ -1023,43 +1054,43 @@ void Simulator::LoadStoreHelper(const Instruction* instr,
LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
switch (op) {
case LDRB_w:
set_wreg(srcdst, Memory::Read<uint8_t>(address), NoRegLog); break;
set_wreg(srcdst, Read<uint8_t>(address), NoRegLog); break;
case LDRH_w:
set_wreg(srcdst, Memory::Read<uint16_t>(address), NoRegLog); break;
set_wreg(srcdst, Read<uint16_t>(address), NoRegLog); break;
case LDR_w:
set_wreg(srcdst, Memory::Read<uint32_t>(address), NoRegLog); break;
set_wreg(srcdst, Read<uint32_t>(address), NoRegLog); break;
case LDR_x:
set_xreg(srcdst, Memory::Read<uint64_t>(address), NoRegLog); break;
set_xreg(srcdst, Read<uint64_t>(address), NoRegLog); break;
case LDRSB_w:
set_wreg(srcdst, Memory::Read<int8_t>(address), NoRegLog); break;
set_wreg(srcdst, Read<int8_t>(address), NoRegLog); break;
case LDRSH_w:
set_wreg(srcdst, Memory::Read<int16_t>(address), NoRegLog); break;
set_wreg(srcdst, Read<int16_t>(address), NoRegLog); break;
case LDRSB_x:
set_xreg(srcdst, Memory::Read<int8_t>(address), NoRegLog); break;
set_xreg(srcdst, Read<int8_t>(address), NoRegLog); break;
case LDRSH_x:
set_xreg(srcdst, Memory::Read<int16_t>(address), NoRegLog); break;
set_xreg(srcdst, Read<int16_t>(address), NoRegLog); break;
case LDRSW_x:
set_xreg(srcdst, Memory::Read<int32_t>(address), NoRegLog); break;
set_xreg(srcdst, Read<int32_t>(address), NoRegLog); break;
case LDR_b:
set_breg(srcdst, Memory::Read<uint8_t>(address), NoRegLog); break;
set_breg(srcdst, Read<uint8_t>(address), NoRegLog); break;
case LDR_h:
set_hreg(srcdst, Memory::Read<uint16_t>(address), NoRegLog); break;
set_hreg(srcdst, Read<uint16_t>(address), NoRegLog); break;
case LDR_s:
set_sreg(srcdst, Memory::Read<float>(address), NoRegLog); break;
set_sreg(srcdst, Read<float>(address), NoRegLog); break;
case LDR_d:
set_dreg(srcdst, Memory::Read<double>(address), NoRegLog); break;
set_dreg(srcdst, Read<double>(address), NoRegLog); break;
case LDR_q:
set_qreg(srcdst, Memory::Read<qreg_t>(address), NoRegLog); break;
set_qreg(srcdst, Read<qreg_t>(address), NoRegLog); break;
case STRB_w: Memory::Write<uint8_t>(address, wreg(srcdst)); break;
case STRH_w: Memory::Write<uint16_t>(address, wreg(srcdst)); break;
case STR_w: Memory::Write<uint32_t>(address, wreg(srcdst)); break;
case STR_x: Memory::Write<uint64_t>(address, xreg(srcdst)); break;
case STR_b: Memory::Write<uint8_t>(address, breg(srcdst)); break;
case STR_h: Memory::Write<uint16_t>(address, hreg(srcdst)); break;
case STR_s: Memory::Write<float>(address, sreg(srcdst)); break;
case STR_d: Memory::Write<double>(address, dreg(srcdst)); break;
case STR_q: Memory::Write<qreg_t>(address, qreg(srcdst)); break;
case STRB_w: Write<uint8_t>(address, wreg(srcdst)); break;
case STRH_w: Write<uint16_t>(address, wreg(srcdst)); break;
case STR_w: Write<uint32_t>(address, wreg(srcdst)); break;
case STR_x: Write<uint64_t>(address, xreg(srcdst)); break;
case STR_b: Write<uint8_t>(address, breg(srcdst)); break;
case STR_h: Write<uint16_t>(address, hreg(srcdst)); break;
case STR_s: Write<float>(address, sreg(srcdst)); break;
case STR_d: Write<double>(address, dreg(srcdst)); break;
case STR_q: Write<qreg_t>(address, qreg(srcdst)); break;
// Ignore prfm hint instructions.
case PRFM: break;
@ -1129,58 +1160,58 @@ void Simulator::LoadStorePairHelper(const Instruction* instr,
// Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
// will print a more detailed log.
case LDP_w: {
set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
set_wreg(rt2, Memory::Read<uint32_t>(address2), NoRegLog);
set_wreg(rt, Read<uint32_t>(address), NoRegLog);
set_wreg(rt2, Read<uint32_t>(address2), NoRegLog);
break;
}
case LDP_s: {
set_sreg(rt, Memory::Read<float>(address), NoRegLog);
set_sreg(rt2, Memory::Read<float>(address2), NoRegLog);
set_sreg(rt, Read<float>(address), NoRegLog);
set_sreg(rt2, Read<float>(address2), NoRegLog);
break;
}
case LDP_x: {
set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
set_xreg(rt2, Memory::Read<uint64_t>(address2), NoRegLog);
set_xreg(rt, Read<uint64_t>(address), NoRegLog);
set_xreg(rt2, Read<uint64_t>(address2), NoRegLog);
break;
}
case LDP_d: {
set_dreg(rt, Memory::Read<double>(address), NoRegLog);
set_dreg(rt2, Memory::Read<double>(address2), NoRegLog);
set_dreg(rt, Read<double>(address), NoRegLog);
set_dreg(rt2, Read<double>(address2), NoRegLog);
break;
}
case LDP_q: {
set_qreg(rt, Memory::Read<qreg_t>(address), NoRegLog);
set_qreg(rt2, Memory::Read<qreg_t>(address2), NoRegLog);
set_qreg(rt, Read<qreg_t>(address), NoRegLog);
set_qreg(rt2, Read<qreg_t>(address2), NoRegLog);
break;
}
case LDPSW_x: {
set_xreg(rt, Memory::Read<int32_t>(address), NoRegLog);
set_xreg(rt2, Memory::Read<int32_t>(address2), NoRegLog);
set_xreg(rt, Read<int32_t>(address), NoRegLog);
set_xreg(rt2, Read<int32_t>(address2), NoRegLog);
break;
}
case STP_w: {
Memory::Write<uint32_t>(address, wreg(rt));
Memory::Write<uint32_t>(address2, wreg(rt2));
Write<uint32_t>(address, wreg(rt));
Write<uint32_t>(address2, wreg(rt2));
break;
}
case STP_s: {
Memory::Write<float>(address, sreg(rt));
Memory::Write<float>(address2, sreg(rt2));
Write<float>(address, sreg(rt));
Write<float>(address2, sreg(rt2));
break;
}
case STP_x: {
Memory::Write<uint64_t>(address, xreg(rt));
Memory::Write<uint64_t>(address2, xreg(rt2));
Write<uint64_t>(address, xreg(rt));
Write<uint64_t>(address2, xreg(rt2));
break;
}
case STP_d: {
Memory::Write<double>(address, dreg(rt));
Memory::Write<double>(address2, dreg(rt2));
Write<double>(address, dreg(rt));
Write<double>(address2, dreg(rt2));
break;
}
case STP_q: {
Memory::Write<qreg_t>(address, qreg(rt));
Memory::Write<qreg_t>(address2, qreg(rt2));
Write<qreg_t>(address, qreg(rt));
Write<qreg_t>(address2, qreg(rt2));
break;
}
default: VIXL_UNREACHABLE();
@ -1276,32 +1307,32 @@ void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
case LDXRB_w:
case LDAXRB_w:
case LDARB_w:
set_wreg(rt, Memory::Read<uint8_t>(address), NoRegLog);
set_wreg(rt, Read<uint8_t>(address), NoRegLog);
break;
case LDXRH_w:
case LDAXRH_w:
case LDARH_w:
set_wreg(rt, Memory::Read<uint16_t>(address), NoRegLog);
set_wreg(rt, Read<uint16_t>(address), NoRegLog);
break;
case LDXR_w:
case LDAXR_w:
case LDAR_w:
set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
set_wreg(rt, Read<uint32_t>(address), NoRegLog);
break;
case LDXR_x:
case LDAXR_x:
case LDAR_x:
set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
set_xreg(rt, Read<uint64_t>(address), NoRegLog);
break;
case LDXP_w:
case LDAXP_w:
set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
set_wreg(rt2, Memory::Read<uint32_t>(address + element_size), NoRegLog);
set_wreg(rt, Read<uint32_t>(address), NoRegLog);
set_wreg(rt2, Read<uint32_t>(address + element_size), NoRegLog);
break;
case LDXP_x:
case LDAXP_x:
set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
set_xreg(rt2, Memory::Read<uint64_t>(address + element_size), NoRegLog);
set_xreg(rt, Read<uint64_t>(address), NoRegLog);
set_xreg(rt2, Read<uint64_t>(address + element_size), NoRegLog);
break;
default:
VIXL_UNREACHABLE();
@ -1341,32 +1372,32 @@ void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
case STXRB_w:
case STLXRB_w:
case STLRB_w:
Memory::Write<uint8_t>(address, wreg(rt));
Write<uint8_t>(address, wreg(rt));
break;
case STXRH_w:
case STLXRH_w:
case STLRH_w:
Memory::Write<uint16_t>(address, wreg(rt));
Write<uint16_t>(address, wreg(rt));
break;
case STXR_w:
case STLXR_w:
case STLR_w:
Memory::Write<uint32_t>(address, wreg(rt));
Write<uint32_t>(address, wreg(rt));
break;
case STXR_x:
case STLXR_x:
case STLR_x:
Memory::Write<uint64_t>(address, xreg(rt));
Write<uint64_t>(address, xreg(rt));
break;
case STXP_w:
case STLXP_w:
Memory::Write<uint32_t>(address, wreg(rt));
Memory::Write<uint32_t>(address + element_size, wreg(rt2));
Write<uint32_t>(address, wreg(rt));
Write<uint32_t>(address + element_size, wreg(rt2));
break;
case STXP_x:
case STLXP_x:
Memory::Write<uint64_t>(address, xreg(rt));
Memory::Write<uint64_t>(address + element_size, xreg(rt2));
Write<uint64_t>(address, xreg(rt));
Write<uint64_t>(address + element_size, xreg(rt2));
break;
default:
VIXL_UNREACHABLE();
@ -1393,27 +1424,27 @@ void Simulator::VisitLoadLiteral(const Instruction* instr) {
// Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
// print a more detailed log.
case LDR_w_lit:
set_wreg(rt, Memory::Read<uint32_t>(address), NoRegLog);
set_wreg(rt, Read<uint32_t>(address), NoRegLog);
LogRead(address, rt, kPrintWReg);
break;
case LDR_x_lit:
set_xreg(rt, Memory::Read<uint64_t>(address), NoRegLog);
set_xreg(rt, Read<uint64_t>(address), NoRegLog);
LogRead(address, rt, kPrintXReg);
break;
case LDR_s_lit:
set_sreg(rt, Memory::Read<float>(address), NoRegLog);
set_sreg(rt, Read<float>(address), NoRegLog);
LogVRead(address, rt, kPrintSReg);
break;
case LDR_d_lit:
set_dreg(rt, Memory::Read<double>(address), NoRegLog);
set_dreg(rt, Read<double>(address), NoRegLog);
LogVRead(address, rt, kPrintDReg);
break;
case LDR_q_lit:
set_qreg(rt, Memory::Read<qreg_t>(address), NoRegLog);
set_qreg(rt, Read<qreg_t>(address), NoRegLog);
LogVRead(address, rt, kPrintReg1Q);
break;
case LDRSW_x_lit:
set_xreg(rt, Memory::Read<int32_t>(address), NoRegLog);
set_xreg(rt, Read<int32_t>(address), NoRegLog);
LogRead(address, rt, kPrintWReg);
break;
@ -2242,7 +2273,7 @@ void Simulator::SysOp_W(int op, int64_t val) {
case CIVAC: {
// Perform a dummy memory access to ensure that we have read access
// to the specified address.
volatile uint8_t y = Memory::Read<uint8_t>(val);
volatile uint8_t y = Read<uint8_t>(val);
USE(y);
// TODO: Implement "case ZVA:".
break;

View file

@ -721,6 +721,9 @@ class Simulator : public DecoderVisitor {
static bool supportsAtomics() {
return true;
}
template<typename T> T Read(uintptr_t address);
template <typename T> void Write(uintptr_t address_, T value);
JS::ProfilingFrameIterator::RegisterState registerState();
void ResetState();
@ -731,6 +734,9 @@ class Simulator : public DecoderVisitor {
// Simulation helpers.
const Instruction* pc() const { return pc_; }
const Instruction* get_pc() const { return pc_; }
int64_t get_sp() const { return xreg(31, Reg31IsStackPointer); }
int64_t get_lr() const { return xreg(30); }
int64_t get_fp() const { return xreg(29); }
template <typename T>
T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); }
@ -742,6 +748,8 @@ class Simulator : public DecoderVisitor {
void trigger_wasm_interrupt();
void handle_wasm_interrupt();
bool handle_wasm_ill_fault();
bool handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes);
void increment_pc() {
if (!pc_modified_) {

View file

@ -1217,35 +1217,35 @@ AssemblerMIPSShared::as_dextu(Register rt, Register rs, uint16_t pos, uint16_t s
// FP instructions
BufferOffset
AssemblerMIPSShared::as_ld(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_ldc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("ldc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode());
spew("ldc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_ldc1, base, ft, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_sd(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_sdc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("sdc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode());
spew("sdc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_sdc1, base, ft, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_ls(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_lwc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("lwc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode());
spew("lwc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_lwc1, base, ft, Imm16(off)).encode());
}
BufferOffset
AssemblerMIPSShared::as_ss(FloatRegister fd, Register base, int32_t off)
AssemblerMIPSShared::as_swc1(FloatRegister ft, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::IsInSignedRange(off));
spew("swc1 %3s, (0x%x)%2s", fd.name(), off, base.name());
return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode());
spew("swc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
return writeInst(InstImm(op_swc1, base, ft, Imm16(off)).encode());
}
BufferOffset

View file

@ -1139,13 +1139,11 @@ class AssemblerMIPSShared : public AssemblerShared
// FP instructions
// Use these two functions only when you are sure the address is aligned.
// Otherwise, use ma_ld and ma_sd.
BufferOffset as_ld(FloatRegister fd, Register base, int32_t off);
BufferOffset as_sd(FloatRegister fd, Register base, int32_t off);
BufferOffset as_ldc1(FloatRegister ft, Register base, int32_t off);
BufferOffset as_sdc1(FloatRegister ft, Register base, int32_t off);
BufferOffset as_ls(FloatRegister fd, Register base, int32_t off);
BufferOffset as_ss(FloatRegister fd, Register base, int32_t off);
BufferOffset as_lwc1(FloatRegister ft, Register base, int32_t off);
BufferOffset as_swc1(FloatRegister ft, Register base, int32_t off);
// Loongson-specific FP load and store instructions
BufferOffset as_gsldl(FloatRegister fd, Register base, int32_t off);

View file

@ -1592,7 +1592,7 @@ CodeGeneratorMIPSShared::visitCopySignD(LCopySignD* ins)
void
CodeGeneratorMIPSShared::visitValue(LValue* value)
{
const ValueOperand out = GetValueOutput(value);
const ValueOperand out = ToOutValue(value);
masm.moveValue(value->value(), out);
}

View file

@ -1037,6 +1037,28 @@ MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
MOZ_CRASH("NYI");
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
{

View file

@ -1118,14 +1118,6 @@ MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value)
}
}
void
MacroAssemblerMIPSShared::ma_liNegZero(FloatRegister dest)
{
moveToDoubleLo(zero, dest);
ma_li(ScratchRegister, Imm32(INT_MIN));
asMasm().moveToDoubleHi(ScratchRegister, dest);
}
void
MacroAssemblerMIPSShared::ma_sd(FloatRegister ft, BaseIndex address)
{
@ -1178,6 +1170,20 @@ MacroAssemblerMIPSShared::ma_ss(FloatRegister ft, BaseIndex address)
asMasm().ma_ss(ft, Address(SecondScratchReg, address.offset));
}
void
MacroAssemblerMIPSShared::ma_ld(FloatRegister ft, const BaseIndex& src)
{
asMasm().computeScaledAddress(src, SecondScratchReg);
asMasm().ma_ld(ft, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSShared::ma_ls(FloatRegister ft, const BaseIndex& src)
{
asMasm().computeScaledAddress(src, SecondScratchReg);
asMasm().ma_ls(ft, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label,
DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
@ -1290,6 +1296,44 @@ MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest, FloatRegister sec
bind(&done);
}
void
MacroAssemblerMIPSShared::loadDouble(const Address& address, FloatRegister dest)
{
asMasm().ma_ld(dest, address);
}
void
MacroAssemblerMIPSShared::loadDouble(const BaseIndex& src, FloatRegister dest)
{
asMasm().ma_ld(dest, src);
}
void
MacroAssemblerMIPSShared::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
asMasm().ma_ls(dest, address);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSShared::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
asMasm().loadFloat32(src, dest);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSShared::loadFloat32(const Address& address, FloatRegister dest)
{
asMasm().ma_ls(dest, address);
}
void
MacroAssemblerMIPSShared::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
asMasm().ma_ls(dest, src);
}
void
MacroAssemblerMIPSShared::ma_call(ImmPtr dest)
{
@ -1371,7 +1415,7 @@ void
MacroAssembler::Push(FloatRegister f)
{
ma_push(f);
adjustFrame(int32_t(sizeof(double)));
adjustFrame(int32_t(f.pushSize()));
}
void
@ -1385,21 +1429,21 @@ void
MacroAssembler::Pop(FloatRegister f)
{
ma_pop(f);
adjustFrame(-int32_t(sizeof(double)));
adjustFrame(-int32_t(f.pushSize()));
}
void
MacroAssembler::Pop(const ValueOperand& val)
{
popValue(val);
framePushed_ -= sizeof(Value);
adjustFrame(-int32_t(sizeof(Value)));
}
void
MacroAssembler::PopStackPtr()
{
loadPtr(Address(StackPointer, 0), StackPointer);
framePushed_ -= sizeof(intptr_t);
adjustFrame(-int32_t(sizeof(intptr_t)));
}
@ -1972,12 +2016,10 @@ MacroAssemblerMIPSShared::wasmLoadImpl(const wasm::MemoryAccessDesc& access, Reg
asMasm().memoryBarrierBefore(access.sync());
if (isFloat) {
if (byteSize == 4) {
asMasm().loadFloat32(address, output.fpu());
} else {
asMasm().computeScaledAddress(address, SecondScratchReg);
asMasm().as_ld(output.fpu(), SecondScratchReg, 0);
}
if (byteSize == 4)
asMasm().ma_ls(output.fpu(), address);
else
asMasm().ma_ld(output.fpu(), address);
} else {
asMasm().ma_load(output.gpr(), address, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
@ -2036,15 +2078,10 @@ MacroAssemblerMIPSShared::wasmStoreImpl(const wasm::MemoryAccessDesc& access, An
asMasm().memoryBarrierBefore(access.sync());
if (isFloat) {
if (byteSize == 4) {
asMasm().storeFloat32(value.fpu(), address);
} else {
//asMasm().storeDouble(value.fpu(), address);
// For time being storeDouble for mips32 uses two store instructions,
// so we emit only one to get correct behavior in case of OOB access.
asMasm().computeScaledAddress(address, SecondScratchReg);
asMasm().as_sd(value.fpu(), SecondScratchReg, 0);
}
if (byteSize == 4)
asMasm().ma_ss(value.fpu(), address);
else
asMasm().ma_sd(value.fpu(), address);
} else {
asMasm().ma_store(value.gpr(), address,
static_cast<LoadStoreSize>(8 * byteSize),


@ -172,10 +172,12 @@ class MacroAssemblerMIPSShared : public Assembler
// fp instructions
void ma_lis(FloatRegister dest, float value);
void ma_liNegZero(FloatRegister dest);
void ma_sd(FloatRegister fd, BaseIndex address);
void ma_ss(FloatRegister fd, BaseIndex address);
void ma_sd(FloatRegister src, BaseIndex address);
void ma_ss(FloatRegister src, BaseIndex address);
void ma_ld(FloatRegister dest, const BaseIndex& src);
void ma_ls(FloatRegister dest, const BaseIndex& src);
//FP branches
void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
@ -192,12 +194,6 @@ class MacroAssemblerMIPSShared : public Assembler
void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
BufferOffset ma_BoundsCheck(Register bounded) {
BufferOffset bo = m_buffer.nextOffset();
ma_liPatchable(bounded, Imm32(0));
return bo;
}
void moveToDoubleLo(Register src, FloatRegister dest) {
as_mtc1(src, dest);
}
@ -217,6 +213,16 @@ class MacroAssemblerMIPSShared : public Assembler
void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output, MIRType fromType,
TruncFlags flags, Label* rejoin,
wasm::BytecodeOffset trapOffset);


@ -131,6 +131,7 @@ class FloatRegister : public FloatRegisterMIPSShared
}
bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
size_t size() const { return (kind_ == Double) ? 8 : 4; }
size_t pushSize() const { return size(); }
bool isInvalid() const {
return code_ == FloatRegisters::invalid_freg;
}


@ -108,14 +108,6 @@ CodeGeneratorMIPS::ToValue(LInstruction* ins, size_t pos)
return ValueOperand(typeReg, payloadReg);
}
ValueOperand
CodeGeneratorMIPS::ToOutValue(LInstruction* ins)
{
Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
return ValueOperand(typeReg, payloadReg);
}
ValueOperand
CodeGeneratorMIPS::ToTempValue(LInstruction* ins, size_t pos)
{
@ -160,8 +152,8 @@ CodeGeneratorMIPS::visitUnbox(LUnbox* unbox)
}
}
Register
CodeGeneratorMIPS::splitTagForTest(const ValueOperand& value)
void
CodeGeneratorMIPS::splitTagForTest(const ValueOperand& value, ScratchTagScope& tag)
{
return value.typeReg();
}


@ -69,11 +69,10 @@ class CodeGeneratorMIPS : public CodeGeneratorMIPSShared
void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
protected:
ValueOperand ToValue(LInstruction* ins, size_t pos);
ValueOperand ToOutValue(LInstruction* ins);
ValueOperand ToTempValue(LInstruction* ins, size_t pos);
// Functions for LTestVAndBranch.
Register splitTagForTest(const ValueOperand& value);
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
public:
CodeGeneratorMIPS(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)


@ -1016,32 +1016,6 @@ MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register
ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
}
// ========================================================================
// Memory access primitives.
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_ss(src, addr);
}
// ========================================================================
// wasm support


@ -870,7 +870,7 @@ void
MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ls(ft, address.base, address.offset);
as_lwc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -878,7 +878,7 @@ MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
as_gslsx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ls(ft, ScratchRegister, 0);
as_lwc1(ft, ScratchRegister, 0);
}
}
}
@ -886,37 +886,34 @@ MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
void
MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
{
// Use single precision load instructions so we don't have to worry about
// alignment.
int32_t off = address.offset + PAYLOAD_OFFSET;
int32_t off2 = address.offset + TAG_OFFSET;
if (Imm16::IsInSignedRange(off) && Imm16::IsInSignedRange(off2)) {
as_ls(ft, address.base, off);
as_ls(getOddPair(ft), address.base, off2);
if (Imm16::IsInSignedRange(address.offset)) {
as_ldc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(off));
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
as_ls(getOddPair(ft), ScratchRegister, TAG_OFFSET);
ma_li(ScratchRegister, Imm32(address.offset));
if (isLoongson()) {
as_gsldx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ldc1(ft, ScratchRegister, 0);
}
}
}
void
MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
{
int32_t off = address.offset + PAYLOAD_OFFSET;
int32_t off2 = address.offset + TAG_OFFSET;
if (Imm16::IsInSignedRange(off) && Imm16::IsInSignedRange(off2)) {
as_ss(ft, address.base, off);
as_ss(getOddPair(ft), address.base, off2);
if (Imm16::IsInSignedRange(address.offset)) {
as_sdc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(off));
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
as_ss(getOddPair(ft), ScratchRegister, TAG_OFFSET);
ma_li(ScratchRegister, Imm32(address.offset));
if (isLoongson()) {
as_gssdx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_sdc1(ft, ScratchRegister, 0);
}
}
}
@ -924,7 +921,7 @@ void
MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ss(ft, address.base, address.offset);
as_swc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -932,23 +929,51 @@ MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
as_gsssx(ft, address.base, ScratchRegister, 0);
} else {
as_addu(ScratchRegister, address.base, ScratchRegister);
as_ss(ft, ScratchRegister, 0);
as_swc1(ft, ScratchRegister, 0);
}
}
}
void
MacroAssemblerMIPS::ma_pop(FloatRegister fs)
MacroAssemblerMIPS::ma_ldc1WordAligned(FloatRegister ft, Register base, int32_t off)
{
ma_ld(fs.doubleOverlay(), Address(StackPointer, 0));
as_addiu(StackPointer, StackPointer, sizeof(double));
MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
Imm16::IsInSignedRange(off + TAG_OFFSET));
as_lwc1(ft, base, off + PAYLOAD_OFFSET);
as_lwc1(getOddPair(ft), base, off + TAG_OFFSET);
}
void
MacroAssemblerMIPS::ma_push(FloatRegister fs)
MacroAssemblerMIPS::ma_sdc1WordAligned(FloatRegister ft, Register base, int32_t off)
{
as_addiu(StackPointer, StackPointer, -sizeof(double));
ma_sd(fs.doubleOverlay(), Address(StackPointer, 0));
MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
Imm16::IsInSignedRange(off + TAG_OFFSET));
as_swc1(ft, base, off + PAYLOAD_OFFSET);
as_swc1(getOddPair(ft), base, off + TAG_OFFSET);
}
void
MacroAssemblerMIPS::ma_pop(FloatRegister f)
{
if (f.isDouble())
ma_ldc1WordAligned(f, StackPointer, 0);
else
as_lwc1(f, StackPointer, 0);
as_addiu(StackPointer, StackPointer, f.size());
}
void
MacroAssemblerMIPS::ma_push(FloatRegister f)
{
as_addiu(StackPointer, StackPointer, -f.size());
if(f.isDouble())
ma_sdc1WordAligned(f, StackPointer, 0);
else
as_swc1(f, StackPointer, 0);
}
bool
@ -1110,19 +1135,6 @@ MacroAssemblerMIPSCompat::loadPrivate(const Address& address, Register dest)
ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
}
void
MacroAssemblerMIPSCompat::loadDouble(const Address& address, FloatRegister dest)
{
ma_ld(dest, address);
}
void
MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ld(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSCompat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
@ -1155,33 +1167,6 @@ MacroAssemblerMIPSCompat::loadUnalignedDouble(const wasm::MemoryAccessDesc& acce
}
}
void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
loadFloat32(src, dest);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPSCompat::loadFloat32(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
}
void
MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ls(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPSCompat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
@ -2190,7 +2175,7 @@ MacroAssembler::PushRegsInMask(LiveRegisterSet set)
diffF -= sizeof(double);
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
as_sd(*iter, SecondScratchReg, -diffF);
as_sdc1(*iter, SecondScratchReg, -diffF);
diffF -= sizeof(double);
}
@ -2216,7 +2201,7 @@ MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
if (!ignore.has(*iter))
as_ld(*iter, SecondScratchReg, -diffF);
as_ldc1(*iter, SecondScratchReg, -diffF);
diffF -= sizeof(double);
}
freeStack(reservedF);
@ -2256,7 +2241,7 @@ MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register scra
diffF -= sizeof(double);
for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
as_sd(*iter, scratch, -diffF);
as_sdc1(*iter, scratch, -diffF);
diffF -= sizeof(double);
}
MOZ_ASSERT(diffF == 0);


@ -46,6 +46,25 @@ static const int32_t LOW_32_OFFSET = 4;
static const int32_t HIGH_32_OFFSET = 0;
#endif
// See documentation for ScratchTagScope and ScratchTagScopeRelease in
// MacroAssembler-x64.h.
class ScratchTagScope
{
const ValueOperand& v_;
public:
ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
operator Register() { return v_.typeReg(); }
void release() {}
void reacquire() {}
};
class ScratchTagScopeRelease
{
public:
explicit ScratchTagScopeRelease(ScratchTagScope*) {}
};
class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
{
public:
@ -53,6 +72,8 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
using MacroAssemblerMIPSShared::ma_li;
using MacroAssemblerMIPSShared::ma_ss;
using MacroAssemblerMIPSShared::ma_sd;
using MacroAssemblerMIPSShared::ma_ls;
using MacroAssemblerMIPSShared::ma_ld;
using MacroAssemblerMIPSShared::ma_load;
using MacroAssemblerMIPSShared::ma_store;
using MacroAssemblerMIPSShared::ma_cmp_set;
@ -122,13 +143,16 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
void ma_mv(FloatRegister src, ValueOperand dest);
void ma_mv(ValueOperand src, FloatRegister dest);
void ma_ls(FloatRegister fd, Address address);
void ma_ld(FloatRegister fd, Address address);
void ma_sd(FloatRegister fd, Address address);
void ma_ss(FloatRegister fd, Address address);
void ma_ls(FloatRegister ft, Address address);
void ma_ld(FloatRegister ft, Address address);
void ma_sd(FloatRegister ft, Address address);
void ma_ss(FloatRegister ft, Address address);
void ma_pop(FloatRegister fs);
void ma_push(FloatRegister fs);
void ma_ldc1WordAligned(FloatRegister ft, Register base, int32_t off);
void ma_sdc1WordAligned(FloatRegister ft, Register base, int32_t off);
void ma_pop(FloatRegister f);
void ma_push(FloatRegister f);
void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
@ -328,9 +352,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
ma_negu(reg, reg);
}
// Returns the register containing the type tag.
Register splitTagForTest(const ValueOperand& value) {
return value.typeReg();
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
MOZ_ASSERT(value.typeReg() == tag);
}
// unboxing code
@ -636,17 +659,9 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);


@ -70,24 +70,24 @@ GenerateReturn(MacroAssembler& masm, int returnCode)
MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
// Restore non-volatile registers
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s0)), s0);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s1)), s1);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s2)), s2);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s3)), s3);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s4)), s4);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s5)), s5);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s6)), s6);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s7)), s7);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, fp)), fp);
masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, ra)), ra);
masm.as_lw(s0, StackPointer, offsetof(EnterJITRegs, s0));
masm.as_lw(s1, StackPointer, offsetof(EnterJITRegs, s1));
masm.as_lw(s2, StackPointer, offsetof(EnterJITRegs, s2));
masm.as_lw(s3, StackPointer, offsetof(EnterJITRegs, s3));
masm.as_lw(s4, StackPointer, offsetof(EnterJITRegs, s4));
masm.as_lw(s5, StackPointer, offsetof(EnterJITRegs, s5));
masm.as_lw(s6, StackPointer, offsetof(EnterJITRegs, s6));
masm.as_lw(s7, StackPointer, offsetof(EnterJITRegs, s7));
masm.as_lw(fp, StackPointer, offsetof(EnterJITRegs, fp));
masm.as_lw(ra, StackPointer, offsetof(EnterJITRegs, ra));
// Restore non-volatile floating point registers
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f20)), f20);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f22)), f22);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f24)), f24);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f26)), f26);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f28)), f28);
masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f30)), f30);
masm.as_ldc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
masm.as_ldc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.freeStack(sizeof(EnterJITRegs));
@ -101,23 +101,23 @@ GeneratePrologue(MacroAssembler& masm)
// rather than the JIT'd code, because they are scanned by the conservative
// scanner.
masm.reserveStack(sizeof(EnterJITRegs));
masm.storePtr(s0, Address(StackPointer, offsetof(EnterJITRegs, s0)));
masm.storePtr(s1, Address(StackPointer, offsetof(EnterJITRegs, s1)));
masm.storePtr(s2, Address(StackPointer, offsetof(EnterJITRegs, s2)));
masm.storePtr(s3, Address(StackPointer, offsetof(EnterJITRegs, s3)));
masm.storePtr(s4, Address(StackPointer, offsetof(EnterJITRegs, s4)));
masm.storePtr(s5, Address(StackPointer, offsetof(EnterJITRegs, s5)));
masm.storePtr(s6, Address(StackPointer, offsetof(EnterJITRegs, s6)));
masm.storePtr(s7, Address(StackPointer, offsetof(EnterJITRegs, s7)));
masm.storePtr(fp, Address(StackPointer, offsetof(EnterJITRegs, fp)));
masm.storePtr(ra, Address(StackPointer, offsetof(EnterJITRegs, ra)));
masm.as_sw(s0, StackPointer, offsetof(EnterJITRegs, s0));
masm.as_sw(s1, StackPointer, offsetof(EnterJITRegs, s1));
masm.as_sw(s2, StackPointer, offsetof(EnterJITRegs, s2));
masm.as_sw(s3, StackPointer, offsetof(EnterJITRegs, s3));
masm.as_sw(s4, StackPointer, offsetof(EnterJITRegs, s4));
masm.as_sw(s5, StackPointer, offsetof(EnterJITRegs, s5));
masm.as_sw(s6, StackPointer, offsetof(EnterJITRegs, s6));
masm.as_sw(s7, StackPointer, offsetof(EnterJITRegs, s7));
masm.as_sw(fp, StackPointer, offsetof(EnterJITRegs, fp));
masm.as_sw(ra, StackPointer, offsetof(EnterJITRegs, ra));
masm.as_sd(f20, StackPointer, offsetof(EnterJITRegs, f20));
masm.as_sd(f22, StackPointer, offsetof(EnterJITRegs, f22));
masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_sdc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
masm.as_sdc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
}
@ -364,9 +364,10 @@ JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail)
// Save floating point registers
// We can use as_sd because stack is aligned.
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i ++)
masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i ++) {
masm.as_sdc1(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
}
// Pass pointer to InvalidationBailoutStack structure.
masm.movePtr(StackPointer, a0);
@ -576,10 +577,11 @@ PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
}
// Save floating point registers
// We can use as_sd because stack is aligned.
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++)
masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
BailoutStack::offsetOfFpRegs() + i * sizeof(double));
// We can use as_sdc1 because stack is aligned.
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++) {
masm.as_sdc1(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
BailoutStack::offsetOfFpRegs() + i * sizeof(double));
}
// Store the frameSize_ or tableOffset_ stored in ra
// See: JitRuntime::generateBailoutTable()
@ -775,8 +777,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
break;
case VMFunction::DoubleByRef:
// Copy double sized argument to aligned place.
masm.ma_ld(ScratchDoubleReg, Address(argsBase, argDisp));
masm.as_sd(ScratchDoubleReg, doubleArgs, doubleArgDisp);
masm.ma_ldc1WordAligned(ScratchDoubleReg, argsBase, argDisp);
masm.as_sdc1(ScratchDoubleReg, doubleArgs, doubleArgDisp);
masm.passABIArg(MoveOperand(doubleArgs, doubleArgDisp, MoveOperand::EFFECTIVE_ADDRESS),
MoveOp::GENERAL);
doubleArgDisp += sizeof(double);
@ -841,7 +843,7 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
case Type_Double:
if (cx->runtime()->jitSupportsFloatingPoint) {
masm.as_ld(ReturnDoubleReg, StackPointer, 0);
masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
} else {
masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
}


@ -122,6 +122,8 @@ class FloatRegister : public FloatRegisterMIPSShared
}
bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
size_t size() const { return (kind_ == Codes::Double) ? sizeof(double) : sizeof (float); }
// Always push doubles to maintain 8-byte stack alignment.
size_t pushSize() const { return sizeof(double); }
bool isInvalid() const {
return reg_ == FloatRegisters::invalid_freg;
}
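To make the effect of pushSize() concrete: the shared Push/Pop code earlier in this diff adjusts the frame by f.pushSize(), so on MIPS64 even a single-precision register reserves a full 8-byte slot, while on MIPS32 the slot matches the register size. A minimal, hypothetical check of that invariant (the helper name is illustrative and only the MacroAssembler interface shown in this diff is assumed):

// Hypothetical helper, not part of the patch: verifies that the frame
// bookkeeping done by Push() matches the bytes pushSize() says were reserved.
static void
PushAndCheckFrame(js::jit::MacroAssembler& masm, js::jit::FloatRegister f)
{
    uint32_t frameBefore = masm.framePushed();
    masm.Push(f);   // ma_push(f) followed by adjustFrame(f.pushSize())
    MOZ_ASSERT(masm.framePushed() == frameBefore + f.pushSize());
}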


@ -109,12 +109,6 @@ CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos)
return ValueOperand(ToRegister(ins->getOperand(pos)));
}
ValueOperand
CodeGeneratorMIPS64::ToOutValue(LInstruction* ins)
{
return ValueOperand(ToRegister(ins->getDef(0)));
}
ValueOperand
CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos)
{
@ -199,12 +193,10 @@ CodeGeneratorMIPS64::visitUnbox(LUnbox* unbox)
}
}
Register
CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value)
void
CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value, ScratchTagScope& tag)
{
MOZ_ASSERT(value.valueReg() != SecondScratchReg);
masm.splitTag(value.valueReg(), SecondScratchReg);
return SecondScratchReg;
masm.splitTag(value.valueReg(), tag);
}
void


@ -75,11 +75,10 @@ class CodeGeneratorMIPS64 : public CodeGeneratorMIPSShared
void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
protected:
ValueOperand ToValue(LInstruction* ins, size_t pos);
ValueOperand ToOutValue(LInstruction* ins);
ValueOperand ToTempValue(LInstruction* ins, size_t pos);
// Functions for LTestVAndBranch.
Register splitTagForTest(const ValueOperand& value);
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
public:
CodeGeneratorMIPS64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)


@ -768,32 +768,6 @@ MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register
as_sll(dest, dest, 0);
}
// ========================================================================
// Memory access primitives.
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
{
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_sd(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
{
ma_ss(src, addr);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
{
MOZ_ASSERT(addr.offset == 0);
ma_ss(src, addr);
}
// ========================================================================
// wasm support
@ -809,7 +783,7 @@ void
MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, Label* label)
{
SecondScratchRegisterScope scratch2(*this);
load32(boundsCheckLimit,SecondScratchReg);
load32(boundsCheckLimit, SecondScratchReg);
ma_b(index, SecondScratchReg, label, cond);
}


@ -850,7 +850,7 @@ void
MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ls(ft, address.base, address.offset);
as_lwc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -858,7 +858,7 @@ MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address)
as_gslsx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_ls(ft, ScratchRegister, 0);
as_lwc1(ft, ScratchRegister, 0);
}
}
}
@ -867,7 +867,7 @@ void
MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ld(ft, address.base, address.offset);
as_ldc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -875,7 +875,7 @@ MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address)
as_gsldx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_ld(ft, ScratchRegister, 0);
as_ldc1(ft, ScratchRegister, 0);
}
}
}
@ -884,7 +884,7 @@ void
MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_sd(ft, address.base, address.offset);
as_sdc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -892,7 +892,7 @@ MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address)
as_gssdx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_sd(ft, ScratchRegister, 0);
as_sdc1(ft, ScratchRegister, 0);
}
}
}
@ -901,7 +901,7 @@ void
MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address)
{
if (Imm16::IsInSignedRange(address.offset)) {
as_ss(ft, address.base, address.offset);
as_swc1(ft, address.base, address.offset);
} else {
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, Imm32(address.offset));
@ -909,23 +909,23 @@ MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address)
as_gsssx(ft, address.base, ScratchRegister, 0);
} else {
as_daddu(ScratchRegister, address.base, ScratchRegister);
as_ss(ft, ScratchRegister, 0);
as_swc1(ft, ScratchRegister, 0);
}
}
}
void
MacroAssemblerMIPS64::ma_pop(FloatRegister fs)
MacroAssemblerMIPS64::ma_pop(FloatRegister f)
{
ma_ld(fs, Address(StackPointer, 0));
as_ldc1(f, StackPointer, 0);
as_daddiu(StackPointer, StackPointer, sizeof(double));
}
void
MacroAssemblerMIPS64::ma_push(FloatRegister fs)
MacroAssemblerMIPS64::ma_push(FloatRegister f)
{
as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(double));
ma_sd(fs, Address(StackPointer, 0));
as_sdc1(f, StackPointer, 0);
}
bool
@ -1088,19 +1088,6 @@ MacroAssemblerMIPS64Compat::loadPrivate(const Address& address, Register dest)
ma_dsll(dest, dest, Imm32(1));
}
void
MacroAssemblerMIPS64Compat::loadDouble(const Address& address, FloatRegister dest)
{
ma_ld(dest, address);
}
void
MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ld(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPS64Compat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
@ -1120,33 +1107,6 @@ MacroAssemblerMIPS64Compat::loadUnalignedDouble(const wasm::MemoryAccessDesc& ac
moveToDouble(temp, dest);
}
void
MacroAssemblerMIPS64Compat::loadFloatAsDouble(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPS64Compat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
{
loadFloat32(src, dest);
as_cvtds(dest, dest);
}
void
MacroAssemblerMIPS64Compat::loadFloat32(const Address& address, FloatRegister dest)
{
ma_ls(dest, address);
}
void
MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
ma_ls(dest, Address(SecondScratchReg, src.offset));
}
void
MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
const BaseIndex& src, Register temp, FloatRegister dest)
@ -1847,9 +1807,12 @@ MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source, FloatRegist
Label* failure)
{
Label isDouble, done;
Register tag = splitTagForTest(source);
asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
{
ScratchTagScope tag(this, source);
splitTagForTest(source, tag);
asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
}
unboxInt32(source, ScratchRegister);
convertInt32ToDouble(ScratchRegister, dest);


@ -44,6 +44,29 @@ static constexpr ValueOperand JSReturnOperand{JSReturnReg};
static const int defaultShift = 3;
static_assert(1 << defaultShift == sizeof(JS::Value), "The defaultShift is wrong");
// See documentation for ScratchTagScope and ScratchTagScopeRelease in
// MacroAssembler-x64.h.
class ScratchTagScope : public SecondScratchRegisterScope
{
public:
ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
: SecondScratchRegisterScope(masm)
{}
};
class ScratchTagScopeRelease
{
ScratchTagScope* ts_;
public:
explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
ts_->release();
}
~ScratchTagScopeRelease() {
ts_->reacquire();
}
};
class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
{
public:
@ -51,6 +74,8 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
using MacroAssemblerMIPSShared::ma_li;
using MacroAssemblerMIPSShared::ma_ss;
using MacroAssemblerMIPSShared::ma_sd;
using MacroAssemblerMIPSShared::ma_ls;
using MacroAssemblerMIPSShared::ma_ld;
using MacroAssemblerMIPSShared::ma_load;
using MacroAssemblerMIPSShared::ma_store;
using MacroAssemblerMIPSShared::ma_cmp_set;
@ -133,13 +158,13 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
void ma_mv(FloatRegister src, ValueOperand dest);
void ma_mv(ValueOperand src, FloatRegister dest);
void ma_ls(FloatRegister fd, Address address);
void ma_ld(FloatRegister fd, Address address);
void ma_sd(FloatRegister fd, Address address);
void ma_ss(FloatRegister fd, Address address);
void ma_ls(FloatRegister ft, Address address);
void ma_ld(FloatRegister ft, Address address);
void ma_sd(FloatRegister ft, Address address);
void ma_ss(FloatRegister ft, Address address);
void ma_pop(FloatRegister fs);
void ma_push(FloatRegister fs);
void ma_pop(FloatRegister f);
void ma_push(FloatRegister f);
void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
@ -354,10 +379,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
splitTag(operand.valueReg(), dest);
}
// Returns the register containing the type tag.
Register splitTagForTest(const ValueOperand& value) {
splitTag(value, SecondScratchReg);
return SecondScratchReg;
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
splitTag(value, tag);
}
// unboxing code
@ -660,17 +683,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
Register temp, FloatRegister dest);


@ -64,6 +64,8 @@ struct EnterJITRegs
static void
GenerateReturn(MacroAssembler& masm, int returnCode)
{
MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
if (isLoongson()) {
// Restore non-volatile registers
masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
@ -92,14 +94,14 @@ GenerateReturn(MacroAssembler& masm, int returnCode)
masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
// Restore non-volatile floating point registers
masm.as_ld(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_ld(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_ld(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_ld(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_ld(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_ld(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_ld(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_ld(f31, StackPointer, offsetof(EnterJITRegs, f31));
masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_ldc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_ldc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_ldc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_ldc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
}
masm.freeStack(sizeof(EnterJITRegs));
@ -139,14 +141,14 @@ GeneratePrologue(MacroAssembler& masm)
masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
masm.as_sd(a7, StackPointer, offsetof(EnterJITRegs, a7));
masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sd(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sd(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sd(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_sd(f31, StackPointer, offsetof(EnterJITRegs, f31));
masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
masm.as_sdc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
masm.as_sdc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
masm.as_sdc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
masm.as_sdc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
}
@ -796,7 +798,7 @@ JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm, const VMFunct
case Type_Double:
if (cx->runtime()->jitSupportsFloatingPoint) {
masm.as_ld(ReturnDoubleReg, StackPointer, 0);
masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
} else {
masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
}


@ -48,7 +48,6 @@ class CodeGeneratorNone : public CodeGeneratorShared
}
void emitTableSwitchDispatch(MTableSwitch*, Register, Register) { MOZ_CRASH(); }
ValueOperand ToValue(LInstruction*, size_t) { MOZ_CRASH(); }
ValueOperand ToOutValue(LInstruction*) { MOZ_CRASH(); }
ValueOperand ToTempValue(LInstruction*, size_t) { MOZ_CRASH(); }
void generateInvalidateEpilogue() { MOZ_CRASH(); }
void setReturnDoubleRegs(LiveRegisterSet* regs) { MOZ_CRASH(); }


@ -176,6 +176,21 @@ class Operand
Operand (Register, int32_t ) { MOZ_CRASH(); }
};
class ScratchTagScope
{
public:
ScratchTagScope(MacroAssembler&, const ValueOperand) {}
operator Register() { MOZ_CRASH(); }
void release() { MOZ_CRASH(); }
void reacquire() { MOZ_CRASH(); }
};
class ScratchTagScopeRelease
{
public:
explicit ScratchTagScopeRelease(ScratchTagScope*) {}
};
class MacroAssemblerNone : public Assembler
{
public:
@ -313,7 +328,7 @@ class MacroAssemblerNone : public Assembler
template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }
Register splitTagForTest(ValueOperand) { MOZ_CRASH(); }
void splitTagForTest(ValueOperand, ScratchTagScope&) { MOZ_CRASH(); }
void boxDouble(FloatRegister, ValueOperand, FloatRegister) { MOZ_CRASH(); }
void boxNonDouble(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }


@ -193,7 +193,7 @@ ToRegisterOrInt32Constant(const LAllocation* a)
}
static inline ValueOperand
GetValueOutput(LInstruction* ins)
ToOutValue(LInstruction* ins)
{
#if defined(JS_NUNBOX32)
return ValueOperand(ToRegister(ins->getDef(TYPE_INDEX)),


@ -7788,7 +7788,7 @@ class LLoadUnboxedExpando : public LInstructionHelper<1, 1, 0>
};
// Guard that a value is in a TypeSet.
class LTypeBarrierV : public LInstructionHelper<0, BOX_PIECES, 2>
class LTypeBarrierV : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 2>
{
public:
LIR_HEADER(TypeBarrierV)
@ -7814,7 +7814,7 @@ class LTypeBarrierV : public LInstructionHelper<0, BOX_PIECES, 2>
};
// Guard that a object is in a TypeSet.
class LTypeBarrierO : public LInstructionHelper<0, 1, 1>
class LTypeBarrierO : public LInstructionHelper<1, 1, 1>
{
public:
LIR_HEADER(TypeBarrierO)
@ -7834,32 +7834,6 @@ class LTypeBarrierO : public LInstructionHelper<0, 1, 1>
}
};
// Guard that a value is in a TypeSet.
class LMonitorTypes : public LInstructionHelper<0, BOX_PIECES, 2>
{
public:
LIR_HEADER(MonitorTypes)
LMonitorTypes(const LBoxAllocation& input, const LDefinition& unboxTemp,
const LDefinition& objTemp) {
setBoxOperand(Input, input);
setTemp(0, unboxTemp);
setTemp(1, objTemp);
}
static const size_t Input = 0;
const MMonitorTypes* mir() const {
return mir_->toMonitorTypes();
}
const LDefinition* unboxTemp() {
return getTemp(0);
}
const LDefinition* objTemp() {
return getTemp(1);
}
};
// Generational write barrier used when writing an object to another object.
class LPostWriteBarrierO : public LInstructionHelper<0, 2, 1>
{


@ -270,7 +270,6 @@
_(LoadUnboxedExpando) \
_(TypeBarrierV) \
_(TypeBarrierO) \
_(MonitorTypes) \
_(PostWriteBarrierO) \
_(PostWriteBarrierS) \
_(PostWriteBarrierV) \


@ -159,6 +159,46 @@ LIRGeneratorShared::defineInt64ReuseInput(LInstructionHelper<INT64_PIECES, Ops,
}
template <size_t Ops, size_t Temps> void
LIRGeneratorShared::defineBoxReuseInput(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir,
MDefinition* mir, uint32_t operand)
{
// The input should be used at the start of the instruction, to avoid moves.
MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
#ifdef JS_NUNBOX32
MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
#endif
MOZ_ASSERT(!lir->isCall());
MOZ_ASSERT(mir->type() == MIRType::Value);
uint32_t vreg = getVirtualRegister();
#ifdef JS_NUNBOX32
static_assert(VREG_TYPE_OFFSET == 0, "Code below assumes VREG_TYPE_OFFSET == 0");
static_assert(VREG_DATA_OFFSET == 1, "Code below assumes VREG_DATA_OFFSET == 1");
LDefinition def1(LDefinition::TYPE, LDefinition::MUST_REUSE_INPUT);
def1.setReusedInput(operand);
def1.setVirtualRegister(vreg);
lir->setDef(0, def1);
getVirtualRegister();
LDefinition def2(LDefinition::PAYLOAD, LDefinition::MUST_REUSE_INPUT);
def2.setReusedInput(operand + 1);
def2.setVirtualRegister(vreg + 1);
lir->setDef(1, def2);
#else
LDefinition def(LDefinition::BOX, LDefinition::MUST_REUSE_INPUT);
def.setReusedInput(operand);
def.setVirtualRegister(vreg);
lir->setDef(0, def);
#endif
lir->setMir(mir);
mir->setVirtualRegister(vreg);
add(lir);
}
template <size_t Temps> void
LIRGeneratorShared::defineBox(details::LInstructionFixedDefsTempsHelper<BOX_PIECES, Temps>* lir,
MDefinition* mir, LDefinition::Policy policy)
@ -679,8 +719,8 @@ VirtualRegisterOfPayload(MDefinition* mir)
if (!inner->isConstant() && inner->type() != MIRType::Double && inner->type() != MIRType::Float32)
return inner->virtualRegister();
}
if (mir->isTypeBarrier())
return VirtualRegisterOfPayload(mir->getOperand(0));
if (mir->isTypeBarrier() && mir->toTypeBarrier()->canRedefineInput())
return VirtualRegisterOfPayload(mir->toTypeBarrier()->input());
return mir->virtualRegister() + VREG_DATA_OFFSET;
}
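For context, defineBoxReuseInput() above is meant to be called from a lowering function whose boxed output can alias its boxed input. A simplified, hypothetical sketch in the style of visitTypeBarrier (the method name, temps, and safepoint handling are assumptions; the real lowering has additional cases), using the LTypeBarrierV operand layout shown earlier in this diff:

// Hypothetical, simplified lowering sketch, not the actual visitTypeBarrier:
// the box output reuses the input's virtual registers, so no move is emitted
// and VirtualRegisterOfPayload() can look through the barrier.
void
LIRGenerator::visitTypeBarrierExample(MTypeBarrier* ins)   // illustrative name
{
    LTypeBarrierV* lir =
        new(alloc()) LTypeBarrierV(useBoxAtStart(ins->input()),  // operand 0 (and 1 on NUNBOX32)
                                   tempToUnbox(), temp());
    defineBoxReuseInput(lir, ins, 0);   // output shares vregs with operand 0
    assignSafepoint(lir, ins);
}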


@ -187,6 +187,10 @@ class LIRGeneratorShared : public MDefinitionVisitor
inline void defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir,
uint32_t operand);
template <size_t Ops, size_t Temps>
inline void defineBoxReuseInput(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir,
MDefinition* mir, uint32_t operand);
template <size_t Ops, size_t Temps>
inline void defineInt64ReuseInput(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
MDefinition* mir, uint32_t operand);


@ -30,12 +30,6 @@ CodeGeneratorX64::ToValue(LInstruction* ins, size_t pos)
return ValueOperand(ToRegister(ins->getOperand(pos)));
}
ValueOperand
CodeGeneratorX64::ToOutValue(LInstruction* ins)
{
return ValueOperand(ToRegister(ins->getDef(0)));
}
ValueOperand
CodeGeneratorX64::ToTempValue(LInstruction* ins, size_t pos)
{
@ -73,7 +67,7 @@ FrameSizeClass::frameSize() const
void
CodeGeneratorX64::visitValue(LValue* value)
{
ValueOperand result = GetValueOutput(value);
ValueOperand result = ToOutValue(value);
masm.moveValue(value->value(), result);
}
@ -81,7 +75,7 @@ void
CodeGeneratorX64::visitBox(LBox* box)
{
const LAllocation* in = box->getOperand(0);
ValueOperand result = GetValueOutput(box);
ValueOperand result = ToOutValue(box);
masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
}


@ -21,7 +21,6 @@ class CodeGeneratorX64 : public CodeGeneratorX86Shared
protected:
Operand ToOperand64(const LInt64Allocation& a);
ValueOperand ToValue(LInstruction* ins, size_t pos);
ValueOperand ToOutValue(LInstruction* ins);
ValueOperand ToTempValue(LInstruction* ins, size_t pos);
void storeUnboxedValue(const LAllocation* value, MIRType valueType,


@ -846,6 +846,12 @@ MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, R
cmovCCq(cond, Operand(src), dest);
}
void
MacroAssembler::spectreMovePtr(Condition cond, Register src, Register dest)
{
cmovCCq(cond, Operand(src), dest);
}
// ========================================================================
// Truncate floating point.
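The new spectreMovePtr() above is a conditional move intended for Spectre-style mitigations: immediately after a guard, the same condition is used to overwrite the guarded pointer, so a misspeculated fall-through never operates on the unchecked value. A hypothetical usage pattern (the surrounding compare, register names, and labels are assumptions, not taken from this patch):

// Hypothetical guard sketch: architecturally the cmov is a no-op on the
// success path, but on a misspeculated fall-through |obj| is already null.
masm.movePtr(ImmWord(0), scratch);                       // known-harmless replacement
masm.cmpPtr(obj, expectedPtr);
masm.j(Assembler::NotEqual, &bailout);                   // real failure path
masm.spectreMovePtr(Assembler::NotEqual, scratch, obj);  // cmov on the same flags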
@ -949,9 +955,12 @@ void
MacroAssemblerX64::ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure)
{
Label isDouble, done;
Register tag = splitTagForTest(source);
asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
{
ScratchTagScope tag(asMasm(), source);
splitTagForTest(source, tag);
asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
}
ScratchRegisterScope scratch(asMasm());
unboxInt32(source, scratch);


@ -32,6 +32,44 @@ struct ImmTag : public Imm32
{ }
};
// ScratchTagScope and ScratchTagScopeRelease are used to manage the tag
// register for splitTagForTest(), which has different register management on
// different platforms. On 64-bit platforms it requires a scratch register that
// does not interfere with other operations; on 32-bit platforms it uses a
// register that is already part of the Value.
//
// The ScratchTagScope RAII type acquires the appropriate register; a reference
// to a variable of this type is then passed to splitTagForTest().
//
// On 64-bit platforms ScratchTagScopeRelease makes the owned scratch register
// available in a dynamic scope during compilation. However it is important to
// remember that that does not preserve the register value in any way, so this
// RAII type should only be used along paths that eventually branch past further
// uses of the extracted tag value.
//
// On 32-bit platforms ScratchTagScopeRelease has no effect, since it does not
// manage a register, it only aliases a register in the ValueOperand.
class ScratchTagScope : public ScratchRegisterScope
{
public:
ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
: ScratchRegisterScope(masm)
{}
};
class ScratchTagScopeRelease
{
ScratchTagScope* ts_;
public:
explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
ts_->release();
}
~ScratchTagScopeRelease() {
ts_->reacquire();
}
};
class MacroAssemblerX64 : public MacroAssemblerX86Shared
{
private:
@ -662,19 +700,19 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
splitTag(Operand(operand), dest);
}
// Extracts the tag of a value and places it in ScratchReg.
Register splitTagForTest(const ValueOperand& value) {
splitTag(value, ScratchReg);
return ScratchReg;
// Extracts the tag of a value and places it in tag.
void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
splitTag(value, tag);
}
void cmpTag(const ValueOperand& operand, ImmTag tag) {
Register reg = splitTagForTest(operand);
ScratchTagScope reg(asMasm(), operand);
splitTagForTest(operand, reg);
cmp32(reg, tag);
}
Condition testMagic(Condition cond, const ValueOperand& src) {
ScratchRegisterScope scratch(asMasm());
splitTag(src, scratch);
ScratchTagScope scratch(asMasm(), src);
splitTagForTest(src, scratch);
return testMagic(cond, scratch);
}
Condition testError(Condition cond, const ValueOperand& src) {


@ -70,14 +70,6 @@ CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos)
return ValueOperand(typeReg, payloadReg);
}
ValueOperand
CodeGeneratorX86::ToOutValue(LInstruction* ins)
{
Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
return ValueOperand(typeReg, payloadReg);
}
ValueOperand
CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos)
{


@ -25,7 +25,6 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
protected:
ValueOperand ToValue(LInstruction* ins, size_t pos);
ValueOperand ToOutValue(LInstruction* ins);
ValueOperand ToTempValue(LInstruction* ins, size_t pos);
template <typename T> void emitWasmLoad(T* ins);


@ -1035,6 +1035,12 @@ MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, R
cmovCCl(cond, Operand(src), dest);
}
void
MacroAssembler::spectreMovePtr(Condition cond, Register src, Register dest)
{
cmovCCl(cond, Operand(src), dest);
}
// ========================================================================
// Truncate floating point.

Some files were not shown because too many files changed in this diff.