Merge mozilla-inbound to mozilla-central

This commit is contained in:
Ed Morley 2013-02-08 11:00:58 +00:00
Родитель abb237c60e 2118fd0622
Коммит 558f4d9a53
225 изменённых файлов: 9425 добавлений и 7034 удалений

Просмотреть файл

@ -207,12 +207,17 @@ SettingsListener.observe('devtools.debugger.remote-enabled', false, function(val
#ifdef MOZ_WIDGET_GONK
let enableAdb = value;
if (Services.prefs.getBoolPref('marionette.defaultPrefs.enabled')) {
// Marionette is enabled. Force adb on, since marionette requires remote
// debugging to be disabled (we don't want adb to track the remote debugger
// setting).
try {
if (Services.prefs.getBoolPref('marionette.defaultPrefs.enabled')) {
// Marionette is enabled. Force adb on, since marionette requires remote
// debugging to be disabled (we don't want adb to track the remote debugger
// setting).
enableAdb = true;
enableAdb = true;
}
} catch (e) {
// This means that the pref doesn't exist. Which is fine. We just leave
// enableAdb alone.
}
// Configure adb.

Просмотреть файл

@ -46,12 +46,6 @@ let Keyboard = {
let frameLoader = subject.QueryInterface(Ci.nsIFrameLoader);
let mm = frameLoader.messageManager;
mm.addMessageListener('Forms:Input', this);
try {
mm.loadFrameScript(kFormsFrameScript, true);
} catch (e) {
dump('Error loading ' + kFormsFrameScript + ' as frame script: ' + e + '\n');
}
},
receiveMessage: function keyboardReceiveMessage(msg) {

Просмотреть файл

@ -43,7 +43,6 @@ ProcessGlobal.prototype = {
switch (topic) {
case 'app-startup': {
Services.obs.addObserver(this, 'console-api-log-event', false);
Services.obs.addObserver(this, 'remote-browser-frame-shown', false);
break;
}
case 'console-api-log-event': {

Просмотреть файл

@ -19,8 +19,9 @@ function log(msg) {
#ifdef MOZ_WIDGET_GONK
let librecovery = (function() {
let library;
try {
let library = ctypes.open("librecovery.so");
library = ctypes.open("librecovery.so");
} catch (e) {
log("Unable to open librecovery.so");
throw Cr.NS_ERROR_FAILURE;

Просмотреть файл

@ -908,8 +908,13 @@ var SocialToolbar = {
toolbarButton.setAttribute("tooltiptext", icon.label);
let badge = icon.counter || "";
if (toolbarButton.getAttribute("badge") != badge)
toolbarButton.setAttribute("badge", badge);
toolbarButton.setAttribute("badge", badge);
let ariaLabel = icon.label;
// if there is a badge value, we must use a localizable string to insert it.
if (badge)
ariaLabel = gNavigatorBundle.getFormattedString("social.aria.toolbarButtonBadgeText",
[ariaLabel, badge]);
toolbarButton.setAttribute("aria-label", ariaLabel);
}
let socialToolbarItem = document.getElementById("social-toolbar-item");
socialToolbarItem.appendChild(toolbarButtons);

Просмотреть файл

@ -4681,9 +4681,14 @@ var TabsProgressListener = {
// We can't look for this during onLocationChange since at that point the
// document URI is not yet the about:-uri of the error page.
let doc = aWebProgress.DOMWindow.document;
if (aStateFlags & Ci.nsIWebProgressListener.STATE_STOP &&
Components.isSuccessCode(aStatus) &&
aWebProgress.DOMWindow.document.documentURI.startsWith("about:")) {
doc.documentURI.startsWith("about:") &&
!doc.documentElement.hasAttribute("hasBrowserHandlers")) {
// STATE_STOP may be received twice for documents, thus store an
// attribute to ensure handling it just once.
doc.documentElement.setAttribute("hasBrowserHandlers", "true");
aBrowser.addEventListener("click", BrowserOnClick, true);
aBrowser.addEventListener("pagehide", function onPageHide(event) {
if (event.target.defaultView.frameElement)
@ -4693,7 +4698,7 @@ var TabsProgressListener = {
}, true);
// We also want to make changes to page UI for unprivileged about pages.
BrowserOnAboutPageLoad(aWebProgress.DOMWindow.document);
BrowserOnAboutPageLoad(doc);
}
},

Просмотреть файл

@ -8,8 +8,7 @@ let proxyPrefValue;
function test() {
waitForExplicitFinish();
gBrowser.selectedTab = gBrowser.addTab();
window.addEventListener("DOMContentLoaded", checkPage, false);
let tab = gBrowser.selectedTab = gBrowser.addTab();
// Go offline and disable the proxy and cache, then try to load the test URL.
Services.io.offline = true;
@ -22,16 +21,29 @@ function test() {
Services.prefs.setBoolPref("browser.cache.disk.enable", false);
Services.prefs.setBoolPref("browser.cache.memory.enable", false);
content.location = "http://example.com/";
window.addEventListener("DOMContentLoaded", function load() {
if (content.location == "about:blank") {
info("got about:blank, which is expected once, so return");
return;
}
window.removeEventListener("DOMContentLoaded", load, false);
let observer = new MutationObserver(function (mutations) {
for (let mutation of mutations) {
if (mutation.attributeName == "hasBrowserHandlers") {
observer.disconnect();
checkPage();
return;
}
}
});
let docElt = tab.linkedBrowser.contentDocument.documentElement;
observer.observe(docElt, { attributes: true });
}, false);
}
function checkPage() {
if(content.location == "about:blank") {
info("got about:blank, which is expected once, so return");
return;
}
window.removeEventListener("DOMContentLoaded", checkPage, false);
ok(Services.io.offline, "Setting Services.io.offline to true.");
is(gBrowser.contentDocument.documentURI.substring(0,27),
"about:neterror?e=netOffline", "Loading the Offline mode neterror page.");

Просмотреть файл

@ -110,11 +110,15 @@ var tests = {
}, function () {
let badge = statusIcon.getAttribute("badge");
is(badge, "42", "status value is correct");
// If there is a counter, the aria-label should reflect it.
is(statusIcon.getAttribute("aria-label"), "Test Ambient 1 \u2046 (42)");
ambience.counter = 0;
Social.provider.setAmbientNotification(ambience);
badge = statusIcon.getAttribute("badge");
is(badge, "", "status value is correct");
// If there is no counter, the aria-label should be the same as the label
is(statusIcon.getAttribute("aria-label"), "Test Ambient 1 \u2046");
// The menu bar isn't as easy to instrument on Mac.
if (navigator.platform.contains("Mac"))

Просмотреть файл

@ -930,8 +930,14 @@ DownloadsPlacesView.prototype = {
}
if (shouldCreateShell) {
let shell = new DownloadElementShell(aDataItem, aPlacesNode,
this._getAnnotationsFor(downloadURI));
// Bug 836271: The annotations for a url should be cached only when the
// places node is available, i.e. when we know we'd be notified for
// annotation changes.
// Otherwise we may cache NOT_AVAILABLE values first for a given session
// download, and later use these NOT_AVAILABLE values when a history
// download for the same URL is added.
let cachedAnnotations = aPlacesNode ? this._getAnnotationsFor(downloadURI) : null;
let shell = new DownloadElementShell(aDataItem, aPlacesNode, cachedAnnotations);
newOrUpdatedShell = shell;
shellsForURI.add(shell);
if (aDataItem)
@ -1173,42 +1179,63 @@ DownloadsPlacesView.prototype = {
let suppressOnSelect = this._richlistbox.suppressOnSelect;
this._richlistbox.suppressOnSelect = true;
// Remove the invalidated history downloads from the list and unset the
// places node for data downloads.
// Loop backwards since _removeHistoryDownloadFromView may removeChild().
for (let i = this._richlistbox.childNodes.length - 1; i >= 0; --i) {
let element = this._richlistbox.childNodes[i];
if (element._shell.placesNode)
this._removeHistoryDownloadFromView(element._shell.placesNode);
try {
// Remove the invalidated history downloads from the list and unset the
// places node for data downloads.
// Loop backwards since _removeHistoryDownloadFromView may removeChild().
for (let i = this._richlistbox.childNodes.length - 1; i >= 0; --i) {
let element = this._richlistbox.childNodes[i];
if (element._shell.placesNode)
this._removeHistoryDownloadFromView(element._shell.placesNode);
}
}
finally {
this._richlistbox.suppressOnSelect = suppressOnSelect;
}
let elementsToAppendFragment = document.createDocumentFragment();
for (let i = 0; i < aContainer.childCount; i++) {
try {
this._addDownloadData(null, aContainer.getChild(i), false,
elementsToAppendFragment);
if (aContainer.childCount > 0) {
let elementsToAppendFragment = document.createDocumentFragment();
for (let i = 0; i < aContainer.childCount; i++) {
try {
this._addDownloadData(null, aContainer.getChild(i), false,
elementsToAppendFragment);
}
catch(ex) {
Cu.reportError(ex);
}
}
catch(ex) {
Cu.reportError(ex);
// _addDownloadData may not add new elements if there were already
// data items in place.
if (elementsToAppendFragment.firstChild) {
this._appendDownloadsFragment(elementsToAppendFragment);
this._ensureVisibleElementsAreActive();
}
}
this._appendDownloadsFragment(elementsToAppendFragment);
this._ensureVisibleElementsAreActive();
this._richlistbox.suppressOnSelect = suppressOnSelect;
goUpdateDownloadCommands();
},
_appendDownloadsFragment: function DPV__appendDownloadsFragment(aDOMFragment) {
// Workaround multiple reflows hang by removing the richlistbox
// and adding it back when we're done.
// Hack for bug 836283: reset xbl fields to their old values after the
// binding is reattached to avoid breaking the selection state
let xblFields = new Map();
for (let [key, value] in Iterator(this._richlistbox)) {
xblFields.set(key, value);
}
let parentNode = this._richlistbox.parentNode;
let nextSibling = this._richlistbox.nextSibling;
parentNode.removeChild(this._richlistbox);
this._richlistbox.appendChild(aDOMFragment);
parentNode.insertBefore(this._richlistbox, nextSibling);
for (let [key, value] of xblFields) {
this._richlistbox[key] = value;
}
},
nodeInserted: function DPV_nodeInserted(aParent, aPlacesNode) {

Просмотреть файл

@ -334,30 +334,26 @@ this.PlacesUIUtils = {
* See documentation at the top of bookmarkProperties.js
* @param aWindow
* Owner window for the new dialog.
* @param aResizable [optional]
* Whether the dialog is allowed to resize. Do not pass this for new
* callers since it's deprecated. It'll be removed in future releases.
*
* @see documentation at the top of bookmarkProperties.js
* @return true if any transaction has been performed, false otherwise.
*/
showBookmarkDialog:
function PUIU_showBookmarkDialog(aInfo, aParentWindow, aResizable) {
function PUIU_showBookmarkDialog(aInfo, aParentWindow) {
// Preserve size attributes differently based on the fact the dialog has
// a folder picker or not. If the picker is visible, the dialog should
// be resizable since it may not show enough content for the folders
// hierarchy.
let hasFolderPicker = !("hiddenRows" in aInfo) ||
aInfo.hiddenRows.indexOf("folderPicker") == -1;
let resizable = aResizable !== undefined ? aResizable : hasFolderPicker;
// Use a different chrome url, since this allows to persist different sizes,
// based on resizability of the dialog.
let dialogURL = resizable ?
let dialogURL = hasFolderPicker ?
"chrome://browser/content/places/bookmarkProperties2.xul" :
"chrome://browser/content/places/bookmarkProperties.xul";
let features =
"centerscreen,chrome,modal,resizable=" + (resizable ? "yes" : "no");
"centerscreen,chrome,modal,resizable=" + (hasFolderPicker ? "yes" : "no");
aParentWindow.openDialog(dialogURL, "", features, aInfo);
return ("performed" in aInfo && aInfo.performed);

Просмотреть файл

@ -399,6 +399,9 @@ social.error.ok.accesskey=O
social.error.closeSidebar.label=Close This Sidebar
social.error.closeSidebar.accesskey=C
# LOCALIZATION NOTE: %1$S is the label for the toolbar button, %2$S is the associated badge numbering that the social provider may provide.
social.aria.toolbarButtonBadgeText=%1$S (%2$S)
# Identity notifications popups
identity.termsOfService = Terms of Service
identity.privacyPolicy = Privacy Policy

Просмотреть файл

@ -21,7 +21,6 @@ EXTRA_JS_MODULES = \
NewTabUtils.jsm \
offlineAppCache.jsm \
SignInToWebsite.jsm \
TelemetryTimestamps.jsm \
webappsUI.jsm \
webrtcUI.jsm \
KeywordURLResetPrompter.jsm \

Просмотреть файл

@ -20,7 +20,6 @@ include $(topsrcdir)/config/rules.mk
_BROWSER_FILES = \
browser_NetworkPrioritizer.js \
browser_TelemetryTimestamps.js \
# bug 793906 - temporarily disabling desktop UI while working on b2g
# browser_SignInToWebsite.js \
$(NULL)

Просмотреть файл

@ -1,74 +0,0 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
function getSimpleMeasurementsFromTelemetryPing() {
const TelemetryPing = Cc["@mozilla.org/base/telemetry-ping;1"].getService(Ci.nsITelemetryPing);
let ping = TelemetryPing.getPayload();
return ping.simpleMeasurements;
}
function test() {
waitForExplicitFinish()
const Telemetry = Services.telemetry;
Telemetry.asyncFetchTelemetryData(function () {
actualTest();
finish();
});
}
function actualTest() {
// Test the module logic
let tmp = {};
Cu.import("resource:///modules/TelemetryTimestamps.jsm", tmp);
let TelemetryTimestamps = tmp.TelemetryTimestamps;
let now = Date.now();
TelemetryTimestamps.add("foo");
ok(TelemetryTimestamps.get().foo, "foo was added");
ok(TelemetryTimestamps.get().foo >= now, "foo has a reasonable value");
// Add timestamp with value
// Use a value far in the future since TelemetryPing subtracts the time of
// process initialization.
const YEAR_4000_IN_MS = 64060588800000;
TelemetryTimestamps.add("bar", YEAR_4000_IN_MS);
ok(TelemetryTimestamps.get().bar, "bar was added");
is(TelemetryTimestamps.get().bar, YEAR_4000_IN_MS, "bar has the right value");
// Can't add the same timestamp twice
TelemetryTimestamps.add("bar", 2);
is(TelemetryTimestamps.get().bar, YEAR_4000_IN_MS, "bar wasn't overwritten");
let threw = false;
try {
TelemetryTimestamps.add("baz", "this isn't a number");
} catch (ex) {
threw = true;
}
ok(threw, "adding baz threw");
ok(!TelemetryTimestamps.get().baz, "no baz was added");
// Test that the data gets added to the telemetry ping properly
let simpleMeasurements = getSimpleMeasurementsFromTelemetryPing();
ok(simpleMeasurements, "got simple measurements from ping data");
ok(simpleMeasurements.foo > 1, "foo was included");
ok(simpleMeasurements.bar > 1, "bar was included");
ok(!simpleMeasurements.baz, "baz wasn't included since it wasn't added");
// Check browser timestamps that we add
let props = [
// These can't be reliably tested when the test is run alone
//"delayedStartupStarted",
//"delayedStartupFinished",
"sessionRestoreInitialized",
// This doesn't get hit in the testing profile
//"sessionRestoreRestoring"
];
props.forEach(function (p) {
let value = simpleMeasurements[p];
ok(value, p + " exists");
ok(!isNaN(value), p + " is a number");
ok(value > 0, p + " value is reasonable");
});
}

Просмотреть файл

@ -7541,15 +7541,30 @@ nsDocument::UnblockOnload(bool aFireSync)
--mOnloadBlockCount;
// If mScriptGlobalObject is null, we shouldn't be messing with the loadgroup
// -- it's not ours.
if (mOnloadBlockCount == 0 && mScriptGlobalObject) {
if (aFireSync && mAsyncOnloadBlockCount == 0) {
// Increment mOnloadBlockCount, since DoUnblockOnload will decrement it
++mOnloadBlockCount;
DoUnblockOnload();
} else {
PostUnblockOnloadEvent();
if (mOnloadBlockCount == 0) {
if (mScriptGlobalObject) {
// Only manipulate the loadgroup in this case, because if mScriptGlobalObject
// is null, it's not ours.
if (aFireSync && mAsyncOnloadBlockCount == 0) {
// Increment mOnloadBlockCount, since DoUnblockOnload will decrement it
++mOnloadBlockCount;
DoUnblockOnload();
} else {
PostUnblockOnloadEvent();
}
} else if (mIsBeingUsedAsImage) {
// To correctly unblock onload for a document that contains an SVG
// image, we need to know when all of the SVG document's resources are
// done loading, in a way comparable to |window.onload|. We fire this
// event to indicate that the SVG should be considered fully loaded.
// Because scripting is disabled on SVG-as-image documents, this event
// is not accessible to content authors. (See bug 837135.)
nsRefPtr<nsAsyncDOMEvent> e =
new nsAsyncDOMEvent(this,
NS_LITERAL_STRING("MozSVGAsImageDocumentLoad"),
false,
false);
e->PostDOMEvent();
}
}
}

Просмотреть файл

@ -66,7 +66,6 @@
#include "nsCaret.h"
#include "nsSubDocumentFrame.h"
#include "nsIFrameTraversal.h"
#include "nsLayoutCID.h"
#include "nsLayoutUtils.h"
#include "nsIInterfaceRequestorUtils.h"
@ -117,8 +116,6 @@ using namespace mozilla::dom;
static const nsIntPoint kInvalidRefPoint = nsIntPoint(-1,-1);
static NS_DEFINE_CID(kFrameTraversalCID, NS_FRAMETRAVERSAL_CID);
static bool sLeftClickOnly = true;
static bool sKeyCausesActivation = true;
static uint32_t sESMInstanceCount = 0;

Просмотреть файл

@ -1487,7 +1487,9 @@ Navigator::OnNavigation()
#ifdef MOZ_MEDIA_NAVIGATOR
// Inform MediaManager in case there are live streams or pending callbacks.
MediaManager *manager = MediaManager::Get();
manager->OnNavigation(win->WindowID());
if (manager) {
manager->OnNavigation(win->WindowID());
}
#endif
if (mCameraManager) {
mCameraManager->OnNavigation(win->WindowID());

Просмотреть файл

@ -92,7 +92,7 @@ nsContentPermissionRequestProxy::GetElement(nsIDOMElement * *aRequestingElement)
return NS_ERROR_FAILURE;
}
NS_ADDREF(*aRequestingElement = mParent->mElement);
NS_IF_ADDREF(*aRequestingElement = mParent->mElement);
return NS_OK;
}

Просмотреть файл

@ -1061,7 +1061,7 @@ nsJSContext::JSOptionChangedCallback(const char *pref, void *data)
else
newDefaultJSOptions &= ~JSOPTION_WERROR;
::JS_SetOptions(context->mContext, newDefaultJSOptions & JSRUNOPTION_MASK);
::JS_SetOptions(context->mContext, newDefaultJSOptions & JSOPTION_MASK);
::JS_SetParallelCompilationEnabled(context->mContext, parallelIonCompilation);

Просмотреть файл

@ -8,858 +8,29 @@ let { classes: Cc, interfaces: Ci, results: Cr, utils: Cu } = Components;
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/Geometry.jsm");
Cu.import("resource://gre/modules/BrowserElementPromptService.jsm");
// Event whitelisted for bubbling.
let whitelistedEvents = [
Ci.nsIDOMKeyEvent.DOM_VK_ESCAPE, // Back button.
Ci.nsIDOMKeyEvent.DOM_VK_SLEEP, // Power button.
Ci.nsIDOMKeyEvent.DOM_VK_CONTEXT_MENU,
Ci.nsIDOMKeyEvent.DOM_VK_F5, // Search button.
Ci.nsIDOMKeyEvent.DOM_VK_PAGE_UP, // Volume up.
Ci.nsIDOMKeyEvent.DOM_VK_PAGE_DOWN // Volume down.
];
function debug(msg) {
//dump("BrowserElementChild - " + msg + "\n");
}
function sendAsyncMsg(msg, data) {
if (!data) {
data = { };
}
// NB: this must happen before we process any messages from
// mozbrowser API clients.
docShell.isActive = true;
data.msg_name = msg;
sendAsyncMessage('browser-element-api:call', data);
let infos = sendSyncMessage('browser-element-api:call',
{ 'msg_name': 'hello' })[0];
docShell.QueryInterface(Ci.nsIDocShellTreeItem).name = infos.name;
docShell.setFullscreenAllowed(infos.fullscreenAllowed);
if (!('BrowserElementIsPreloaded' in this)) {
// This is a product-specific file that's sometimes unavailable.
try {
Services.scriptloader.loadSubScript("chrome://browser/content/forms.js");
} catch (e) {
}
Services.scriptloader.loadSubScript("chrome://global/content/BrowserElementPanning.js");
Services.scriptloader.loadSubScript("chrome://global/content/BrowserElementChildPreload.js");
}
function sendSyncMsg(msg, data) {
if (!data) {
data = { };
}
data.msg_name = msg;
return sendSyncMessage('browser-element-api:call', data);
}
/**
* The BrowserElementChild implements one half of <iframe mozbrowser>.
* (The other half is, unsurprisingly, BrowserElementParent.)
*
* This script is injected into an <iframe mozbrowser> via
* nsIMessageManager::LoadFrameScript().
*
* Our job here is to listen for events within this frame and bubble them up to
* the parent process.
*/
var global = this;
function BrowserElementChild() {
// Maps outer window id --> weak ref to window. Used by modal dialog code.
this._windowIDDict = {};
// _forcedVisible corresponds to the visibility state our owner has set on us
// (via iframe.setVisible). ownerVisible corresponds to whether the docShell
// whose window owns this element is visible.
//
// Our docShell is visible iff _forcedVisible and _ownerVisible are both
// true.
this._forcedVisible = true;
this._ownerVisible = true;
this._nextPaintHandler = null;
this._init();
};
BrowserElementChild.prototype = {
QueryInterface: XPCOMUtils.generateQI([Ci.nsIObserver,
Ci.nsISupportsWeakReference]),
_init: function() {
debug("Starting up.");
// NB: this must happen before we process any messages from
// mozbrowser API clients.
docShell.isActive = true;
sendAsyncMsg("hello");
// Set the docshell's name according to our <iframe>'s name attribute.
docShell.QueryInterface(Ci.nsIDocShellTreeItem).name =
sendSyncMsg('get-name')[0];
docShell.setFullscreenAllowed(sendSyncMsg('get-fullscreen-allowed')[0]);
BrowserElementPromptService.mapWindowToBrowserElementChild(content, this);
docShell.QueryInterface(Ci.nsIWebProgress)
.addProgressListener(this._progressListener,
Ci.nsIWebProgress.NOTIFY_LOCATION |
Ci.nsIWebProgress.NOTIFY_SECURITY |
Ci.nsIWebProgress.NOTIFY_STATE_WINDOW);
docShell.QueryInterface(Ci.nsIWebNavigation)
.sessionHistory = Cc["@mozilla.org/browser/shistory;1"]
.createInstance(Ci.nsISHistory);
// This is necessary to get security web progress notifications.
var securityUI = Cc['@mozilla.org/secure_browser_ui;1']
.createInstance(Ci.nsISecureBrowserUI);
securityUI.init(content);
// A cache of the menuitem dom objects keyed by the id we generate
// and pass to the embedder
this._ctxHandlers = {};
// Counter of contextmenu events fired
this._ctxCounter = 0;
addEventListener('DOMTitleChanged',
this._titleChangedHandler.bind(this),
/* useCapture = */ true,
/* wantsUntrusted = */ false);
addEventListener('DOMLinkAdded',
this._iconChangedHandler.bind(this),
/* useCapture = */ true,
/* wantsUntrusted = */ false);
// Registers a MozAfterPaint handler for the very first paint.
this._addMozAfterPaintHandler(function () {
sendAsyncMsg('firstpaint');
});
let self = this;
let mmCalls = {
"purge-history": this._recvPurgeHistory,
"get-screenshot": this._recvGetScreenshot,
"set-visible": this._recvSetVisible,
"get-visible": this._recvVisible,
"send-mouse-event": this._recvSendMouseEvent,
"send-touch-event": this._recvSendTouchEvent,
"get-can-go-back": this._recvCanGoBack,
"get-can-go-forward": this._recvCanGoForward,
"go-back": this._recvGoBack,
"go-forward": this._recvGoForward,
"reload": this._recvReload,
"stop": this._recvStop,
"unblock-modal-prompt": this._recvStopWaiting,
"fire-ctx-callback": this._recvFireCtxCallback,
"owner-visibility-change": this._recvOwnerVisibilityChange,
"exit-fullscreen": this._recvExitFullscreen.bind(this),
"activate-next-paint-listener": this._activateNextPaintListener.bind(this),
"deactivate-next-paint-listener": this._deactivateNextPaintListener.bind(this)
}
addMessageListener("browser-element-api:call", function(aMessage) {
if (aMessage.data.msg_name in mmCalls) {
return mmCalls[aMessage.data.msg_name].apply(self, arguments);
}
});
let els = Cc["@mozilla.org/eventlistenerservice;1"]
.getService(Ci.nsIEventListenerService);
// We are using the system group for those events so if something in the
// content called .stopPropagation() this will still be called.
els.addSystemEventListener(global, 'keydown',
this._keyEventHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'keypress',
this._keyEventHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'keyup',
this._keyEventHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'DOMWindowClose',
this._windowCloseHandler.bind(this),
/* useCapture = */ false);
els.addSystemEventListener(global, 'DOMWindowCreated',
this._windowCreatedHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'contextmenu',
this._contextmenuHandler.bind(this),
/* useCapture = */ false);
els.addSystemEventListener(global, 'scroll',
this._scrollEventHandler.bind(this),
/* useCapture = */ false);
Services.obs.addObserver(this,
"fullscreen-origin-change",
/* ownsWeak = */ true);
Services.obs.addObserver(this,
'ask-parent-to-exit-fullscreen',
/* ownsWeak = */ true);
Services.obs.addObserver(this,
'ask-parent-to-rollback-fullscreen',
/* ownsWeak = */ true);
},
observe: function(subject, topic, data) {
// Ignore notifications not about our document.
if (subject != content.document)
return;
switch (topic) {
case 'fullscreen-origin-change':
sendAsyncMsg('fullscreen-origin-change', { _payload_: data });
break;
case 'ask-parent-to-exit-fullscreen':
sendAsyncMsg('exit-fullscreen');
break;
case 'ask-parent-to-rollback-fullscreen':
sendAsyncMsg('rollback-fullscreen');
break;
}
},
_tryGetInnerWindowID: function(win) {
let utils = win.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
try {
return utils.currentInnerWindowID;
}
catch(e) {
return null;
}
},
/**
* Show a modal prompt. Called by BrowserElementPromptService.
*/
showModalPrompt: function(win, args) {
let utils = win.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
args.windowID = { outer: utils.outerWindowID,
inner: this._tryGetInnerWindowID(win) };
sendAsyncMsg('showmodalprompt', args);
let returnValue = this._waitForResult(win);
if (args.promptType == 'prompt' ||
args.promptType == 'confirm' ||
args.promptType == 'custom-prompt') {
return returnValue;
}
},
/**
 * Spin in a nested event loop until we receive an unblock-modal-prompt message for
* this window.
*/
_waitForResult: function(win) {
debug("_waitForResult(" + win + ")");
let utils = win.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
let outerWindowID = utils.outerWindowID;
let innerWindowID = this._tryGetInnerWindowID(win);
if (innerWindowID === null) {
// I have no idea what waiting for a result means when there's no inner
// window, so let's just bail.
debug("_waitForResult: No inner window. Bailing.");
return;
}
this._windowIDDict[outerWindowID] = Cu.getWeakReference(win);
debug("Entering modal state (outerWindowID=" + outerWindowID + ", " +
"innerWindowID=" + innerWindowID + ")");
// In theory, we're supposed to pass |modalStateWin| back to
// leaveModalStateWithWindow. But in practice, the window is always null,
// because it's the window associated with this script context, which
// doesn't have a window. But we'll play along anyway in case this
// changes.
var modalStateWin = utils.enterModalStateWithWindow();
// We'll decrement win.modalDepth when we receive a unblock-modal-prompt message
// for the window.
if (!win.modalDepth) {
win.modalDepth = 0;
}
win.modalDepth++;
let origModalDepth = win.modalDepth;
let thread = Services.tm.currentThread;
debug("Nested event loop - begin");
while (win.modalDepth == origModalDepth) {
// Bail out of the loop if the inner window changed; that means the
// window navigated.
if (this._tryGetInnerWindowID(win) !== innerWindowID) {
debug("_waitForResult: Inner window ID changed " +
"while in nested event loop.");
break;
}
thread.processNextEvent(/* mayWait = */ true);
}
debug("Nested event loop - finish");
// If we exited the loop because the inner window changed, then bail on the
// modal prompt.
if (innerWindowID !== this._tryGetInnerWindowID(win)) {
throw Components.Exception("Modal state aborted by navigation",
Cr.NS_ERROR_NOT_AVAILABLE);
}
let returnValue = win.modalReturnValue;
delete win.modalReturnValue;
utils.leaveModalStateWithWindow(modalStateWin);
debug("Leaving modal state (outerID=" + outerWindowID + ", " +
"innerID=" + innerWindowID + ")");
return returnValue;
},
_recvStopWaiting: function(msg) {
let outerID = msg.json.windowID.outer;
let innerID = msg.json.windowID.inner;
let returnValue = msg.json.returnValue;
debug("recvStopWaiting(outer=" + outerID + ", inner=" + innerID +
", returnValue=" + returnValue + ")");
if (!this._windowIDDict[outerID]) {
debug("recvStopWaiting: No record of outer window ID " + outerID);
return;
}
let win = this._windowIDDict[outerID].get();
delete this._windowIDDict[outerID];
if (!win) {
debug("recvStopWaiting, but window is gone\n");
return;
}
if (innerID !== this._tryGetInnerWindowID(win)) {
debug("recvStopWaiting, but inner ID has changed\n");
return;
}
debug("recvStopWaiting " + win);
win.modalReturnValue = returnValue;
win.modalDepth--;
},
_recvExitFullscreen: function() {
var utils = content.document.defaultView
.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
utils.exitFullscreen();
},
_titleChangedHandler: function(e) {
debug("Got titlechanged: (" + e.target.title + ")");
var win = e.target.defaultView;
// Ignore titlechanges which don't come from the top-level
// <iframe mozbrowser> window.
if (win == content) {
sendAsyncMsg('titlechange', { _payload_: e.target.title });
}
else {
debug("Not top level!");
}
},
_iconChangedHandler: function(e) {
debug("Got iconchanged: (" + e.target.href + ")");
var hasIcon = e.target.rel.split(' ').some(function(x) {
return x.toLowerCase() === 'icon';
});
if (hasIcon) {
var win = e.target.ownerDocument.defaultView;
// Ignore iconchanges which don't come from the top-level
// <iframe mozbrowser> window.
if (win == content) {
sendAsyncMsg('iconchange', { _payload_: e.target.href });
}
else {
debug("Not top level!");
}
}
},
_addMozAfterPaintHandler: function(callback) {
function onMozAfterPaint() {
let uri = docShell.QueryInterface(Ci.nsIWebNavigation).currentURI;
debug("Got afterpaint event: " + uri.spec);
if (uri.spec != "about:blank") {
removeEventListener('MozAfterPaint', onMozAfterPaint,
/* useCapture = */ true);
callback();
}
}
addEventListener('MozAfterPaint', onMozAfterPaint, /* useCapture = */ true);
return onMozAfterPaint;
},
_removeMozAfterPaintHandler: function(listener) {
removeEventListener('MozAfterPaint', listener,
/* useCapture = */ true);
},
_activateNextPaintListener: function(e) {
if (!this._nextPaintHandler) {
this._nextPaintHandler = this._addMozAfterPaintHandler(function () {
this._nextPaintHandler = null;
sendAsyncMsg('nextpaint');
}.bind(this));
}
},
_deactivateNextPaintListener: function(e) {
if (this._nextPaintHandler) {
this._removeMozAfterPaintHandler(this._nextPaintHandler);
this._nextPaintHandler = null;
}
},
_windowCloseHandler: function(e) {
let win = e.target;
if (win != content || e.defaultPrevented) {
return;
}
debug("Closing window " + win);
sendAsyncMsg('close');
// Inform the window implementation that we handled this close ourselves.
e.preventDefault();
},
_windowCreatedHandler: function(e) {
let targetDocShell = e.target.defaultView
.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIWebNavigation);
if (targetDocShell != docShell) {
return;
}
let uri = docShell.QueryInterface(Ci.nsIWebNavigation).currentURI;
debug("Window created: " + uri.spec);
if (uri.spec != "about:blank") {
this._addMozAfterPaintHandler(function () {
sendAsyncMsg('documentfirstpaint');
});
}
},
_contextmenuHandler: function(e) {
debug("Got contextmenu");
if (e.defaultPrevented) {
return;
}
e.preventDefault();
this._ctxCounter++;
this._ctxHandlers = {};
var elem = e.target;
var menuData = {systemTargets: [], contextmenu: null};
var ctxMenuId = null;
while (elem && elem.parentNode) {
var ctxData = this._getSystemCtxMenuData(elem);
if (ctxData) {
menuData.systemTargets.push({
nodeName: elem.nodeName,
data: ctxData
});
}
if (!ctxMenuId && 'hasAttribute' in elem && elem.hasAttribute('contextmenu')) {
ctxMenuId = elem.getAttribute('contextmenu');
}
elem = elem.parentNode;
}
if (ctxMenuId) {
var menu = e.target.ownerDocument.getElementById(ctxMenuId);
if (menu) {
menuData.contextmenu = this._buildMenuObj(menu, '');
}
}
sendAsyncMsg('contextmenu', menuData);
},
_getSystemCtxMenuData: function(elem) {
if ((elem instanceof Ci.nsIDOMHTMLAnchorElement && elem.href) ||
(elem instanceof Ci.nsIDOMHTMLAreaElement && elem.href)) {
return elem.href;
}
if (elem instanceof Ci.nsIImageLoadingContent && elem.currentURI) {
return elem.currentURI.spec;
}
if ((elem instanceof Ci.nsIDOMHTMLMediaElement) ||
(elem instanceof Ci.nsIDOMHTMLImageElement)) {
return elem.currentSrc || elem.src;
}
return false;
},
_scrollEventHandler: function(e) {
let win = e.target.defaultView;
if (win != content) {
return;
}
debug("scroll event " + win);
sendAsyncMsg("scroll", { top: win.scrollY, left: win.scrollX });
},
// Handles the 'purge-history' message: clears this frame's session history
// and acks with 'got-purge-history'.  The ack always reports success, even
// when PurgeHistory threw (e.g. nothing to purge).
_recvPurgeHistory: function(data) {
  debug("Received purgeHistory message: (" + data.json.id + ")");

  let history = docShell.QueryInterface(Ci.nsIWebNavigation).sessionHistory;

  try {
    if (history && history.count) {
      history.PurgeHistory(history.count);
    }
  } catch(e) {}

  sendAsyncMsg('got-purge-history', { id: data.json.id, successRv: true });
},
_recvGetScreenshot: function(data) {
debug("Received getScreenshot message: (" + data.json.id + ")");
let self = this;
let maxWidth = data.json.args.width;
let maxHeight = data.json.args.height;
let domRequestID = data.json.id;
let takeScreenshotClosure = function() {
self._takeScreenshot(maxWidth, maxHeight, domRequestID);
};
let maxDelayMS = 2000;
try {
maxDelayMS = Services.prefs.getIntPref('dom.browserElement.maxScreenshotDelayMS');
}
catch(e) {}
// Try to wait for the event loop to go idle before we take the screenshot,
// but once we've waited maxDelayMS milliseconds, go ahead and take it
// anyway.
Cc['@mozilla.org/message-loop;1'].getService(Ci.nsIMessageLoop).postIdleTask(
takeScreenshotClosure, maxDelayMS);
},
/**
* Actually take a screenshot and foward the result up to our parent, given
* the desired maxWidth and maxHeight, and given the DOMRequest ID associated
* with the request from the parent.
*/
_takeScreenshot: function(maxWidth, maxHeight, domRequestID) {
// You can think of the screenshotting algorithm as carrying out the
// following steps:
//
// - Let scaleWidth be the factor by which we'd need to downscale the
// viewport so it would fit within maxWidth. (If the viewport's width
// is less than maxWidth, let scaleWidth be 1.) Compute scaleHeight
// the same way.
//
// - Scale the viewport by max(scaleWidth, scaleHeight). Now either the
// viewport's width is no larger than maxWidth, the viewport's height is
// no larger than maxHeight, or both.
//
// - Crop the viewport so its width is no larger than maxWidth and its
// height is no larger than maxHeight.
//
// - Return a screenshot of the page's viewport scaled and cropped per
// above.
debug("Taking a screenshot: maxWidth=" + maxWidth +
", maxHeight=" + maxHeight +
", domRequestID=" + domRequestID + ".");
let scaleWidth = Math.min(1, maxWidth / content.innerWidth);
let scaleHeight = Math.min(1, maxHeight / content.innerHeight);
let scale = Math.max(scaleWidth, scaleHeight);
let canvasWidth = Math.min(maxWidth, Math.round(content.innerWidth * scale));
let canvasHeight = Math.min(maxHeight, Math.round(content.innerHeight * scale));
var canvas = content.document
.createElementNS("http://www.w3.org/1999/xhtml", "canvas");
canvas.mozOpaque = true;
canvas.width = canvasWidth;
canvas.height = canvasHeight;
var ctx = canvas.getContext("2d");
ctx.scale(scale, scale);
ctx.drawWindow(content, 0, 0, content.innerWidth, content.innerHeight,
"rgb(255,255,255)");
// Take a JPEG screenshot to hack around the fact that we can't specify
// opaque PNG. This requires us to unpremultiply the alpha channel, which
// is expensive on ARM processors because they lack a hardware integer
// division instruction.
canvas.toBlob(function(blob) {
sendAsyncMsg('got-screenshot', {
id: domRequestID,
successRv: blob
});
}, 'image/jpeg');
},
_recvFireCtxCallback: function(data) {
debug("Received fireCtxCallback message: (" + data.json.menuitem + ")");
// We silently ignore if the embedder uses an incorrect id in the callback
if (data.json.menuitem in this._ctxHandlers) {
this._ctxHandlers[data.json.menuitem].click();
this._ctxHandlers = {};
} else {
debug("Ignored invalid contextmenu invocation");
}
},
// Recursively serializes a <menu> element into a plain data object of the
// shape {type: 'menu', label?, items: [...]}, where items are nested menus
// or {type: 'menuitem', id, label?, icon?} entries.  Each menuitem's DOM
// node is cached in this._ctxHandlers under its generated id so a later
// fire-ctx-callback message can click it.
_buildMenuObj: function(menu, idPrefix) {
  // Copies |attribute| from src to target only when present and non-empty.
  function maybeCopyAttribute(src, target, attribute) {
    if (src.getAttribute(attribute)) {
      target[attribute] = src.getAttribute(attribute);
    }
  }

  var menuObj = {type: 'menu', items: []};
  maybeCopyAttribute(menu, menuObj, 'label');

  // NOTE: |i| is post-incremented in the loop condition, so inside the
  // body it is already 1-based relative to the child being processed.
  for (var i = 0, child; child = menu.children[i++];) {
    if (child.nodeName === 'MENU') {
      menuObj.items.push(this._buildMenuObj(child, idPrefix + i + '_'));
    } else if (child.nodeName === 'MENUITEM') {
      // Ids embed the context-menu generation counter so handlers from an
      // older menu can never be fired by a stale callback.
      var id = this._ctxCounter + '_' + idPrefix + i;
      var menuitem = {id: id, type: 'menuitem'};
      maybeCopyAttribute(child, menuitem, 'label');
      maybeCopyAttribute(child, menuitem, 'icon');
      this._ctxHandlers[id] = child;
      menuObj.items.push(menuitem);
    }
  }
  return menuObj;
},
_recvSetVisible: function(data) {
debug("Received setVisible message: (" + data.json.visible + ")");
this._forcedVisible = data.json.visible;
this._updateDocShellVisibility();
},
_recvVisible: function(data) {
sendAsyncMsg('got-visible', {
id: data.json.id,
successRv: docShell.isActive
});
},
/**
* Called when the window which contains this iframe becomes hidden or
* visible.
*/
_recvOwnerVisibilityChange: function(data) {
debug("Received ownerVisibilityChange: (" + data.json.visible + ")");
this._ownerVisible = data.json.visible;
this._updateDocShellVisibility();
},
_updateDocShellVisibility: function() {
var visible = this._forcedVisible && this._ownerVisible;
if (docShell.isActive !== visible) {
docShell.isActive = visible;
}
},
_recvSendMouseEvent: function(data) {
let json = data.json;
let utils = content.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
utils.sendMouseEvent(json.type, json.x, json.y, json.button,
json.clickCount, json.modifiers);
},
_recvSendTouchEvent: function(data) {
let json = data.json;
let utils = content.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
utils.sendTouchEvent(json.type, json.identifiers, json.touchesX,
json.touchesY, json.radiisX, json.radiisY,
json.rotationAngles, json.forces, json.count,
json.modifiers);
},
_recvCanGoBack: function(data) {
var webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
sendAsyncMsg('got-can-go-back', {
id: data.json.id,
successRv: webNav.canGoBack
});
},
_recvCanGoForward: function(data) {
var webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
sendAsyncMsg('got-can-go-forward', {
id: data.json.id,
successRv: webNav.canGoForward
});
},
_recvGoBack: function(data) {
try {
docShell.QueryInterface(Ci.nsIWebNavigation).goBack();
} catch(e) {
// Silently swallow errors; these happen when we can't go back.
}
},
_recvGoForward: function(data) {
try {
docShell.QueryInterface(Ci.nsIWebNavigation).goForward();
} catch(e) {
// Silently swallow errors; these happen when we can't go forward.
}
},
// Handles the 'reload' message.  A hard reload bypasses both the proxy and
// the cache; a normal reload uses the default load flags.
_recvReload: function(data) {
  let webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
  let reloadFlags = data.json.hardReload ?
    webNav.LOAD_FLAGS_BYPASS_PROXY | webNav.LOAD_FLAGS_BYPASS_CACHE :
    webNav.LOAD_FLAGS_NONE;
  try {
    webNav.reload(reloadFlags);
  } catch(e) {
    // Silently swallow errors; these can happen if a user cancels reload
  }
},
_recvStop: function(data) {
let webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
webNav.stop(webNav.STOP_NETWORK);
},
_keyEventHandler: function(e) {
if (whitelistedEvents.indexOf(e.keyCode) != -1 && !e.defaultPrevented) {
sendAsyncMsg('keyevent', {
type: e.type,
keyCode: e.keyCode,
charCode: e.charCode,
});
}
},
// The docShell keeps a weak reference to the progress listener, so we need
// to keep a strong ref to it ourselves.
_progressListener: {
QueryInterface: XPCOMUtils.generateQI([Ci.nsIWebProgressListener,
Ci.nsISupportsWeakReference]),
_seenLoadStart: false,
onLocationChange: function(webProgress, request, location, flags) {
// We get progress events from subshells here, which is kind of weird.
if (webProgress != docShell) {
return;
}
// Ignore locationchange events which occur before the first loadstart.
// These are usually about:blank loads we don't care about.
if (!this._seenLoadStart) {
return;
}
// Remove password and wyciwyg from uri.
location = Cc["@mozilla.org/docshell/urifixup;1"]
.getService(Ci.nsIURIFixup).createExposableURI(location);
sendAsyncMsg('locationchange', { _payload_: location.spec });
},
onStateChange: function(webProgress, request, stateFlags, status) {
if (webProgress != docShell) {
return;
}
if (stateFlags & Ci.nsIWebProgressListener.STATE_START) {
this._seenLoadStart = true;
sendAsyncMsg('loadstart');
}
if (stateFlags & Ci.nsIWebProgressListener.STATE_STOP) {
sendAsyncMsg('loadend');
// Ignoring NS_BINDING_ABORTED, which is set when loading page is
// stopped.
if (status == Cr.NS_OK ||
status == Cr.NS_BINDING_ABORTED) {
return;
}
// TODO See nsDocShell::DisplayLoadError for a list of all the error
// codes (the status param) we should eventually handle here.
sendAsyncMsg('error', { type: 'other' });
}
},
onSecurityChange: function(webProgress, request, state) {
if (webProgress != docShell) {
return;
}
var stateDesc;
if (state & Ci.nsIWebProgressListener.STATE_IS_SECURE) {
stateDesc = 'secure';
}
else if (state & Ci.nsIWebProgressListener.STATE_IS_BROKEN) {
stateDesc = 'broken';
}
else if (state & Ci.nsIWebProgressListener.STATE_IS_INSECURE) {
stateDesc = 'insecure';
}
else {
debug("Unexpected securitychange state!");
stateDesc = '???';
}
// XXX Until bug 764496 is fixed, this will always return false.
var isEV = !!(state & Ci.nsIWebProgressListener.STATE_IDENTITY_EV_TOPLEVEL);
sendAsyncMsg('securitychange', { state: stateDesc, extendedValidation: isEV });
},
onStatusChange: function(webProgress, request, status, message) {},
onProgressChange: function(webProgress, request, curSelfProgress,
maxSelfProgress, curTotalProgress, maxTotalProgress) {},
},
// Expose the message manager for WebApps and others.
_messageManagerPublic: {
sendAsyncMessage: global.sendAsyncMessage.bind(global),
sendSyncMessage: global.sendSyncMessage.bind(global),
addMessageListener: global.addMessageListener.bind(global),
removeMessageListener: global.removeMessageListener.bind(global)
},
get messageManager() {
return this._messageManagerPublic;
}
};
var api = new BrowserElementChild();
// FIXME/bug 775438: use a JSM?
//
// The code in this included file depends on the |addEventListener|,
// |addMessageListener|, |content|, |Geometry| and |Services| symbols
// being "exported" from here.
#include BrowserElementScrolling.js
var BrowserElementIsReady = true;

Просмотреть файл

@ -0,0 +1,846 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
dump("######################## BrowserElementChildPreload.js loaded\n");
var BrowserElementIsReady = false;
let { classes: Cc, interfaces: Ci, results: Cr, utils: Cu } = Components;
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/BrowserElementPromptService.jsm");
// Event whitelisted for bubbling.
let whitelistedEvents = [
Ci.nsIDOMKeyEvent.DOM_VK_ESCAPE, // Back button.
Ci.nsIDOMKeyEvent.DOM_VK_SLEEP, // Power button.
Ci.nsIDOMKeyEvent.DOM_VK_CONTEXT_MENU,
Ci.nsIDOMKeyEvent.DOM_VK_F5, // Search button.
Ci.nsIDOMKeyEvent.DOM_VK_PAGE_UP, // Volume up.
Ci.nsIDOMKeyEvent.DOM_VK_PAGE_DOWN // Volume down.
];
// Debug logging for this frame script; disabled by default.  Uncomment the
// dump() call to trace BrowserElementChildPreload activity.
function debug(msg) {
  //dump("BrowserElementChildPreload - " + msg + "\n");
}
/**
 * Sends a browser-element API message to the parent process.
 *
 * @param msg  Message name; stored on the payload as |msg_name|.
 * @param data Optional payload object; a fresh empty object is used when
 *             omitted.  Note the caller's object is tagged in place.
 */
function sendAsyncMsg(msg, data) {
  // Ensure that we don't send any messages before BrowserElementChild.js
  // finishes loading.
  if (!BrowserElementIsReady) {
    return;
  }

  let payload = data || {};
  payload.msg_name = msg;
  sendAsyncMessage('browser-element-api:call', payload);
}
/**
* The BrowserElementChild implements one half of <iframe mozbrowser>.
* (The other half is, unsurprisingly, BrowserElementParent.)
*
* This script is injected into an <iframe mozbrowser> via
* nsIMessageManager::LoadFrameScript().
*
* Our job here is to listen for events within this frame and bubble them up to
* the parent process.
*/
var global = this;
/**
 * Constructor for the frame-script half of <iframe mozbrowser>.  Sets up
 * per-frame state and wires up all listeners via _init().
 */
function BrowserElementChild() {
  // Maps outer window id --> weak ref to window. Used by modal dialog code.
  this._windowIDDict = {};

  // _forcedVisible corresponds to the visibility state our owner has set on us
  // (via iframe.setVisible). ownerVisible corresponds to whether the docShell
  // whose window owns this element is visible.
  //
  // Our docShell is visible iff _forcedVisible and _ownerVisible are both
  // true.
  this._forcedVisible = true;
  this._ownerVisible = true;

  // Active one-shot MozAfterPaint listener for 'nextpaint', or null when
  // no next-paint listener is armed.
  this._nextPaintHandler = null;

  this._init();
};
BrowserElementChild.prototype = {
QueryInterface: XPCOMUtils.generateQI([Ci.nsIObserver,
Ci.nsISupportsWeakReference]),
_init: function() {
debug("Starting up.");
BrowserElementPromptService.mapWindowToBrowserElementChild(content, this);
docShell.QueryInterface(Ci.nsIWebProgress)
.addProgressListener(this._progressListener,
Ci.nsIWebProgress.NOTIFY_LOCATION |
Ci.nsIWebProgress.NOTIFY_SECURITY |
Ci.nsIWebProgress.NOTIFY_STATE_WINDOW);
docShell.QueryInterface(Ci.nsIWebNavigation)
.sessionHistory = Cc["@mozilla.org/browser/shistory;1"]
.createInstance(Ci.nsISHistory);
// This is necessary to get security web progress notifications.
var securityUI = Cc['@mozilla.org/secure_browser_ui;1']
.createInstance(Ci.nsISecureBrowserUI);
securityUI.init(content);
// A cache of the menuitem dom objects keyed by the id we generate
// and pass to the embedder
this._ctxHandlers = {};
// Counter of contextmenu events fired
this._ctxCounter = 0;
addEventListener('DOMTitleChanged',
this._titleChangedHandler.bind(this),
/* useCapture = */ true,
/* wantsUntrusted = */ false);
addEventListener('DOMLinkAdded',
this._iconChangedHandler.bind(this),
/* useCapture = */ true,
/* wantsUntrusted = */ false);
// Registers a MozAfterPaint handler for the very first paint.
this._addMozAfterPaintHandler(function () {
sendAsyncMsg('firstpaint');
});
let self = this;
let mmCalls = {
"purge-history": this._recvPurgeHistory,
"get-screenshot": this._recvGetScreenshot,
"set-visible": this._recvSetVisible,
"get-visible": this._recvVisible,
"send-mouse-event": this._recvSendMouseEvent,
"send-touch-event": this._recvSendTouchEvent,
"get-can-go-back": this._recvCanGoBack,
"get-can-go-forward": this._recvCanGoForward,
"go-back": this._recvGoBack,
"go-forward": this._recvGoForward,
"reload": this._recvReload,
"stop": this._recvStop,
"unblock-modal-prompt": this._recvStopWaiting,
"fire-ctx-callback": this._recvFireCtxCallback,
"owner-visibility-change": this._recvOwnerVisibilityChange,
"exit-fullscreen": this._recvExitFullscreen.bind(this),
"activate-next-paint-listener": this._activateNextPaintListener.bind(this),
"deactivate-next-paint-listener": this._deactivateNextPaintListener.bind(this)
}
addMessageListener("browser-element-api:call", function(aMessage) {
if (aMessage.data.msg_name in mmCalls) {
return mmCalls[aMessage.data.msg_name].apply(self, arguments);
}
});
let els = Cc["@mozilla.org/eventlistenerservice;1"]
.getService(Ci.nsIEventListenerService);
// We are using the system group for those events so if something in the
// content called .stopPropagation() this will still be called.
els.addSystemEventListener(global, 'keydown',
this._keyEventHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'keypress',
this._keyEventHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'keyup',
this._keyEventHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'DOMWindowClose',
this._windowCloseHandler.bind(this),
/* useCapture = */ false);
els.addSystemEventListener(global, 'DOMWindowCreated',
this._windowCreatedHandler.bind(this),
/* useCapture = */ true);
els.addSystemEventListener(global, 'contextmenu',
this._contextmenuHandler.bind(this),
/* useCapture = */ false);
els.addSystemEventListener(global, 'scroll',
this._scrollEventHandler.bind(this),
/* useCapture = */ false);
Services.obs.addObserver(this,
"fullscreen-origin-change",
/* ownsWeak = */ true);
Services.obs.addObserver(this,
'ask-parent-to-exit-fullscreen',
/* ownsWeak = */ true);
Services.obs.addObserver(this,
'ask-parent-to-rollback-fullscreen',
/* ownsWeak = */ true);
},
observe: function(subject, topic, data) {
// Ignore notifications not about our document.
if (subject != content.document)
return;
switch (topic) {
case 'fullscreen-origin-change':
sendAsyncMsg('fullscreen-origin-change', { _payload_: data });
break;
case 'ask-parent-to-exit-fullscreen':
sendAsyncMsg('exit-fullscreen');
break;
case 'ask-parent-to-rollback-fullscreen':
sendAsyncMsg('rollback-fullscreen');
break;
}
},
_tryGetInnerWindowID: function(win) {
let utils = win.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
try {
return utils.currentInnerWindowID;
}
catch(e) {
return null;
}
},
/**
* Show a modal prompt. Called by BrowserElementPromptService.
*/
showModalPrompt: function(win, args) {
let utils = win.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
args.windowID = { outer: utils.outerWindowID,
inner: this._tryGetInnerWindowID(win) };
sendAsyncMsg('showmodalprompt', args);
let returnValue = this._waitForResult(win);
if (args.promptType == 'prompt' ||
args.promptType == 'confirm' ||
args.promptType == 'custom-prompt') {
return returnValue;
}
},
/**
* Spin in a nested event loop until we receive a unblock-modal-prompt message for
* this window.
*/
_waitForResult: function(win) {
debug("_waitForResult(" + win + ")");
let utils = win.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
let outerWindowID = utils.outerWindowID;
let innerWindowID = this._tryGetInnerWindowID(win);
if (innerWindowID === null) {
// I have no idea what waiting for a result means when there's no inner
// window, so let's just bail.
debug("_waitForResult: No inner window. Bailing.");
return;
}
this._windowIDDict[outerWindowID] = Cu.getWeakReference(win);
debug("Entering modal state (outerWindowID=" + outerWindowID + ", " +
"innerWindowID=" + innerWindowID + ")");
// In theory, we're supposed to pass |modalStateWin| back to
// leaveModalStateWithWindow. But in practice, the window is always null,
// because it's the window associated with this script context, which
// doesn't have a window. But we'll play along anyway in case this
// changes.
var modalStateWin = utils.enterModalStateWithWindow();
// We'll decrement win.modalDepth when we receive a unblock-modal-prompt message
// for the window.
if (!win.modalDepth) {
win.modalDepth = 0;
}
win.modalDepth++;
let origModalDepth = win.modalDepth;
let thread = Services.tm.currentThread;
debug("Nested event loop - begin");
while (win.modalDepth == origModalDepth) {
// Bail out of the loop if the inner window changed; that means the
// window navigated.
if (this._tryGetInnerWindowID(win) !== innerWindowID) {
debug("_waitForResult: Inner window ID changed " +
"while in nested event loop.");
break;
}
thread.processNextEvent(/* mayWait = */ true);
}
debug("Nested event loop - finish");
// If we exited the loop because the inner window changed, then bail on the
// modal prompt.
if (innerWindowID !== this._tryGetInnerWindowID(win)) {
throw Components.Exception("Modal state aborted by navigation",
Cr.NS_ERROR_NOT_AVAILABLE);
}
let returnValue = win.modalReturnValue;
delete win.modalReturnValue;
utils.leaveModalStateWithWindow(modalStateWin);
debug("Leaving modal state (outerID=" + outerWindowID + ", " +
"innerID=" + innerWindowID + ")");
return returnValue;
},
_recvStopWaiting: function(msg) {
let outerID = msg.json.windowID.outer;
let innerID = msg.json.windowID.inner;
let returnValue = msg.json.returnValue;
debug("recvStopWaiting(outer=" + outerID + ", inner=" + innerID +
", returnValue=" + returnValue + ")");
if (!this._windowIDDict[outerID]) {
debug("recvStopWaiting: No record of outer window ID " + outerID);
return;
}
let win = this._windowIDDict[outerID].get();
delete this._windowIDDict[outerID];
if (!win) {
debug("recvStopWaiting, but window is gone\n");
return;
}
if (innerID !== this._tryGetInnerWindowID(win)) {
debug("recvStopWaiting, but inner ID has changed\n");
return;
}
debug("recvStopWaiting " + win);
win.modalReturnValue = returnValue;
win.modalDepth--;
},
_recvExitFullscreen: function() {
var utils = content.document.defaultView
.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
utils.exitFullscreen();
},
_titleChangedHandler: function(e) {
debug("Got titlechanged: (" + e.target.title + ")");
var win = e.target.defaultView;
// Ignore titlechanges which don't come from the top-level
// <iframe mozbrowser> window.
if (win == content) {
sendAsyncMsg('titlechange', { _payload_: e.target.title });
}
else {
debug("Not top level!");
}
},
// Handles DOMLinkAdded: when a <link rel="...icon..."> is added to the
// top-level document, forwards its href to the embedder as 'iconchange'.
_iconChangedHandler: function(e) {
  debug("Got iconchanged: (" + e.target.href + ")");

  // rel is a space-separated token list; match 'icon' case-insensitively.
  var hasIcon = e.target.rel.split(' ').some(function(x) {
    return x.toLowerCase() === 'icon';
  });

  if (hasIcon) {
    var win = e.target.ownerDocument.defaultView;
    // Ignore iconchanges which don't come from the top-level
    // <iframe mozbrowser> window.
    if (win == content) {
      sendAsyncMsg('iconchange', { _payload_: e.target.href });
    }
    else {
      debug("Not top level!");
    }
  }
},
// Registers a one-shot MozAfterPaint listener that invokes |callback| on
// the first paint of a non-about:blank document, then unregisters itself.
// Returns the raw listener so callers can cancel it early via
// _removeMozAfterPaintHandler.
_addMozAfterPaintHandler: function(callback) {
  function onMozAfterPaint() {
    let uri = docShell.QueryInterface(Ci.nsIWebNavigation).currentURI;
    // Paints of the initial about:blank don't count; keep listening.
    if (uri.spec != "about:blank") {
      debug("Got afterpaint event: " + uri.spec);
      removeEventListener('MozAfterPaint', onMozAfterPaint,
                          /* useCapture = */ true);
      callback();
    }
  }
  addEventListener('MozAfterPaint', onMozAfterPaint, /* useCapture = */ true);
  return onMozAfterPaint;
},
_removeMozAfterPaintHandler: function(listener) {
removeEventListener('MozAfterPaint', listener,
/* useCapture = */ true);
},
_activateNextPaintListener: function(e) {
if (!this._nextPaintHandler) {
this._nextPaintHandler = this._addMozAfterPaintHandler(function () {
this._nextPaintHandler = null;
sendAsyncMsg('nextpaint');
}.bind(this));
}
},
_deactivateNextPaintListener: function(e) {
if (this._nextPaintHandler) {
this._removeMozAfterPaintHandler(this._nextPaintHandler);
this._nextPaintHandler = null;
}
},
_windowCloseHandler: function(e) {
let win = e.target;
if (win != content || e.defaultPrevented) {
return;
}
debug("Closing window " + win);
sendAsyncMsg('close');
// Inform the window implementation that we handled this close ourselves.
e.preventDefault();
},
_windowCreatedHandler: function(e) {
let targetDocShell = e.target.defaultView
.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIWebNavigation);
if (targetDocShell != docShell) {
return;
}
let uri = docShell.QueryInterface(Ci.nsIWebNavigation).currentURI;
debug("Window created: " + uri.spec);
if (uri.spec != "about:blank") {
this._addMozAfterPaintHandler(function () {
sendAsyncMsg('documentfirstpaint');
});
}
},
_contextmenuHandler: function(e) {
debug("Got contextmenu");
if (e.defaultPrevented) {
return;
}
e.preventDefault();
this._ctxCounter++;
this._ctxHandlers = {};
var elem = e.target;
var menuData = {systemTargets: [], contextmenu: null};
var ctxMenuId = null;
while (elem && elem.parentNode) {
var ctxData = this._getSystemCtxMenuData(elem);
if (ctxData) {
menuData.systemTargets.push({
nodeName: elem.nodeName,
data: ctxData
});
}
if (!ctxMenuId && 'hasAttribute' in elem && elem.hasAttribute('contextmenu')) {
ctxMenuId = elem.getAttribute('contextmenu');
}
elem = elem.parentNode;
}
if (ctxMenuId) {
var menu = e.target.ownerDocument.getElementById(ctxMenuId);
if (menu) {
menuData.contextmenu = this._buildMenuObj(menu, '');
}
}
sendAsyncMsg('contextmenu', menuData);
},
_getSystemCtxMenuData: function(elem) {
if ((elem instanceof Ci.nsIDOMHTMLAnchorElement && elem.href) ||
(elem instanceof Ci.nsIDOMHTMLAreaElement && elem.href)) {
return elem.href;
}
if (elem instanceof Ci.nsIImageLoadingContent && elem.currentURI) {
return elem.currentURI.spec;
}
if ((elem instanceof Ci.nsIDOMHTMLMediaElement) ||
(elem instanceof Ci.nsIDOMHTMLImageElement)) {
return elem.currentSrc || elem.src;
}
return false;
},
_scrollEventHandler: function(e) {
let win = e.target.defaultView;
if (win != content) {
return;
}
debug("scroll event " + win);
sendAsyncMsg("scroll", { top: win.scrollY, left: win.scrollX });
},
_recvPurgeHistory: function(data) {
debug("Received purgeHistory message: (" + data.json.id + ")");
let history = docShell.QueryInterface(Ci.nsIWebNavigation).sessionHistory;
try {
if (history && history.count) {
history.PurgeHistory(history.count);
}
} catch(e) {}
sendAsyncMsg('got-purge-history', { id: data.json.id, successRv: true });
},
_recvGetScreenshot: function(data) {
debug("Received getScreenshot message: (" + data.json.id + ")");
let self = this;
let maxWidth = data.json.args.width;
let maxHeight = data.json.args.height;
let domRequestID = data.json.id;
let takeScreenshotClosure = function() {
self._takeScreenshot(maxWidth, maxHeight, domRequestID);
};
let maxDelayMS = 2000;
try {
maxDelayMS = Services.prefs.getIntPref('dom.browserElement.maxScreenshotDelayMS');
}
catch(e) {}
// Try to wait for the event loop to go idle before we take the screenshot,
// but once we've waited maxDelayMS milliseconds, go ahead and take it
// anyway.
Cc['@mozilla.org/message-loop;1'].getService(Ci.nsIMessageLoop).postIdleTask(
takeScreenshotClosure, maxDelayMS);
},
/**
* Actually take a screenshot and foward the result up to our parent, given
* the desired maxWidth and maxHeight, and given the DOMRequest ID associated
* with the request from the parent.
*/
_takeScreenshot: function(maxWidth, maxHeight, domRequestID) {
// You can think of the screenshotting algorithm as carrying out the
// following steps:
//
// - Let scaleWidth be the factor by which we'd need to downscale the
// viewport so it would fit within maxWidth. (If the viewport's width
// is less than maxWidth, let scaleWidth be 1.) Compute scaleHeight
// the same way.
//
// - Scale the viewport by max(scaleWidth, scaleHeight). Now either the
// viewport's width is no larger than maxWidth, the viewport's height is
// no larger than maxHeight, or both.
//
// - Crop the viewport so its width is no larger than maxWidth and its
// height is no larger than maxHeight.
//
// - Return a screenshot of the page's viewport scaled and cropped per
// above.
debug("Taking a screenshot: maxWidth=" + maxWidth +
", maxHeight=" + maxHeight +
", domRequestID=" + domRequestID + ".");
let scaleWidth = Math.min(1, maxWidth / content.innerWidth);
let scaleHeight = Math.min(1, maxHeight / content.innerHeight);
let scale = Math.max(scaleWidth, scaleHeight);
let canvasWidth = Math.min(maxWidth, Math.round(content.innerWidth * scale));
let canvasHeight = Math.min(maxHeight, Math.round(content.innerHeight * scale));
var canvas = content.document
.createElementNS("http://www.w3.org/1999/xhtml", "canvas");
canvas.mozOpaque = true;
canvas.width = canvasWidth;
canvas.height = canvasHeight;
var ctx = canvas.getContext("2d");
ctx.scale(scale, scale);
ctx.drawWindow(content, 0, 0, content.innerWidth, content.innerHeight,
"rgb(255,255,255)");
// Take a JPEG screenshot to hack around the fact that we can't specify
// opaque PNG. This requires us to unpremultiply the alpha channel, which
// is expensive on ARM processors because they lack a hardware integer
// division instruction.
canvas.toBlob(function(blob) {
sendAsyncMsg('got-screenshot', {
id: domRequestID,
successRv: blob
});
}, 'image/jpeg');
},
_recvFireCtxCallback: function(data) {
debug("Received fireCtxCallback message: (" + data.json.menuitem + ")");
// We silently ignore if the embedder uses an incorrect id in the callback
if (data.json.menuitem in this._ctxHandlers) {
this._ctxHandlers[data.json.menuitem].click();
this._ctxHandlers = {};
} else {
debug("Ignored invalid contextmenu invocation");
}
},
_buildMenuObj: function(menu, idPrefix) {
function maybeCopyAttribute(src, target, attribute) {
if (src.getAttribute(attribute)) {
target[attribute] = src.getAttribute(attribute);
}
}
var menuObj = {type: 'menu', items: []};
maybeCopyAttribute(menu, menuObj, 'label');
for (var i = 0, child; child = menu.children[i++];) {
if (child.nodeName === 'MENU') {
menuObj.items.push(this._buildMenuObj(child, idPrefix + i + '_'));
} else if (child.nodeName === 'MENUITEM') {
var id = this._ctxCounter + '_' + idPrefix + i;
var menuitem = {id: id, type: 'menuitem'};
maybeCopyAttribute(child, menuitem, 'label');
maybeCopyAttribute(child, menuitem, 'icon');
this._ctxHandlers[id] = child;
menuObj.items.push(menuitem);
}
}
return menuObj;
},
_recvSetVisible: function(data) {
debug("Received setVisible message: (" + data.json.visible + ")");
this._forcedVisible = data.json.visible;
this._updateDocShellVisibility();
},
_recvVisible: function(data) {
sendAsyncMsg('got-visible', {
id: data.json.id,
successRv: docShell.isActive
});
},
/**
* Called when the window which contains this iframe becomes hidden or
* visible.
*/
_recvOwnerVisibilityChange: function(data) {
debug("Received ownerVisibilityChange: (" + data.json.visible + ")");
this._ownerVisible = data.json.visible;
this._updateDocShellVisibility();
},
_updateDocShellVisibility: function() {
var visible = this._forcedVisible && this._ownerVisible;
if (docShell.isActive !== visible) {
docShell.isActive = visible;
}
},
_recvSendMouseEvent: function(data) {
let json = data.json;
let utils = content.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
utils.sendMouseEvent(json.type, json.x, json.y, json.button,
json.clickCount, json.modifiers);
},
_recvSendTouchEvent: function(data) {
let json = data.json;
let utils = content.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
utils.sendTouchEvent(json.type, json.identifiers, json.touchesX,
json.touchesY, json.radiisX, json.radiisY,
json.rotationAngles, json.forces, json.count,
json.modifiers);
},
_recvCanGoBack: function(data) {
var webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
sendAsyncMsg('got-can-go-back', {
id: data.json.id,
successRv: webNav.canGoBack
});
},
_recvCanGoForward: function(data) {
var webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
sendAsyncMsg('got-can-go-forward', {
id: data.json.id,
successRv: webNav.canGoForward
});
},
_recvGoBack: function(data) {
try {
docShell.QueryInterface(Ci.nsIWebNavigation).goBack();
} catch(e) {
// Silently swallow errors; these happen when we can't go back.
}
},
_recvGoForward: function(data) {
try {
docShell.QueryInterface(Ci.nsIWebNavigation).goForward();
} catch(e) {
// Silently swallow errors; these happen when we can't go forward.
}
},
// Handles the 'reload' message.  A hard reload bypasses both the proxy and
// the cache; a normal reload uses the default load flags.
_recvReload: function(data) {
  let webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
  let reloadFlags = data.json.hardReload ?
    webNav.LOAD_FLAGS_BYPASS_PROXY | webNav.LOAD_FLAGS_BYPASS_CACHE :
    webNav.LOAD_FLAGS_NONE;
  try {
    webNav.reload(reloadFlags);
  } catch(e) {
    // Silently swallow errors; these can happen if a user cancels reload
  }
},
_recvStop: function(data) {
let webNav = docShell.QueryInterface(Ci.nsIWebNavigation);
webNav.stop(webNav.STOP_NETWORK);
},
_keyEventHandler: function(e) {
if (whitelistedEvents.indexOf(e.keyCode) != -1 && !e.defaultPrevented) {
sendAsyncMsg('keyevent', {
type: e.type,
keyCode: e.keyCode,
charCode: e.charCode,
});
}
},
// The docShell keeps a weak reference to the progress listener, so we need
// to keep a strong ref to it ourselves.
//
// Translates nsIWebProgressListener callbacks into async messages
// ('locationchange', 'loadstart', 'loadend', 'error', 'securitychange')
// sent up to the parent process via sendAsyncMsg.
_progressListener: {
  QueryInterface: XPCOMUtils.generateQI([Ci.nsIWebProgressListener,
                                         Ci.nsISupportsWeakReference]),

  // Set to true on the first STATE_START we observe; used to suppress
  // locationchange events fired before the first real load.
  _seenLoadStart: false,

  onLocationChange: function(webProgress, request, location, flags) {
    // We get progress events from subshells here, which is kind of weird.
    if (webProgress != docShell) {
      return;
    }

    // Ignore locationchange events which occur before the first loadstart.
    // These are usually about:blank loads we don't care about.
    if (!this._seenLoadStart) {
      return;
    }

    // Remove password and wyciwyg from uri.
    location = Cc["@mozilla.org/docshell/urifixup;1"]
      .getService(Ci.nsIURIFixup).createExposableURI(location);

    sendAsyncMsg('locationchange', { _payload_: location.spec });
  },

  onStateChange: function(webProgress, request, stateFlags, status) {
    // Only report state changes for the top-level docshell.
    if (webProgress != docShell) {
      return;
    }

    if (stateFlags & Ci.nsIWebProgressListener.STATE_START) {
      this._seenLoadStart = true;
      sendAsyncMsg('loadstart');
    }

    if (stateFlags & Ci.nsIWebProgressListener.STATE_STOP) {
      sendAsyncMsg('loadend');

      // Ignoring NS_BINDING_ABORTED, which is set when loading page is
      // stopped.
      if (status == Cr.NS_OK ||
          status == Cr.NS_BINDING_ABORTED) {
        return;
      }

      // TODO See nsDocShell::DisplayLoadError for a list of all the error
      // codes (the status param) we should eventually handle here.
      sendAsyncMsg('error', { type: 'other' });
    }
  },

  onSecurityChange: function(webProgress, request, state) {
    if (webProgress != docShell) {
      return;
    }

    // Map the security state flags onto a simple string for the parent.
    var stateDesc;
    if (state & Ci.nsIWebProgressListener.STATE_IS_SECURE) {
      stateDesc = 'secure';
    }
    else if (state & Ci.nsIWebProgressListener.STATE_IS_BROKEN) {
      stateDesc = 'broken';
    }
    else if (state & Ci.nsIWebProgressListener.STATE_IS_INSECURE) {
      stateDesc = 'insecure';
    }
    else {
      debug("Unexpected securitychange state!");
      stateDesc = '???';
    }

    // XXX Until bug 764496 is fixed, this will always return false.
    var isEV = !!(state & Ci.nsIWebProgressListener.STATE_IDENTITY_EV_TOPLEVEL);

    sendAsyncMsg('securitychange', { state: stateDesc, extendedValidation: isEV });
  },

  // Unused nsIWebProgressListener callbacks; present to satisfy the interface.
  onStatusChange: function(webProgress, request, status, message) {},

  onProgressChange: function(webProgress, request, curSelfProgress,
                             maxSelfProgress, curTotalProgress, maxTotalProgress) {},
},
// Expose the message manager for WebApps and others.
//
// Only a restricted, pre-bound subset of the global frame-script message
// manager is exposed, so consumers cannot reach the rest of the global.
_messageManagerPublic: {
  sendAsyncMessage: global.sendAsyncMessage.bind(global),
  sendSyncMessage: global.sendSyncMessage.bind(global),
  addMessageListener: global.addMessageListener.bind(global),
  removeMessageListener: global.removeMessageListener.bind(global)
},

// Public accessor for the restricted message-manager facade above.
get messageManager() {
  return this._messageManagerPublic;
}
};
var api = new BrowserElementChild();

Просмотреть файл

@ -5,6 +5,14 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
dump("############################### browserElementPanning.js loaded\n");
let { classes: Cc, interfaces: Ci, results: Cr, utils: Cu } = Components;
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/Geometry.jsm");
const ContentPanning = {
// Are we listening to touch or mouse events?
watchedEventsType: '',
@ -35,8 +43,8 @@ const ContentPanning = {
this.watchedEventsType = 'mouse';
}
events.forEach(function(type) {
addEventListener(type, ContentPanning, false);
});
addEventListener(type, this, false);
}.bind(this));
addMessageListener("Viewport:Change", this._recvViewportChange.bind(this));
addMessageListener("Gesture:DoubleTap", this._recvDoubleTap.bind(this));

Просмотреть файл

@ -95,8 +95,6 @@ function BrowserElementParent(frameLoader, hasRemoteFrame) {
// We use a single message and dispatch to various function based
// on data.msg_name
let mmCalls = {
"get-name": this._recvGetName,
"get-fullscreen-allowed": this._recvGetFullscreenAllowed,
"hello": this._recvHello,
"contextmenu": this._fireCtxMenuEvent,
"locationchange": this._fireEventFromMsg,
@ -281,15 +279,13 @@ BrowserElementParent.prototype = {
if (this._window.document.hidden) {
this._ownerVisibilityChange();
}
},
_recvGetName: function(data) {
return this._frameElement.getAttribute('name');
},
_recvGetFullscreenAllowed: function(data) {
return this._frameElement.hasAttribute('allowfullscreen') ||
this._frameElement.hasAttribute('mozallowfullscreen');
return {
name: this._frameElement.getAttribute('name'),
fullscreenAllowed:
this._frameElement.hasAttribute('allowfullscreen') ||
this._frameElement.hasAttribute('mozallowfullscreen')
}
},
_fireCtxMenuEvent: function(data) {
@ -321,7 +317,7 @@ BrowserElementParent.prototype = {
// For events that send a "_payload_" property, we just want to transmit
// this in the event.
if (detail._payload_) {
if ("_payload_" in detail) {
detail = detail._payload_;
}
@ -571,7 +567,7 @@ BrowserElementParent.prototype = {
},
_remoteFullscreenOriginChange: function(data) {
let origin = data.json;
let origin = data.json._payload_;
this._windowUtils.remoteFrameFullscreenChanged(this._frameElement, origin);
},

Просмотреть файл

@ -11050,8 +11050,6 @@
"[[\"removeformat\",\"\"]] \"foo<del>b[a]r</del>baz\" compare innerHTML":true,
"[[\"removeformat\",\"\"]] \"[foo<nobr>bar</nobr>baz]\" compare innerHTML":true,
"[[\"removeformat\",\"\"]] \"foo<nobr>b[a]r</nobr>baz\" compare innerHTML":true,
"[[\"removeformat\",\"\"]] \"[foo<video></video>bar]\" compare innerHTML":true,
"[[\"removeformat\",\"\"]] \"[foo<video src=abc></video>bar]\" compare innerHTML":true,
"[[\"removeformat\",\"\"]] \"[foo<svg><circle fill=blue r=20 cx=20 cy=20 /></svg>bar]\" compare innerHTML":true,
"[[\"removeformat\",\"\"]] \"[foo<nonexistentelement>bar</nonexistentelement>baz]\" compare innerHTML":true,
"[[\"removeformat\",\"\"]] \"foo<nonexistentelement>b[a]r</nonexistentelement>baz\" compare innerHTML":true,

Просмотреть файл

@ -229,8 +229,6 @@ TabChild::PreloadSlowThings()
}
// Just load and compile these scripts, but don't run them.
tab->TryCacheLoadAndCompileScript(BROWSER_ELEMENT_CHILD_SCRIPT);
tab->TryCacheLoadAndCompileScript(
NS_LITERAL_STRING("chrome://browser/content/forms.js"));
// Load, compile, and run these scripts.
tab->RecvLoadRemoteScript(
NS_LITERAL_STRING("chrome://global/content/preload.js"));

Просмотреть файл

@ -5,5 +5,7 @@
toolkit.jar:
content/global/test-ipc.xul (test.xul)
content/global/remote-test-ipc.js (remote-test.js)
* content/global/BrowserElementChild.js (../browser-element/BrowserElementChild.js)
content/global/BrowserElementChild.js (../browser-element/BrowserElementChild.js)
* content/global/BrowserElementChildPreload.js (../browser-element/BrowserElementChildPreload.js)
* content/global/BrowserElementPanning.js (../browser-element/BrowserElementPanning.js)
content/global/preload.js (preload.js)

Просмотреть файл

@ -7,9 +7,11 @@
// This script is run when the preallocated process starts. It is injected as
// a frame script.
"use strict";
const BrowserElementIsPreloaded = true;
(function (global) {
"use strict";
(function () {
let Cu = Components.utils;
let Cc = Components.classes;
let Ci = Components.interfaces;
@ -73,10 +75,23 @@
Cc["@mozilla.org/thread-manager;1"].getService(Ci["nsIThreadManager"]);
Cc["@mozilla.org/toolkit/app-startup;1"].getService(Ci["nsIAppStartup"]);
Cc["@mozilla.org/uriloader;1"].getService(Ci["nsIURILoader"]);
Cc["@mozilla.org/contentsecuritypolicy;1"].createInstance(Ci["nsIContentSecurityPolicy"]);
/* Applications Specific Helper */
Cc["@mozilla.org/settingsManager;1"].getService(Ci["nsIDOMSettingsManager"]);
// This is a product-specific file that's sometimes unavailable.
try {
Services.scriptloader.loadSubScript("chrome://browser/content/forms.js", global);
} catch (e) {
}
Services.scriptloader.loadSubScript("chrome://global/content/BrowserElementPanning.js", global);
Services.scriptloader.loadSubScript("chrome://global/content/BrowserElementChildPreload.js", global);
Services.io.getProtocolHandler("app");
Services.io.getProtocolHandler("default");
docShell.isActive = false;
docShell.createAboutBlankContentViewer(null);
})();
})(this);

Просмотреть файл

@ -360,10 +360,13 @@ public:
NS_ASSERTION(NS_IsMainThread(), "Only create MediaManager on main thread");
nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
obs->AddObserver(sSingleton, "xpcom-shutdown", false);
obs->AddObserver(sSingleton, "getUserMedia:response:allow", false);
obs->AddObserver(sSingleton, "getUserMedia:response:deny", false);
obs->AddObserver(sSingleton, "getUserMedia:revoke", false);
if (obs) {
obs->AddObserver(sSingleton, "xpcom-shutdown", false);
obs->AddObserver(sSingleton, "getUserMedia:response:allow", false);
obs->AddObserver(sSingleton, "getUserMedia:response:deny", false);
obs->AddObserver(sSingleton, "getUserMedia:revoke", false);
}
// else MediaManager won't work properly and will leak (see bug 837874)
}
return sSingleton;
}

Просмотреть файл

@ -110,14 +110,14 @@ this.PhoneNumber = (function (dataBase) {
// Only the first entry has the formats field set.
// Parse the main country if we haven't already and use
// the formats field from the main country.
if (typeof entry[0] == "string" && entry[0].substr(2,2) == region)
if (typeof entry[0] == "string")
entry[0] = ParseMetaData(countryCode, entry[0]);
let formats = entry[0].formats;
let current = ParseMetaData(countryCode, entry[n]);
current.formats = formats;
return entry[n] = current;
}
entry[n] = ParseMetaData(countryCode, entry[n]);
return entry[n];
}

Просмотреть файл

@ -77,6 +77,27 @@ function Format(dial, currentRegion, nationalNumber, region, nationalFormat, int
Parse("033316005", "NZ");
Parse("03-331 6005", "NZ");
Parse("03 331 6005", "NZ");
// Always test CA before US because CA has to load all meta-info for US.
ParseWithIntl("4031234567", "CA");
Parse("(416) 585-4319", "CA");
Parse("647-967-4357", "CA");
Parse("416-716-8768", "CA");
Parse("18002684646", "CA");
Parse("416-445-9119", "CA");
Parse("1-800-668-6866", "CA");
Parse("(416) 453-6486", "CA");
Parse("(647) 268-4778", "CA");
Parse("647-218-1313", "CA");
Parse("+1 647-209-4642", "CA");
Parse("416-559-0133", "CA");
Parse("+1 647-639-4118", "CA");
Parse("+12898803664", "CA");
Parse("780-901-4687", "CA");
Parse("+14167070550", "CA");
Parse("+1-647-522-6487", "CA");
Parse("(416) 877-0880", "CA");
// Testing international prefixes.
// Should strip country code.
Parse("0064 3 331 6005", "NZ");
@ -135,26 +156,6 @@ Parse("+52 1 33 1234-5678", "MX");
Parse("044 (33) 1234-5678", "MX");
Parse("045 33 1234-5678", "MX");
ParseWithIntl("4031234567", "CA");
Parse("(416) 585-4319", "CA");
Parse("647-967-4357", "CA");
Parse("416-716-8768", "CA");
Parse("18002684646", "CA");
Parse("416-445-9119", "CA");
Parse("1-800-668-6866", "CA");
Parse("(416) 453-6486", "CA");
Parse("(647) 268-4778", "CA");
Parse("647-218-1313", "CA");
Parse("+1 647-209-4642", "CA");
Parse("416-559-0133", "CA");
Parse("+1 647-639-4118", "CA");
Parse("+12898803664", "CA");
Parse("780-901-4687", "CA");
Parse("+14167070550", "CA");
Parse("+1-647-522-6487", "CA");
Parse("(416) 877-0880", "CA");
// Test that lots of spaces are ok.
Parse("0 3 3 3 1 6 0 0 5", "NZ");

Просмотреть файл

@ -25,16 +25,27 @@ Components.utils.import("resource://gre/modules/PhoneNumberUtils.jsm");
function CantParseWithMcc(dial, mcc) {
var result = PhoneNumberUtils.parseWithMCC(dial, mcc);
if (result) {
ok(false, "Shouldn't parse!\n");
print("expected: does not parse");
print("got: " + dial + " " + mcc);
ok(false, "Shouldn't parse!\n");
dump("expected: does not parse");
dump("got: " + dial + " " + mcc);
} else {
ok(true, "Parses");
}
}
function ParseWithMcc(dial, mcc) {
var result = PhoneNumberUtils.parseWithMCC(dial, mcc);
if (result) {
ok(true, "Parses!\n");
} else {
ok(false, "Should Parse");
dump("expected: parses");
}
}
// Unknown mcc
CantParseWithMcc("1234", 123);
ParseWithMcc("4168293997", 302);
</script>
</window>

Просмотреть файл

@ -155,10 +155,39 @@ GfxOpToSkiaOp(CompositionOp op)
return SkXfermode::kDstATop_Mode;
case OP_XOR:
return SkXfermode::kXor_Mode;
case OP_COUNT:
case OP_MULTIPLY:
return SkXfermode::kMultiply_Mode;
case OP_SCREEN:
return SkXfermode::kScreen_Mode;
case OP_OVERLAY:
return SkXfermode::kOverlay_Mode;
case OP_DARKEN:
return SkXfermode::kDarken_Mode;
case OP_LIGHTEN:
return SkXfermode::kLighten_Mode;
case OP_COLOR_DODGE:
return SkXfermode::kColorDodge_Mode;
case OP_COLOR_BURN:
return SkXfermode::kColorBurn_Mode;
case OP_HARD_LIGHT:
return SkXfermode::kHardLight_Mode;
case OP_SOFT_LIGHT:
return SkXfermode::kSoftLight_Mode;
case OP_DIFFERENCE:
return SkXfermode::kDifference_Mode;
case OP_EXCLUSION:
return SkXfermode::kExclusion_Mode;
case OP_HUE:
return SkXfermode::kHue_Mode;
case OP_SATURATION:
return SkXfermode::kSaturation_Mode;
case OP_COLOR:
return SkXfermode::kColor_Mode;
case OP_LUMINOSITY:
return SkXfermode::kLuminosity_Mode;
default:
return SkXfermode::kSrcOver_Mode;
}
return SkXfermode::kSrcOver_Mode;
}
static inline SkColor ColorToSkColor(const Color &color, Float aAlpha)

Просмотреть файл

@ -136,8 +136,15 @@ PathSkia::ContainsPoint(const Point &aPoint, const Matrix &aTransform) const
return false;
}
return mPath.contains(SkFloatToScalar(transformed.x),
SkFloatToScalar(transformed.y));
SkRegion pointRect;
pointRect.setRect(int32_t(SkFloatToScalar(transformed.x - 1)),
int32_t(SkFloatToScalar(transformed.y - 1)),
int32_t(SkFloatToScalar(transformed.x + 1)),
int32_t(SkFloatToScalar(transformed.y + 1)));
SkRegion pathRegion;
return pathRegion.setPath(mPath, pointRect);
}
static Rect SkRectToRect(const SkRect& aBounds)
@ -170,8 +177,15 @@ PathSkia::StrokeContainsPoint(const StrokeOptions &aStrokeOptions,
return false;
}
return strokePath.contains(SkFloatToScalar(transformed.x),
SkFloatToScalar(transformed.y));
SkRegion pointRect;
pointRect.setRect(int32_t(SkFloatToScalar(transformed.x - 1)),
int32_t(SkFloatToScalar(transformed.y - 1)),
int32_t(SkFloatToScalar(transformed.x + 1)),
int32_t(SkFloatToScalar(transformed.y + 1)));
SkRegion pathRegion;
return pathRegion.setPath(strokePath, pointRect);
}
Rect

Просмотреть файл

@ -39,7 +39,6 @@ bool ImageContainerParent::RecvFlush()
{
SharedImage *img = RemoveSharedImage(mID);
if (img) {
DeallocSharedImageData(this, *img);
delete img;
}
return true;

Просмотреть файл

@ -101,12 +101,12 @@ public:
// all remaining modes are defined in the SVG Compositing standard
// http://www.w3.org/TR/2009/WD-SVGCompositing-20090430/
kPlus_Mode,
kMultiply_Mode,
// all above modes can be expressed as pair of src/dst Coeffs
kCoeffModesCnt,
kScreen_Mode = kCoeffModesCnt,
kMultiply_Mode = kCoeffModesCnt,
kScreen_Mode,
kOverlay_Mode,
kDarken_Mode,
kLighten_Mode,
@ -116,8 +116,12 @@ public:
kSoftLight_Mode,
kDifference_Mode,
kExclusion_Mode,
kHue_Mode,
kSaturation_Mode,
kColor_Mode,
kLuminosity_Mode,
kLastMode = kExclusion_Mode
kLastMode = kLuminosity_Mode
};
/**

Просмотреть файл

@ -0,0 +1,698 @@
# HG changeset patch
# User Rik Cabanier <cabanier@adobe.com>
# Date 1360273929 -46800
# Node ID 3ac8edca3a03b3d22240b5a5b95ae3b5ada9877d
# Parent cbb67fe70b864b36165061e1fd3b083cd09af087
Bug 836892 - Add new blending modes to SkXfermode. r=gw280
diff --git a/gfx/skia/include/core/SkXfermode.h b/gfx/skia/include/core/SkXfermode.h
--- a/gfx/skia/include/core/SkXfermode.h
+++ b/gfx/skia/include/core/SkXfermode.h
@@ -96,33 +96,37 @@ public:
kDstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
kSrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
kDstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
kXor_Mode, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
// all remaining modes are defined in the SVG Compositing standard
// http://www.w3.org/TR/2009/WD-SVGCompositing-20090430/
kPlus_Mode,
- kMultiply_Mode,
// all above modes can be expressed as pair of src/dst Coeffs
kCoeffModesCnt,
- kScreen_Mode = kCoeffModesCnt,
+ kMultiply_Mode = kCoeffModesCnt,
+ kScreen_Mode,
kOverlay_Mode,
kDarken_Mode,
kLighten_Mode,
kColorDodge_Mode,
kColorBurn_Mode,
kHardLight_Mode,
kSoftLight_Mode,
kDifference_Mode,
kExclusion_Mode,
+ kHue_Mode,
+ kSaturation_Mode,
+ kColor_Mode,
+ kLuminosity_Mode,
- kLastMode = kExclusion_Mode
+ kLastMode = kLuminosity_Mode
};
/**
* If the xfermode is one of the modes in the Mode enum, then asMode()
* returns true and sets (if not null) mode accordingly. Otherwise it
* returns false and ignores the mode parameter.
*/
virtual bool asMode(Mode* mode);
diff --git a/gfx/skia/src/core/SkXfermode.cpp b/gfx/skia/src/core/SkXfermode.cpp
--- a/gfx/skia/src/core/SkXfermode.cpp
+++ b/gfx/skia/src/core/SkXfermode.cpp
@@ -7,16 +7,18 @@
*/
#include "SkXfermode.h"
#include "SkColorPriv.h"
#include "SkFlattenableBuffers.h"
#include "SkMathPriv.h"
+#include <algorithm>
+
SK_DEFINE_INST_COUNT(SkXfermode)
#define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
#if 0
// idea for higher precision blends in xfer procs (and slightly faster)
// see DstATop as a probable caller
static U8CPU mulmuldiv255round(U8CPU a, U8CPU b, U8CPU c, U8CPU d) {
@@ -176,244 +178,439 @@ static SkPMColor xor_modeproc(SkPMColor
static SkPMColor plus_modeproc(SkPMColor src, SkPMColor dst) {
unsigned b = saturated_add(SkGetPackedB32(src), SkGetPackedB32(dst));
unsigned g = saturated_add(SkGetPackedG32(src), SkGetPackedG32(dst));
unsigned r = saturated_add(SkGetPackedR32(src), SkGetPackedR32(dst));
unsigned a = saturated_add(SkGetPackedA32(src), SkGetPackedA32(dst));
return SkPackARGB32(a, r, g, b);
}
+static inline int srcover_byte(int a, int b) {
+ return a + b - SkAlphaMulAlpha(a, b);
+}
+
+#define blendfunc_byte(sc, dc, sa, da, blendfunc) \
+ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + blendfunc(sc, dc, sa, da))
+
// kMultiply_Mode
+static inline int multiply_byte(int sc, int dc, int sa, int da) {
+ return sc * dc;
+}
static SkPMColor multiply_modeproc(SkPMColor src, SkPMColor dst) {
- int a = SkAlphaMulAlpha(SkGetPackedA32(src), SkGetPackedA32(dst));
- int r = SkAlphaMulAlpha(SkGetPackedR32(src), SkGetPackedR32(dst));
- int g = SkAlphaMulAlpha(SkGetPackedG32(src), SkGetPackedG32(dst));
- int b = SkAlphaMulAlpha(SkGetPackedB32(src), SkGetPackedB32(dst));
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, multiply_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, multiply_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, multiply_byte);
return SkPackARGB32(a, r, g, b);
}
// kScreen_Mode
-static inline int srcover_byte(int a, int b) {
- return a + b - SkAlphaMulAlpha(a, b);
+static inline int screen_byte(int sc, int dc, int sa, int da) {
+ return sc * da + sa * dc - sc * dc;
}
static SkPMColor screen_modeproc(SkPMColor src, SkPMColor dst) {
- int a = srcover_byte(SkGetPackedA32(src), SkGetPackedA32(dst));
- int r = srcover_byte(SkGetPackedR32(src), SkGetPackedR32(dst));
- int g = srcover_byte(SkGetPackedG32(src), SkGetPackedG32(dst));
- int b = srcover_byte(SkGetPackedB32(src), SkGetPackedB32(dst));
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, screen_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, screen_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, screen_byte);
+ return SkPackARGB32(a, r, g, b);
+}
+
+// kHardLight_Mode
+static inline int hardlight_byte(int sc, int dc, int sa, int da) {
+ if(!sa || !da)
+ return sc * da;
+ float Sc = (float)sc/sa;
+ float Dc = (float)dc/da;
+ if(Sc <= 0.5)
+ Sc *= 2 * Dc;
+ else
+ Sc = -1 + 2 * Sc + 2 * Dc - 2 * Sc * Dc;
+
+ return Sc * sa * da;
+}
+static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, hardlight_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, hardlight_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, hardlight_byte);
return SkPackARGB32(a, r, g, b);
}
// kOverlay_Mode
static inline int overlay_byte(int sc, int dc, int sa, int da) {
- int tmp = sc * (255 - da) + dc * (255 - sa);
- int rc;
- if (2 * dc <= da) {
- rc = 2 * sc * dc;
- } else {
- rc = sa * da - 2 * (da - dc) * (sa - sc);
- }
- return clamp_div255round(rc + tmp);
+ return hardlight_byte(dc, sc, da, sa);
}
static SkPMColor overlay_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = overlay_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = overlay_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = overlay_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, overlay_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, overlay_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, overlay_byte);
return SkPackARGB32(a, r, g, b);
}
// kDarken_Mode
static inline int darken_byte(int sc, int dc, int sa, int da) {
- int sd = sc * da;
- int ds = dc * sa;
- if (sd < ds) {
- // srcover
- return sc + dc - SkDiv255Round(ds);
- } else {
- // dstover
- return dc + sc - SkDiv255Round(sd);
- }
+ return SkMin32(sc * da, sa * dc);
}
static SkPMColor darken_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = darken_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = darken_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = darken_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, darken_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, darken_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, darken_byte);
return SkPackARGB32(a, r, g, b);
}
// kLighten_Mode
static inline int lighten_byte(int sc, int dc, int sa, int da) {
- int sd = sc * da;
- int ds = dc * sa;
- if (sd > ds) {
- // srcover
- return sc + dc - SkDiv255Round(ds);
- } else {
- // dstover
- return dc + sc - SkDiv255Round(sd);
- }
+ return SkMax32(sc * da, sa * dc);
}
static SkPMColor lighten_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = lighten_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = lighten_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = lighten_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, lighten_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, lighten_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, lighten_byte);
return SkPackARGB32(a, r, g, b);
}
// kColorDodge_Mode
static inline int colordodge_byte(int sc, int dc, int sa, int da) {
- int diff = sa - sc;
- int rc;
- if (0 == diff) {
- rc = sa * da + sc * (255 - da) + dc * (255 - sa);
- rc = SkDiv255Round(rc);
- } else {
- int tmp = (dc * sa << 15) / (da * diff);
- rc = SkDiv255Round(sa * da) * tmp >> 15;
- // don't clamp here, since we'll do it in our modeproc
- }
- return rc;
+ if (dc == 0)
+ return 0;
+ // Avoid division by 0
+ if (sc == sa)
+ return da * sa;
+
+ return SkMin32(sa * da, sa * sa * dc / (sa - sc));
}
static SkPMColor colordodge_modeproc(SkPMColor src, SkPMColor dst) {
- // added to avoid div-by-zero in colordodge_byte
- if (0 == dst) {
- return src;
- }
-
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = colordodge_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = colordodge_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = colordodge_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
- r = clamp_max(r, a);
- g = clamp_max(g, a);
- b = clamp_max(b, a);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colordodge_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colordodge_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colordodge_byte);
return SkPackARGB32(a, r, g, b);
}
// kColorBurn_Mode
static inline int colorburn_byte(int sc, int dc, int sa, int da) {
- int rc;
- if (dc == da && 0 == sc) {
- rc = sa * da + dc * (255 - sa);
- } else if (0 == sc) {
- return SkAlphaMulAlpha(dc, 255 - sa);
- } else {
- int tmp = (sa * (da - dc) * 256) / (sc * da);
- if (tmp > 256) {
- tmp = 256;
- }
- int tmp2 = sa * da;
- rc = tmp2 - (tmp2 * tmp >> 8) + sc * (255 - da) + dc * (255 - sa);
- }
- return SkDiv255Round(rc);
+ // Avoid division by 0
+ if(dc == da)
+ return sa * da;
+ if(sc == 0)
+ return 0;
+
+ return sa * da - SkMin32(sa * da, sa * sa * (da - dc) / sc);
}
static SkPMColor colorburn_modeproc(SkPMColor src, SkPMColor dst) {
- // added to avoid div-by-zero in colorburn_byte
- if (0 == dst) {
- return src;
- }
-
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = colorburn_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = colorburn_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = colorburn_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
- return SkPackARGB32(a, r, g, b);
-}
-
-// kHardLight_Mode
-static inline int hardlight_byte(int sc, int dc, int sa, int da) {
- int rc;
- if (2 * sc <= sa) {
- rc = 2 * sc * dc;
- } else {
- rc = sa * da - 2 * (da - dc) * (sa - sc);
- }
- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
-}
-static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
- int sa = SkGetPackedA32(src);
- int da = SkGetPackedA32(dst);
- int a = srcover_byte(sa, da);
- int r = hardlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = hardlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = hardlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colorburn_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colorburn_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colorburn_byte);
return SkPackARGB32(a, r, g, b);
}
// returns 255 * sqrt(n/255)
static U8CPU sqrt_unit_byte(U8CPU n) {
return SkSqrtBits(n, 15+4);
}
// kSoftLight_Mode
static inline int softlight_byte(int sc, int dc, int sa, int da) {
int m = da ? dc * 256 / da : 0;
int rc;
- if (2 * sc <= sa) {
- rc = dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
- } else if (4 * dc <= da) {
+ if (2 * sc <= sa)
+ return dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
+
+ if (4 * dc <= da) {
int tmp = (4 * m * (4 * m + 256) * (m - 256) >> 16) + 7 * m;
- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
- } else {
- int tmp = sqrt_unit_byte(m) - m;
- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ return dc * sa + (da * (2 * sc - sa) * tmp >> 8);
}
- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
+ int tmp = sqrt_unit_byte(m) - m;
+ return rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
}
static SkPMColor softlight_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = softlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = softlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = softlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, softlight_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, softlight_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, softlight_byte);
return SkPackARGB32(a, r, g, b);
}
// kDifference_Mode
static inline int difference_byte(int sc, int dc, int sa, int da) {
- int tmp = SkMin32(sc * da, dc * sa);
- return clamp_signed_byte(sc + dc - 2 * SkDiv255Round(tmp));
+ int tmp = dc * sa - sc * da;
+ if(tmp<0)
+ return - tmp;
+
+ return tmp;
}
static SkPMColor difference_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = difference_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = difference_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = difference_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, difference_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, difference_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, difference_byte);
return SkPackARGB32(a, r, g, b);
}
// kExclusion_Mode
static inline int exclusion_byte(int sc, int dc, int sa, int da) {
- // this equations is wacky, wait for SVG to confirm it
- int r = sc * da + dc * sa - 2 * sc * dc + sc * (255 - da) + dc * (255 - sa);
- return clamp_div255round(r);
+ return sc * da + dc * sa - 2 * dc * sc;
}
static SkPMColor exclusion_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
- int r = exclusion_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
- int g = exclusion_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
- int b = exclusion_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, exclusion_byte);
+ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, exclusion_byte);
+ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, exclusion_byte);
+ return SkPackARGB32(a, r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+struct BlendColor {
+ float r;
+ float g;
+ float b;
+
+ BlendColor(): r(0), g(0), b(0)
+ {}
+};
+
+static inline float Lum(BlendColor C)
+{
+ return C.r * 0.3 + C.g * 0.59 + C.b* 0.11;
+}
+
+static inline float SkMinFloat(float a, float b)
+{
+ if (a > b)
+ a = b;
+ return a;
+}
+
+static inline float SkMaxFloat(float a, float b)
+{
+ if (a < b)
+ a = b;
+ return a;
+}
+
+#define minimum(C) SkMinFloat(SkMinFloat(C.r, C.g), C.b)
+#define maximum(C) SkMaxFloat(SkMaxFloat(C.r, C.g), C.b)
+
+static inline float Sat(BlendColor c) {
+ return maximum(c) - minimum(c);
+}
+
+static inline void setSaturationComponents(float& Cmin, float& Cmid, float& Cmax, float s) {
+ if(Cmax > Cmin) {
+ Cmid = (((Cmid - Cmin) * s ) / (Cmax - Cmin));
+ Cmax = s;
+ } else {
+ Cmax = 0;
+ Cmid = 0;
+ }
+ Cmin = 0;
+}
+
+static inline BlendColor SetSat(BlendColor C, float s) {
+ if(C.r <= C.g) {
+ if(C.g <= C.b)
+ setSaturationComponents(C.r, C.g, C.b, s);
+ else
+ if(C.r <= C.b)
+ setSaturationComponents(C.r, C.b, C.g, s);
+ else
+ setSaturationComponents(C.b, C.r, C.g, s);
+ } else if(C.r <= C.b)
+ setSaturationComponents(C.g, C.r, C.b, s);
+ else
+ if(C.g <= C.b)
+ setSaturationComponents(C.g, C.b, C.r, s);
+ else
+ setSaturationComponents(C.b, C.g, C.r, s);
+
+ return C;
+}
+
+static inline BlendColor clipColor(BlendColor C) {
+ float L = Lum(C);
+ float n = minimum(C);
+ float x = maximum(C);
+ if(n < 0) {
+ C.r = L + (((C.r - L) * L) / (L - n));
+ C.g = L + (((C.g - L) * L) / (L - n));
+ C.b = L + (((C.b - L) * L) / (L - n));
+ }
+
+ if(x > 1) {
+ C.r = L + (((C.r - L) * (1 - L)) / (x - L));
+ C.g = L + (((C.g - L) * (1 - L)) / (x - L));
+ C.b = L + (((C.b - L) * (1 - L)) / (x - L));
+ }
+ return C;
+}
+
+static inline BlendColor SetLum(BlendColor C, float l) {
+ float d = l - Lum(C);
+ C.r += d;
+ C.g += d;
+ C.b += d;
+
+ return clipColor(C);
+}
+
+#define blendfunc_nonsep_byte(sc, dc, sa, da, blendval) \
+ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + (int)(sa * da * blendval))
+
+static SkPMColor hue_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+
+ BlendColor Cs;
+ if(sa) {
+ Cs.r = (float)sr / sa;
+ Cs.g = (float)sg / sa;
+ Cs.b = (float)sb / sa;
+ BlendColor Cd;
+ if(da) {
+ Cd.r = (float)dr / da;
+ Cd.g = (float)dg / da;
+ Cd.b = (float)db / da;
+ Cs = SetLum(SetSat(Cs, Sat(Cd)), Lum(Cd));
+ }
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
+ return SkPackARGB32(a, r, g, b);
+}
+
+static SkPMColor saturation_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+
+ BlendColor Cs;
+ if(sa) {
+ Cs.r = (float)sr / sa;
+ Cs.g = (float)sg / sa;
+ Cs.b = (float)sb / sa;
+ BlendColor Cd;
+ if(da) {
+ Cd.r = (float)dr / da;
+ Cd.g = (float)dg / da;
+ Cd.b = (float)db / da;
+ Cs = SetLum(SetSat(Cd, Sat(Cs)), Lum(Cd));
+ }
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
+ return SkPackARGB32(a, r, g, b);
+}
+
+static SkPMColor color_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+
+ BlendColor Cs;
+ if(sa) {
+ Cs.r = (float)sr / sa;
+ Cs.g = (float)sg / sa;
+ Cs.b = (float)sb / sa;
+ BlendColor Cd;
+ if(da) {
+ Cd.r = (float)dr / da;
+ Cd.g = (float)dg / da;
+ Cd.b = (float)db / da;
+ Cs = SetLum(Cs, Lum(Cd));
+ }
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
+ return SkPackARGB32(a, r, g, b);
+}
+
+static SkPMColor luminosity_modeproc(SkPMColor src, SkPMColor dst) {
+ int sr = SkGetPackedR32(src);
+ int sg = SkGetPackedG32(src);
+ int sb = SkGetPackedB32(src);
+ int sa = SkGetPackedA32(src);
+
+ int dr = SkGetPackedR32(dst);
+ int dg = SkGetPackedG32(dst);
+ int db = SkGetPackedB32(dst);
+ int da = SkGetPackedA32(dst);
+
+ BlendColor Cs;
+ if(sa) {
+ Cs.r = (float)sr / sa;
+ Cs.g = (float)sg / sa;
+ Cs.b = (float)sb / sa;
+ BlendColor Cd;
+ if(da) {
+ Cd.r = (float)dr / da;
+ Cd.g = (float)dg / da;
+ Cd.b = (float)db / da;
+ Cs = SetLum(Cd, Lum(Cs));
+ }
+ }
+
+ int a = srcover_byte(sa, da);
+ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
+ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
+ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
return SkPackARGB32(a, r, g, b);
}
struct ProcCoeff {
SkXfermodeProc fProc;
SkXfermode::Coeff fSC;
SkXfermode::Coeff fDC;
};
@@ -430,27 +627,31 @@ static const ProcCoeff gProcCoeffs[] = {
{ dstin_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kSA_Coeff },
{ srcout_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kZero_Coeff },
{ dstout_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kISA_Coeff },
{ srcatop_modeproc, SkXfermode::kDA_Coeff, SkXfermode::kISA_Coeff },
{ dstatop_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kSA_Coeff },
{ xor_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kISA_Coeff },
{ plus_modeproc, SkXfermode::kOne_Coeff, SkXfermode::kOne_Coeff },
- { multiply_modeproc,SkXfermode::kZero_Coeff, SkXfermode::kSC_Coeff },
+ { multiply_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF},
{ screen_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ overlay_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ darken_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ lighten_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ colordodge_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ colorburn_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ hardlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ softlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ difference_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ exclusion_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { hue_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { saturation_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { color_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { luminosity_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
};
///////////////////////////////////////////////////////////////////////////////
bool SkXfermode::asCoeff(Coeff* src, Coeff* dst) {
return false;
}
@@ -1172,16 +1373,20 @@ static const Proc16Rec gModeProcs16[] =
{ darken_modeproc16_0, darken_modeproc16_255, NULL }, // darken
{ lighten_modeproc16_0, lighten_modeproc16_255, NULL }, // lighten
{ NULL, NULL, NULL }, // colordodge
{ NULL, NULL, NULL }, // colorburn
{ NULL, NULL, NULL }, // hardlight
{ NULL, NULL, NULL }, // softlight
{ NULL, NULL, NULL }, // difference
{ NULL, NULL, NULL }, // exclusion
+ { NULL, NULL, NULL }, // hue
+ { NULL, NULL, NULL }, // saturation
+ { NULL, NULL, NULL }, // color
+ { NULL, NULL, NULL }, // luminosity
};
SkXfermodeProc16 SkXfermode::GetProc16(Mode mode, SkColor srcColor) {
SkXfermodeProc16 proc16 = NULL;
if ((unsigned)mode < kModeCount) {
const Proc16Rec& rec = gModeProcs16[mode];
unsigned a = SkColorGetA(srcColor);

Просмотреть файл

@ -7,3 +7,4 @@ See the relevant bugs in bugzilla for information on these patches:
0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch
0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch
0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch
0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch

Просмотреть файл

@ -12,6 +12,8 @@
#include "SkFlattenableBuffers.h"
#include "SkMathPriv.h"
#include <algorithm>
SK_DEFINE_INST_COUNT(SkXfermode)
#define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
@ -181,173 +183,143 @@ static SkPMColor plus_modeproc(SkPMColor src, SkPMColor dst) {
return SkPackARGB32(a, r, g, b);
}
static inline int srcover_byte(int a, int b) {
return a + b - SkAlphaMulAlpha(a, b);
}
#define blendfunc_byte(sc, dc, sa, da, blendfunc) \
clamp_div255round(sc * (255 - da) + dc * (255 - sa) + blendfunc(sc, dc, sa, da))
// kMultiply_Mode
static inline int multiply_byte(int sc, int dc, int sa, int da) {
return sc * dc;
}
static SkPMColor multiply_modeproc(SkPMColor src, SkPMColor dst) {
int a = SkAlphaMulAlpha(SkGetPackedA32(src), SkGetPackedA32(dst));
int r = SkAlphaMulAlpha(SkGetPackedR32(src), SkGetPackedR32(dst));
int g = SkAlphaMulAlpha(SkGetPackedG32(src), SkGetPackedG32(dst));
int b = SkAlphaMulAlpha(SkGetPackedB32(src), SkGetPackedB32(dst));
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, multiply_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, multiply_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, multiply_byte);
return SkPackARGB32(a, r, g, b);
}
// kScreen_Mode
static inline int srcover_byte(int a, int b) {
return a + b - SkAlphaMulAlpha(a, b);
static inline int screen_byte(int sc, int dc, int sa, int da) {
return sc * da + sa * dc - sc * dc;
}
static SkPMColor screen_modeproc(SkPMColor src, SkPMColor dst) {
int a = srcover_byte(SkGetPackedA32(src), SkGetPackedA32(dst));
int r = srcover_byte(SkGetPackedR32(src), SkGetPackedR32(dst));
int g = srcover_byte(SkGetPackedG32(src), SkGetPackedG32(dst));
int b = srcover_byte(SkGetPackedB32(src), SkGetPackedB32(dst));
return SkPackARGB32(a, r, g, b);
}
// kOverlay_Mode
static inline int overlay_byte(int sc, int dc, int sa, int da) {
int tmp = sc * (255 - da) + dc * (255 - sa);
int rc;
if (2 * dc <= da) {
rc = 2 * sc * dc;
} else {
rc = sa * da - 2 * (da - dc) * (sa - sc);
}
return clamp_div255round(rc + tmp);
}
static SkPMColor overlay_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = overlay_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = overlay_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = overlay_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
return SkPackARGB32(a, r, g, b);
}
// kDarken_Mode
static inline int darken_byte(int sc, int dc, int sa, int da) {
int sd = sc * da;
int ds = dc * sa;
if (sd < ds) {
// srcover
return sc + dc - SkDiv255Round(ds);
} else {
// dstover
return dc + sc - SkDiv255Round(sd);
}
}
static SkPMColor darken_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = darken_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = darken_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = darken_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
return SkPackARGB32(a, r, g, b);
}
// kLighten_Mode
static inline int lighten_byte(int sc, int dc, int sa, int da) {
int sd = sc * da;
int ds = dc * sa;
if (sd > ds) {
// srcover
return sc + dc - SkDiv255Round(ds);
} else {
// dstover
return dc + sc - SkDiv255Round(sd);
}
}
static SkPMColor lighten_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = lighten_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = lighten_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = lighten_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
return SkPackARGB32(a, r, g, b);
}
// kColorDodge_Mode
static inline int colordodge_byte(int sc, int dc, int sa, int da) {
int diff = sa - sc;
int rc;
if (0 == diff) {
rc = sa * da + sc * (255 - da) + dc * (255 - sa);
rc = SkDiv255Round(rc);
} else {
int tmp = (dc * sa << 15) / (da * diff);
rc = SkDiv255Round(sa * da) * tmp >> 15;
// don't clamp here, since we'll do it in our modeproc
}
return rc;
}
static SkPMColor colordodge_modeproc(SkPMColor src, SkPMColor dst) {
// added to avoid div-by-zero in colordodge_byte
if (0 == dst) {
return src;
}
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = colordodge_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = colordodge_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = colordodge_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
r = clamp_max(r, a);
g = clamp_max(g, a);
b = clamp_max(b, a);
return SkPackARGB32(a, r, g, b);
}
// kColorBurn_Mode
static inline int colorburn_byte(int sc, int dc, int sa, int da) {
int rc;
if (dc == da && 0 == sc) {
rc = sa * da + dc * (255 - sa);
} else if (0 == sc) {
return SkAlphaMulAlpha(dc, 255 - sa);
} else {
int tmp = (sa * (da - dc) * 256) / (sc * da);
if (tmp > 256) {
tmp = 256;
}
int tmp2 = sa * da;
rc = tmp2 - (tmp2 * tmp >> 8) + sc * (255 - da) + dc * (255 - sa);
}
return SkDiv255Round(rc);
}
static SkPMColor colorburn_modeproc(SkPMColor src, SkPMColor dst) {
// added to avoid div-by-zero in colorburn_byte
if (0 == dst) {
return src;
}
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = colorburn_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = colorburn_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = colorburn_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, screen_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, screen_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, screen_byte);
return SkPackARGB32(a, r, g, b);
}
// kHardLight_Mode
static inline int hardlight_byte(int sc, int dc, int sa, int da) {
int rc;
if (2 * sc <= sa) {
rc = 2 * sc * dc;
} else {
rc = sa * da - 2 * (da - dc) * (sa - sc);
}
return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
if(!sa || !da)
return sc * da;
float Sc = (float)sc/sa;
float Dc = (float)dc/da;
if(Sc <= 0.5)
Sc *= 2 * Dc;
else
Sc = -1 + 2 * Sc + 2 * Dc - 2 * Sc * Dc;
return Sc * sa * da;
}
static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = hardlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = hardlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = hardlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, hardlight_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, hardlight_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, hardlight_byte);
return SkPackARGB32(a, r, g, b);
}
// kOverlay_Mode
static inline int overlay_byte(int sc, int dc, int sa, int da) {
return hardlight_byte(dc, sc, da, sa);
}
static SkPMColor overlay_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, overlay_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, overlay_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, overlay_byte);
return SkPackARGB32(a, r, g, b);
}
// kDarken_Mode
static inline int darken_byte(int sc, int dc, int sa, int da) {
return SkMin32(sc * da, sa * dc);
}
static SkPMColor darken_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, darken_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, darken_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, darken_byte);
return SkPackARGB32(a, r, g, b);
}
// kLighten_Mode
static inline int lighten_byte(int sc, int dc, int sa, int da) {
return SkMax32(sc * da, sa * dc);
}
static SkPMColor lighten_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, lighten_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, lighten_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, lighten_byte);
return SkPackARGB32(a, r, g, b);
}
// kColorDodge_Mode
static inline int colordodge_byte(int sc, int dc, int sa, int da) {
if (dc == 0)
return 0;
// Avoid division by 0
if (sc == sa)
return da * sa;
return SkMin32(sa * da, sa * sa * dc / (sa - sc));
}
static SkPMColor colordodge_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colordodge_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colordodge_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colordodge_byte);
return SkPackARGB32(a, r, g, b);
}
// kColorBurn_Mode
static inline int colorburn_byte(int sc, int dc, int sa, int da) {
// Avoid division by 0
if(dc == da)
return sa * da;
if(sc == 0)
return 0;
return sa * da - SkMin32(sa * da, sa * sa * (da - dc) / sc);
}
static SkPMColor colorburn_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colorburn_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colorburn_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colorburn_byte);
return SkPackARGB32(a, r, g, b);
}
@ -360,55 +332,280 @@ static U8CPU sqrt_unit_byte(U8CPU n) {
static inline int softlight_byte(int sc, int dc, int sa, int da) {
int m = da ? dc * 256 / da : 0;
int rc;
if (2 * sc <= sa) {
rc = dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
} else if (4 * dc <= da) {
if (2 * sc <= sa)
return dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
if (4 * dc <= da) {
int tmp = (4 * m * (4 * m + 256) * (m - 256) >> 16) + 7 * m;
rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
} else {
int tmp = sqrt_unit_byte(m) - m;
rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
return dc * sa + (da * (2 * sc - sa) * tmp >> 8);
}
return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
int tmp = sqrt_unit_byte(m) - m;
return rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
}
static SkPMColor softlight_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = softlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = softlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = softlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, softlight_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, softlight_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, softlight_byte);
return SkPackARGB32(a, r, g, b);
}
// kDifference_Mode
static inline int difference_byte(int sc, int dc, int sa, int da) {
int tmp = SkMin32(sc * da, dc * sa);
return clamp_signed_byte(sc + dc - 2 * SkDiv255Round(tmp));
int tmp = dc * sa - sc * da;
if(tmp<0)
return - tmp;
return tmp;
}
static SkPMColor difference_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = difference_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = difference_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = difference_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, difference_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, difference_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, difference_byte);
return SkPackARGB32(a, r, g, b);
}
// kExclusion_Mode
static inline int exclusion_byte(int sc, int dc, int sa, int da) {
// this equations is wacky, wait for SVG to confirm it
int r = sc * da + dc * sa - 2 * sc * dc + sc * (255 - da) + dc * (255 - sa);
return clamp_div255round(r);
return sc * da + dc * sa - 2 * dc * sc;
}
static SkPMColor exclusion_modeproc(SkPMColor src, SkPMColor dst) {
int sa = SkGetPackedA32(src);
int da = SkGetPackedA32(dst);
int a = srcover_byte(sa, da);
int r = exclusion_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
int g = exclusion_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
int b = exclusion_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, exclusion_byte);
int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, exclusion_byte);
int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, exclusion_byte);
return SkPackARGB32(a, r, g, b);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Unpremultiplied RGB triple used by the non-separable blend modes
// (hue, saturation, color, luminosity). Each component is a color
// channel divided by its alpha, nominally in [0, 1].
struct BlendColor {
    float r;  // red ratio
    float g;  // green ratio
    float b;  // blue ratio
    // Default-construct to black so an untouched BlendColor is well-defined
    // when either alpha is zero and the blend value ends up unused.
    BlendColor(): r(0), g(0), b(0)
    {}
};
// Luminosity of C using 0.30/0.59/0.11 weights — the classic luma
// coefficients used by the PDF/SVG non-separable blend definitions.
static inline float Lum(BlendColor C)
{
    return C.r * 0.3 + C.g * 0.59 + C.b* 0.11;
}
// Returns the smaller of a and b. Equivalent to the mutate-and-return
// form (returns b exactly when a > b), including for equal inputs.
static inline float SkMinFloat(float a, float b)
{
    return (a > b) ? b : a;
}
// Returns the larger of a and b. Equivalent to the mutate-and-return
// form (returns b exactly when a < b), including for equal inputs.
static inline float SkMaxFloat(float a, float b)
{
    return (a < b) ? b : a;
}
#define minimum(C) SkMinFloat(SkMinFloat(C.r, C.g), C.b)
#define maximum(C) SkMaxFloat(SkMaxFloat(C.r, C.g), C.b)
// Saturation per the non-separable blend spec:
// Sat(C) = max(r, g, b) - min(r, g, b).
static inline float Sat(BlendColor c) {
    return maximum(c) - minimum(c);
}
// Rescales the components of a color — passed by reference in ascending
// order (Cmin <= Cmid <= Cmax) — so that the resulting triple has
// saturation s. The minimum maps to 0, the maximum to s, and the middle
// component keeps its relative position between them.
static inline void setSaturationComponents(float& Cmin, float& Cmid, float& Cmax, float s) {
    if(Cmax > Cmin) {
        Cmid = (((Cmid - Cmin) * s ) / (Cmax - Cmin));
        Cmax = s;
    } else {
        // Degenerate (gray) input: saturation is zero, so all components
        // collapse to 0.
        Cmax = 0;
        Cmid = 0;
    }
    Cmin = 0;
}
// Returns C with its saturation set to s (spec SetSat). The branch
// ladder orders the three channels so setSaturationComponents always
// receives them as (min, mid, max) references; ties are broken toward
// the first-compared channel via the <= comparisons.
static inline BlendColor SetSat(BlendColor C, float s) {
    if(C.r <= C.g) {
        if(C.g <= C.b)
            setSaturationComponents(C.r, C.g, C.b, s);
        else
            if(C.r <= C.b)
                setSaturationComponents(C.r, C.b, C.g, s);
            else
                setSaturationComponents(C.b, C.r, C.g, s);
    } else if(C.r <= C.b)
        setSaturationComponents(C.g, C.r, C.b, s);
    else
        if(C.g <= C.b)
            setSaturationComponents(C.g, C.b, C.r, s);
        else
            setSaturationComponents(C.b, C.g, C.r, s);
    // C was mutated in place through the references above.
    return C;
}
// Clamps C back into [0, 1] while preserving its luminosity (spec
// ClipColor): out-of-range components are scaled toward the luminosity
// L rather than hard-clipped.
// NOTE(review): the divisors (L - n) and (x - L) are only zero when all
// three components are equal, in which case neither branch should be
// taken for sane inputs — confirm callers cannot hit that edge.
static inline BlendColor clipColor(BlendColor C) {
    float L = Lum(C);
    float n = minimum(C);
    float x = maximum(C);
    if(n < 0) {
        // Some component is negative: compress the range below L up to 0.
        C.r = L + (((C.r - L) * L) / (L - n));
        C.g = L + (((C.g - L) * L) / (L - n));
        C.b = L + (((C.b - L) * L) / (L - n));
    }
    if(x > 1) {
        // Some component exceeds 1: compress the range above L down to 1.
        C.r = L + (((C.r - L) * (1 - L)) / (x - L));
        C.g = L + (((C.g - L) * (1 - L)) / (x - L));
        C.b = L + (((C.b - L) * (1 - L)) / (x - L));
    }
    return C;
}
// Returns C shifted uniformly so that its luminosity equals l (spec
// SetLum), then clipped back into valid range with clipColor.
static inline BlendColor SetLum(BlendColor C, float l) {
    float d = l - Lum(C);
    C.r += d;
    C.g += d;
    C.b += d;
    return clipColor(C);
}
#define blendfunc_nonsep_byte(sc, dc, sa, da, blendval) \
clamp_div255round(sc * (255 - da) + dc * (255 - sa) + (int)(sa * da * blendval))
// kHue_Mode: result takes the hue of the source and the saturation and
// luminosity of the destination. Blending is done on unpremultiplied
// ratios (channel / alpha); blendfunc_nonsep_byte then recombines with
// the src*(255-da) + dst*(255-sa) + sa*da*blend formula.
static SkPMColor hue_modeproc(SkPMColor src, SkPMColor dst) {
    int sr = SkGetPackedR32(src);
    int sg = SkGetPackedG32(src);
    int sb = SkGetPackedB32(src);
    int sa = SkGetPackedA32(src);
    int dr = SkGetPackedR32(dst);
    int dg = SkGetPackedG32(dst);
    int db = SkGetPackedB32(dst);
    int da = SkGetPackedA32(dst);
    // If either alpha is 0, Cs stays at its default (black); that value
    // is then irrelevant because the sa*da*blendval term vanishes.
    BlendColor Cs;
    if(sa) {
        Cs.r = (float)sr / sa;
        Cs.g = (float)sg / sa;
        Cs.b = (float)sb / sa;
        BlendColor Cd;
        if(da) {
            Cd.r = (float)dr / da;
            Cd.g = (float)dg / da;
            Cd.b = (float)db / da;
            // SetLum(SetSat(Cs, Sat(Cd)), Lum(Cd)): source hue,
            // destination saturation and luminosity.
            Cs = SetLum(SetSat(Cs, Sat(Cd)), Lum(Cd));
        }
    }
    int a = srcover_byte(sa, da);
    int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
    int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
    int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
    return SkPackARGB32(a, r, g, b);
}
// kSaturation_Mode: result takes the saturation of the source and the
// hue and luminosity of the destination. Operates on unpremultiplied
// ratios; blendfunc_nonsep_byte recombines with premultiplied output.
static SkPMColor saturation_modeproc(SkPMColor src, SkPMColor dst) {
    int sr = SkGetPackedR32(src);
    int sg = SkGetPackedG32(src);
    int sb = SkGetPackedB32(src);
    int sa = SkGetPackedA32(src);
    int dr = SkGetPackedR32(dst);
    int dg = SkGetPackedG32(dst);
    int db = SkGetPackedB32(dst);
    int da = SkGetPackedA32(dst);
    // Default (black) Cs is harmless when sa or da is 0: the
    // sa*da*blendval term in blendfunc_nonsep_byte is then 0.
    BlendColor Cs;
    if(sa) {
        Cs.r = (float)sr / sa;
        Cs.g = (float)sg / sa;
        Cs.b = (float)sb / sa;
        BlendColor Cd;
        if(da) {
            Cd.r = (float)dr / da;
            Cd.g = (float)dg / da;
            Cd.b = (float)db / da;
            // SetLum(SetSat(Cd, Sat(Cs)), Lum(Cd)): destination color
            // re-saturated to the source's saturation, keeping
            // destination luminosity.
            Cs = SetLum(SetSat(Cd, Sat(Cs)), Lum(Cd));
        }
    }
    int a = srcover_byte(sa, da);
    int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
    int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
    int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
    return SkPackARGB32(a, r, g, b);
}
// kColor_Mode: result takes the hue and saturation of the source and
// the luminosity of the destination. Operates on unpremultiplied
// ratios; blendfunc_nonsep_byte recombines with premultiplied output.
static SkPMColor color_modeproc(SkPMColor src, SkPMColor dst) {
    int sr = SkGetPackedR32(src);
    int sg = SkGetPackedG32(src);
    int sb = SkGetPackedB32(src);
    int sa = SkGetPackedA32(src);
    int dr = SkGetPackedR32(dst);
    int dg = SkGetPackedG32(dst);
    int db = SkGetPackedB32(dst);
    int da = SkGetPackedA32(dst);
    // Default (black) Cs is harmless when sa or da is 0: the
    // sa*da*blendval term in blendfunc_nonsep_byte is then 0.
    BlendColor Cs;
    if(sa) {
        Cs.r = (float)sr / sa;
        Cs.g = (float)sg / sa;
        Cs.b = (float)sb / sa;
        BlendColor Cd;
        if(da) {
            Cd.r = (float)dr / da;
            Cd.g = (float)dg / da;
            Cd.b = (float)db / da;
            // SetLum(Cs, Lum(Cd)): source color shifted to the
            // destination's luminosity.
            Cs = SetLum(Cs, Lum(Cd));
        }
    }
    int a = srcover_byte(sa, da);
    int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
    int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
    int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
    return SkPackARGB32(a, r, g, b);
}
// kLuminosity_Mode: result takes the luminosity of the source and the
// hue and saturation of the destination. Operates on unpremultiplied
// ratios; blendfunc_nonsep_byte recombines with premultiplied output.
static SkPMColor luminosity_modeproc(SkPMColor src, SkPMColor dst) {
    int sr = SkGetPackedR32(src);
    int sg = SkGetPackedG32(src);
    int sb = SkGetPackedB32(src);
    int sa = SkGetPackedA32(src);
    int dr = SkGetPackedR32(dst);
    int dg = SkGetPackedG32(dst);
    int db = SkGetPackedB32(dst);
    int da = SkGetPackedA32(dst);
    // Default (black) Cs is harmless when sa or da is 0: the
    // sa*da*blendval term in blendfunc_nonsep_byte is then 0.
    BlendColor Cs;
    if(sa) {
        Cs.r = (float)sr / sa;
        Cs.g = (float)sg / sa;
        Cs.b = (float)sb / sa;
        BlendColor Cd;
        if(da) {
            Cd.r = (float)dr / da;
            Cd.g = (float)dg / da;
            Cd.b = (float)db / da;
            // SetLum(Cd, Lum(Cs)): destination color shifted to the
            // source's luminosity.
            Cs = SetLum(Cd, Lum(Cs));
        }
    }
    int a = srcover_byte(sa, da);
    int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
    int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
    int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
    return SkPackARGB32(a, r, g, b);
}
@ -435,7 +632,7 @@ static const ProcCoeff gProcCoeffs[] = {
{ xor_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kISA_Coeff },
{ plus_modeproc, SkXfermode::kOne_Coeff, SkXfermode::kOne_Coeff },
{ multiply_modeproc,SkXfermode::kZero_Coeff, SkXfermode::kSC_Coeff },
{ multiply_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF},
{ screen_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ overlay_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ darken_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
@ -446,6 +643,10 @@ static const ProcCoeff gProcCoeffs[] = {
{ softlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ difference_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ exclusion_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ hue_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ saturation_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ color_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
{ luminosity_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
};
///////////////////////////////////////////////////////////////////////////////
@ -1177,6 +1378,10 @@ static const Proc16Rec gModeProcs16[] = {
{ NULL, NULL, NULL }, // softlight
{ NULL, NULL, NULL }, // difference
{ NULL, NULL, NULL }, // exclusion
{ NULL, NULL, NULL }, // hue
{ NULL, NULL, NULL }, // saturation
{ NULL, NULL, NULL }, // color
{ NULL, NULL, NULL }, // luminosity
};
SkXfermodeProc16 SkXfermode::GetProc16(Mode mode, SkColor srcColor) {

Просмотреть файл

@ -138,6 +138,7 @@ CPPSRCS = \
FoldConstants.cpp \
Intl.cpp \
NameFunctions.cpp \
ParallelDo.cpp \
ParallelArray.cpp \
ParseMaps.cpp \
ParseNode.cpp \
@ -317,7 +318,9 @@ CPPSRCS += MIR.cpp \
ValueNumbering.cpp \
RangeAnalysis.cpp \
VMFunctions.cpp \
ParallelFunctions.cpp \
AliasAnalysis.cpp \
ParallelArrayAnalysis.cpp \
UnreachableCodeElimination.cpp \
$(NULL)
endif #ENABLE_ION

Просмотреть файл

@ -248,3 +248,301 @@ function IsStructurallyValidLanguageTag(locale) {
return !callFunction(std_RegExp_test, duplicateVariantRE, locale) &&
!callFunction(std_RegExp_test, duplicateSingletonRE, locale);
}
/**
* Canonicalizes the given structurally valid BCP 47 language tag, including
* regularized case of subtags. For example, the language tag
* Zh-NAN-haNS-bu-variant2-Variant1-u-ca-chinese-t-Zh-laTN-x-PRIVATE, where
*
* Zh ; 2*3ALPHA
* -NAN ; ["-" extlang]
* -haNS ; ["-" script]
* -bu ; ["-" region]
* -variant2 ; *("-" variant)
* -Variant1
* -u-ca-chinese ; *("-" extension)
* -t-Zh-laTN
* -x-PRIVATE ; ["-" privateuse]
*
* becomes nan-Hans-mm-variant2-variant1-t-zh-latn-u-ca-chinese-x-private
*
* Spec: ECMAScript Internationalization API Specification, 6.2.3.
* Spec: RFC 5646, section 4.5.
*/
function CanonicalizeLanguageTag(locale) {
    assert(IsStructurallyValidLanguageTag(locale), "CanonicalizeLanguageTag");
    // The input
    // "Zh-NAN-haNS-bu-variant2-Variant1-u-ca-chinese-t-Zh-laTN-x-PRIVATE"
    // will be used throughout this method to illustrate how it works.
    // Language tags are compared and processed case-insensitively, so
    // technically it's not necessary to adjust case. But for easier processing,
    // and because the canonical form for most subtags is lower case, we start
    // with lower case for all.
    // "Zh-NAN-haNS-bu-variant2-Variant1-u-ca-chinese-t-Zh-laTN-x-PRIVATE" ->
    // "zh-nan-hans-bu-variant2-variant1-u-ca-chinese-t-zh-latn-x-private"
    locale = callFunction(std_String_toLowerCase, locale);
    // Handle mappings for complete tags.
    if (callFunction(std_Object_hasOwnProperty, langTagMappings, locale))
        return langTagMappings[locale];
    var subtags = callFunction(std_String_split, locale, "-");
    var i = 0;
    // Handle the standard part: All subtags before the first singleton or "x".
    // "zh-nan-hans-bu-variant2-variant1"
    while (i < subtags.length) {
        var subtag = subtags[i];
        // If we reach the start of an extension sequence or private use part,
        // we're done with this loop. We have to check for i > 0 because for
        // irregular language tags, such as i-klingon, the single-character
        // subtag "i" is not the start of an extension sequence.
        // In the example, we break at "u".
        if (subtag.length === 1 && (i > 0 || subtag === "x"))
            break;
        if (subtag.length === 4) {
            // 4-character subtags are script codes; their first character
            // needs to be capitalized. "hans" -> "Hans"
            subtag = callFunction(std_String_toUpperCase, subtag[0]) +
                     callFunction(std_String_substring, subtag, 1);
        } else if (i !== 0 && subtag.length === 2) {
            // 2-character subtags that are not in initial position are region
            // codes; they need to be upper case. "bu" -> "BU"
            subtag = callFunction(std_String_toUpperCase, subtag);
        }
        if (callFunction(std_Object_hasOwnProperty, langSubtagMappings, subtag)) {
            // Replace deprecated subtags with their preferred values.
            // "BU" -> "MM"
            // This has to come after we capitalize region codes because
            // otherwise some language and region codes could be confused.
            // For example, "in" is an obsolete language code for Indonesian,
            // but "IN" is the country code for India.
            // Note that the script generating langSubtagMappings makes sure
            // that no regular subtag mapping will replace an extlang code.
            subtag = langSubtagMappings[subtag];
        } else if (callFunction(std_Object_hasOwnProperty, extlangMappings, subtag)) {
            // Replace deprecated extlang subtags with their preferred values,
            // and remove the preceding subtag if it's a redundant prefix.
            // "zh-nan" -> "nan"
            // Note that the script generating extlangMappings makes sure that
            // no extlang mapping will replace a normal language code.
            subtag = extlangMappings[subtag].preferred;
            // NOTE(review): this looks up extlangMappings with the already
            // replaced subtag; it relies on each extlang's preferred value
            // also being a key in extlangMappings — confirm the generator
            // guarantees that.
            if (i === 1 && extlangMappings[subtag].prefix === subtags[0]) {
                callFunction(std_Array_shift, subtags);
                i--;
            }
        }
        subtags[i] = subtag;
        i++;
    }
    var normal = callFunction(std_Array_join, callFunction(std_Array_slice, subtags, 0, i), "-");
    // Extension sequences are sorted by their singleton characters.
    // "u-ca-chinese-t-zh-latn" -> "t-zh-latn-u-ca-chinese"
    var extensions = new List();
    while (i < subtags.length && subtags[i] !== "x") {
        var extensionStart = i;
        i++;
        // An extension sequence runs until the next single-character subtag.
        while (i < subtags.length && subtags[i].length > 1)
            i++;
        var extension = callFunction(std_Array_join, callFunction(std_Array_slice, subtags, extensionStart, i), "-");
        extensions.push(extension);
    }
    // Default (string) sort is sufficient: each element starts with its
    // distinct singleton character.
    extensions.sort();
    // Private use sequences are left as is. "x-private"
    var privateUse = "";
    if (i < subtags.length)
        privateUse = callFunction(std_Array_join, callFunction(std_Array_slice, subtags, i), "-");
    // Put everything back together.
    var canonical = normal;
    if (extensions.length > 0)
        canonical += "-" + extensions.join("-");
    if (privateUse.length > 0) {
        // Be careful of a Language-Tag that is entirely privateuse.
        if (canonical.length > 0)
            canonical += "-" + privateUse;
        else
            canonical = privateUse;
    }
    return canonical;
}
/**
* Verifies that the given string is a well-formed ISO 4217 currency code.
*
* Spec: ECMAScript Internationalization API Specification, 6.3.1.
*/
function IsWellFormedCurrencyCode(currency) {
    // A well-formed currency code is exactly three ASCII letters,
    // compared case-insensitively (we uppercase before checking).
    var normalized = toASCIIUpperCase(ToString(currency));
    if (normalized.length !== 3)
        return false;
    // After uppercasing, any character outside A-Z disqualifies the code.
    return !callFunction(std_RegExp_test, /[^A-Z]/, normalized);
}
/********** Locale and Parameter Negotiation **********/
/**
* Add old-style language tags without script code for locales that in current
* usage would include a script subtag. Returns the availableLocales argument
* provided.
*
* Spec: ECMAScript Internationalization API Specification, 9.1.
*/
function addOldStyleLanguageTags(availableLocales) {
    // Table of [modern script-bearing tag, old-style tag] pairs; only
    // commonly used old-style language tags are covered.
    var oldStylePairs = [
        ["pa-Arab-PK", "pa-PK"],
        ["zh-Hans-CN", "zh-CN"],
        ["zh-Hans-SG", "zh-SG"],
        ["zh-Hant-HK", "zh-HK"],
        ["zh-Hant-TW", "zh-TW"]
    ];
    for (var i = 0; i < oldStylePairs.length; i++) {
        var newStyle = oldStylePairs[i][0];
        var oldStyle = oldStylePairs[i][1];
        if (availableLocales[newStyle])
            availableLocales[oldStyle] = true;
    }
    return availableLocales;
}
/**
* Canonicalizes a locale list.
*
* Spec: ECMAScript Internationalization API Specification, 9.2.1.
*/
function CanonicalizeLocaleList(locales) {
    if (locales === undefined)
        return new List();
    // "seen" doubles as the result list and the de-duplication set:
    // only the first occurrence of each canonicalized tag is kept.
    var seen = new List();
    if (typeof locales === "string")
        locales = [locales];
    var O = ToObject(locales);
    var len = TO_UINT32(O.length);
    var k = 0;
    while (k < len) {
        // Don't call ToString(k) - SpiderMonkey is faster with integers.
        var kPresent = HasProperty(O, k);
        if (kPresent) {
            var kValue = O[k];
            // Only strings and objects are acceptable elements; everything
            // else throws per spec step 7.c.iii.
            if (!(typeof kValue === "string" ||
                  (typeof kValue === "object" && kValue !== null) ||
                  // The following is here only because Waldo thinks we really
                  // have to have it in order to be spec-conformant:
                  // document.all is an object first implemented in Explorer
                  // and then in Firefox and other fine browsers, whose
                  // presence is also used by applications to identify Explorer
                  // and which therefore has to be falsy in non-Explorer
                  // browsers. It cloaks itself by pretending its type is
                  // undefined. Just in case somebody thinks of decorating it
                  // with a toString method that returns a language tag and
                  // then passes it in as a locale, we check for its cloak
                  // here.
                  (typeof kValue === "undefined" && kValue !== undefined)))
            {
                ThrowError(JSMSG_INVALID_LOCALES_ELEMENT);
            }
            var tag = ToString(kValue);
            if (!IsStructurallyValidLanguageTag(tag))
                ThrowError(JSMSG_INVALID_LANGUAGE_TAG, tag);
            tag = CanonicalizeLanguageTag(tag);
            if (seen.indexOf(tag) === -1)
                seen.push(tag);
        }
        k++;
    }
    return seen;
}
/**
* Compares a BCP 47 language tag against the locales in availableLocales
* and returns the best available match. Uses the fallback
* mechanism of RFC 4647, section 3.4.
*
* Spec: ECMAScript Internationalization API Specification, 9.2.2.
* Spec: RFC 4647, section 3.4.
*/
function BestAvailableLocale(availableLocales, locale) {
    // Callers must pass a canonicalized tag with Unicode extension
    // sequences already removed.
    assert(IsStructurallyValidLanguageTag(locale), "BestAvailableLocale");
    assert(locale === CanonicalizeLanguageTag(locale), "BestAvailableLocale");
    assert(callFunction(std_String_indexOf, locale, "-u-") === -1, "BestAvailableLocale");
    // RFC 4647 Lookup fallback: repeatedly strip the last subtag until a
    // match is found or the tag is exhausted.
    var candidate = locale;
    while (true) {
        if (availableLocales[candidate])
            return candidate;
        var pos = callFunction(std_String_lastIndexOf, candidate, "-");
        if (pos === -1)
            return undefined;
        // Also strip a preceding single-character subtag so the next
        // candidate never ends in a lone singleton (e.g. "de-x" -> "de").
        if (pos >= 2 && candidate[pos - 2] === "-")
            pos -= 2;
        candidate = callFunction(std_String_substring, candidate, 0, pos);
    }
}
/**
 * Compares a BCP 47 language priority list against the set of locales in
 * availableLocales and determines the best available language to meet the
 * request. Unicode extension subsequences are stripped before matching,
 * but the first matching request's extension (and its position) is
 * reported back on the result record.
 *
 * This variant is based on the Lookup algorithm of RFC 4647 section 3.4.
 *
 * Spec: ECMAScript Internationalization API Specification, 9.2.3.
 * Spec: RFC 4647, section 3.4.
 */
function LookupMatcher(availableLocales, requestedLocales) {
    var found;                 // best available locale discovered so far
    var requested, stripped;   // current request and its extension-free form

    for (var index = 0; index < requestedLocales.length && found === undefined; index++) {
        requested = requestedLocales[index];
        stripped = callFunction(std_String_replace, requested, unicodeLocaleExtensionSequenceGlobalRE, "");
        found = BestAvailableLocale(availableLocales, stripped);
    }

    var result = new Record();
    if (found === undefined) {
        // Nothing requested is available; fall back to the default locale.
        result.__locale = DefaultLocale();
        return result;
    }

    result.__locale = found;
    if (requested !== stripped) {
        // The matching request carried a Unicode extension; surface it so
        // the caller can honor the requested options.
        var extensionMatch = callFunction(std_String_match, requested, unicodeLocaleExtensionSequenceRE);
        result.__extension = extensionMatch[0];
        result.__extensionIndex = extensionMatch.index;
    }
    return result;
}
/**
 * Compares a BCP 47 language priority list against the set of locales in
 * availableLocales and determines the best available language to meet the
 * request. Options specified through Unicode extension subsequences are
 * ignored in the lookup, but information about such subsequences is
 * returned separately.
 *
 * Spec: ECMAScript Internationalization API Specification, 9.2.4.
 */
function BestFitMatcher(availableLocales, requestedLocales) {
    // No matching heuristic better than plain Lookup is implemented, so
    // the "best fit" matcher simply delegates to the Lookup matcher.
    var lookupResult = LookupMatcher(availableLocales, requestedLocales);
    return lookupResult;
}

Просмотреть файл

@ -16,6 +16,7 @@
#include "builtin/TestingFunctions.h"
#include "methodjit/MethodJIT.h"
#include "vm/ForkJoin.h"
#include "vm/Stack-inl.h"
@ -878,6 +879,16 @@ DisplayName(JSContext *cx, unsigned argc, jsval *vp)
return true;
}
JSBool
js::testingFunc_inParallelSection(JSContext *cx, unsigned argc, jsval *vp)
{
// If we were actually *in* a parallel section, then this function
// would be inlined to TRUE in ion-generated code.
JS_ASSERT(!InParallelSection());
JS_SET_RVAL(cx, vp, JSVAL_FALSE);
return true;
}
static JSFunctionSpecWithHelp TestingFunctions[] = {
JS_FN_HELP("gc", ::GC, 0, 0,
"gc([obj] | 'compartment')",
@ -1009,6 +1020,10 @@ static JSFunctionSpecWithHelp TestingFunctions[] = {
" inferred name based on where the function was defined. This can be\n"
" different from the 'name' property on the function."),
JS_FN_HELP("inParallelSection", testingFunc_inParallelSection, 0, 0,
"inParallelSection()",
" True if this code is executing within a parallel section."),
JS_FS_HELP_END
};

Просмотреть файл

@ -11,6 +11,9 @@ namespace js {
bool
DefineTestingFunctions(JSContext *cx, JSHandleObject obj);
JSBool
testingFunc_inParallelSection(JSContext *cx, unsigned argc, jsval *vp);
} /* namespace js */
#endif /* TestingFunctions_h__ */

Просмотреть файл

@ -34,9 +34,17 @@ void
BumpChunk::delete_(BumpChunk *chunk)
{
#ifdef DEBUG
memset(chunk, 0xcd, sizeof(*chunk) + chunk->bumpSpaceSize);
// Part of the chunk may have been marked as poisoned/noaccess. Undo that
// before writing the 0xcd bytes.
size_t size = sizeof(*chunk) + chunk->bumpSpaceSize;
#if defined(MOZ_ASAN)
ASAN_UNPOISON_MEMORY_REGION(chunk, size);
#elif defined(MOZ_VALGRIND)
VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
#endif
js_free(chunk);
memset(chunk, 0xcd, size);
#endif
js_free(chunk);
}
bool

Просмотреть файл

@ -398,7 +398,7 @@ struct CompileError {
inline bool
StrictModeFromContext(JSContext *cx)
{
return cx->hasRunOption(JSOPTION_STRICT_MODE);
return cx->hasOption(JSOPTION_STRICT_MODE);
}
// Ideally, tokenizing would be entirely independent of context. But the

Просмотреть файл

@ -610,6 +610,13 @@ class Rooted : public RootedBase<T>
#endif
}
void init(PerThreadData *ptArg) {
#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
PerThreadDataFriendFields *pt = PerThreadDataFriendFields::get(ptArg);
commonInit(pt->thingGCRooters);
#endif
}
public:
Rooted(JSContext *cx
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
@ -636,6 +643,31 @@ class Rooted : public RootedBase<T>
init(cx);
}
Rooted(PerThreadData *pt
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
: ptr(RootMethods<T>::initial())
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
init(pt);
}
Rooted(PerThreadData *pt, T initial
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
: ptr(initial)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
init(pt);
}
template <typename S>
Rooted(PerThreadData *pt, const Unrooted<S> &initial
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
: ptr(static_cast<S>(initial))
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
init(pt);
}
~Rooted() {
#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
JS_ASSERT(*stack == this);

Просмотреть файл

@ -80,6 +80,8 @@ MarkExactStackRoots(JSTracer *trc)
for (unsigned i = 0; i < THING_ROOT_LIMIT; i++) {
for (ContextIter cx(trc->runtime); !cx.done(); cx.next())
MarkExactStackRootList(trc, cx->thingGCRooters[i], ThingRootKind(i));
MarkExactStackRootList(trc, trc->runtime->mainThread->thingGCRooters[i], ThingRootKind(i));
}
}
#endif /* JSGC_USE_EXACT_ROOTING */

Просмотреть файл

@ -196,6 +196,19 @@ SuppressCheckRoots(Vector<Rooter, 0, SystemAllocPolicy> &rooters)
return false;
}
static void
GatherRooters(Vector<Rooter, 0, SystemAllocPolicy> &rooters,
Rooted<void*> **thingGCRooters,
unsigned thingRootKind)
{
Rooted<void*> *rooter = thingGCRooters[thingRootKind];
while (rooter) {
Rooter r = { rooter, ThingRootKind(thingRootKind) };
JS_ALWAYS_TRUE(rooters.append(r));
rooter = rooter->previous();
}
}
void
JS::CheckStackRoots(JSContext *cx)
{
@ -243,16 +256,13 @@ JS::CheckStackRoots(JSContext *cx)
#endif
// Gather up all of the rooters
Vector< Rooter, 0, SystemAllocPolicy> rooters;
Vector<Rooter, 0, SystemAllocPolicy> rooters;
for (unsigned i = 0; i < THING_ROOT_LIMIT; i++) {
for (ContextIter cx(rt); !cx.done(); cx.next()) {
Rooted<void*> *rooter = cx->thingGCRooters[i];
while (rooter) {
Rooter r = { rooter, ThingRootKind(i) };
JS_ALWAYS_TRUE(rooters.append(r));
rooter = rooter->previous();
}
GatherRooters(rooters, cx->thingGCRooters, i);
}
GatherRooters(rooters, rt->mainThread.thingGCRooters, i);
}
if (SuppressCheckRoots(rooters))

Просмотреть файл

@ -239,6 +239,8 @@ ConvertFrames(JSContext *cx, IonActivation *activation, IonBailoutIterator &it)
#ifdef DEBUG
// Use count is reset after invalidation. Log use count on bailouts to
// determine if we have a critical sequence of bailout.
//
// Note: frame conversion only occurs in sequential mode
if (it.script()->ion == it.ionScript()) {
IonSpew(IonSpew_Bailouts, " Current script use count is %u",
it.script()->getUseCount());
@ -307,6 +309,8 @@ ConvertFrames(JSContext *cx, IonActivation *activation, IonBailoutIterator &it)
return BAILOUT_RETURN_OVERRECURSED;
}
fp->clearRunningInIon();
jsbytecode *bailoutPc = fp->script()->code + iter.pcOffset();
br->setBailoutPc(bailoutPc);
@ -617,7 +621,6 @@ ion::ThunkToInterpreter(Value *vp)
// prologue), so we must create one now for each inlined frame which needs
// one.
{
br->entryfp()->clearRunningInIon();
ScriptFrameIter iter(cx);
StackFrame *fp = NULL;
Rooted<JSScript*> script(cx);

Просмотреть файл

@ -18,7 +18,9 @@
#include "jsnum.h"
#include "jsmath.h"
#include "jsinterpinlines.h"
#include "ParallelFunctions.h"
#include "ExecutionModeInlines.h"
#include "vm/ForkJoin.h"
#include "vm/StringObject-inl.h"
@ -476,6 +478,17 @@ CodeGenerator::visitLambda(LLambda *lir)
masm.newGCThing(output, fun, ool->entry());
masm.initGCThing(output, fun);
emitLambdaInit(output, scopeChain, fun);
masm.bind(ool->rejoin());
return true;
}
void
CodeGenerator::emitLambdaInit(const Register &output,
const Register &scopeChain,
JSFunction *fun)
{
// Initialize nargs and flags. We do this with a single uint32 to avoid
// 16-bit writes.
union {
@ -494,8 +507,22 @@ CodeGenerator::visitLambda(LLambda *lir)
Address(output, JSFunction::offsetOfNativeOrScript()));
masm.storePtr(scopeChain, Address(output, JSFunction::offsetOfEnvironment()));
masm.storePtr(ImmGCPtr(fun->displayAtom()), Address(output, JSFunction::offsetOfAtom()));
}
masm.bind(ool->rejoin());
bool
CodeGenerator::visitParLambda(LParLambda *lir)
{
Register resultReg = ToRegister(lir->output());
Register parSliceReg = ToRegister(lir->parSlice());
Register scopeChainReg = ToRegister(lir->scopeChain());
Register tempReg1 = ToRegister(lir->getTemp0());
Register tempReg2 = ToRegister(lir->getTemp1());
JSFunction *fun = lir->mir()->fun();
JS_ASSERT(scopeChainReg != resultReg);
emitParAllocateGCThing(resultReg, parSliceReg, tempReg1, tempReg2, fun);
emitLambdaInit(resultReg, scopeChainReg, fun);
return true;
}
@ -763,6 +790,52 @@ CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment *lir)
return true;
}
bool
CodeGenerator::visitParSlice(LParSlice *lir)
{
const Register tempReg = ToRegister(lir->getTempReg());
masm.setupUnalignedABICall(0, tempReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParForkJoinSlice));
JS_ASSERT(ToRegister(lir->output()) == ReturnReg);
return true;
}
bool
CodeGenerator::visitParWriteGuard(LParWriteGuard *lir)
{
JS_ASSERT(gen->info().executionMode() == ParallelExecution);
const Register tempReg = ToRegister(lir->getTempReg());
masm.setupUnalignedABICall(2, tempReg);
masm.passABIArg(ToRegister(lir->parSlice()));
masm.passABIArg(ToRegister(lir->object()));
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParWriteGuard));
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
// branch to the OOL failure code if false is returned
masm.branchTestBool(Assembler::Zero, ReturnReg, ReturnReg, bail);
return true;
}
bool
CodeGenerator::visitParDump(LParDump *lir)
{
ValueOperand value = ToValue(lir, 0);
masm.reserveStack(sizeof(Value));
masm.storeValue(value, Address(StackPointer, 0));
masm.movePtr(StackPointer, CallTempReg0);
masm.setupUnalignedABICall(1, CallTempReg1);
masm.passABIArg(CallTempReg0);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParDumpValue));
masm.freeStack(sizeof(Value));
return true;
}
bool
CodeGenerator::visitTypeBarrier(LTypeBarrier *lir)
{
@ -986,8 +1059,26 @@ static const VMFunction GetIntrinsicValueInfo =
bool
CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue *lir)
{
pushArg(ImmGCPtr(lir->mir()->name()));
return callVM(GetIntrinsicValueInfo, lir);
// When compiling parallel kernels, always bail.
switch (gen->info().executionMode()) {
case SequentialExecution: {
pushArg(ImmGCPtr(lir->mir()->name()));
return callVM(GetIntrinsicValueInfo, lir);
}
case ParallelExecution: {
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
masm.jump(bail);
return true;
}
default:
JS_NOT_REACHED("Bad execution mode");
return false;
}
}
typedef bool (*InvokeFunctionFn)(JSContext *, HandleFunction, uint32_t, Value *, Value *);
@ -1031,7 +1122,8 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
Register objreg = ToRegister(call->getTempObject());
Register nargsreg = ToRegister(call->getNargsReg());
uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
Label invoke, thunk, makeCall, end;
ExecutionMode executionMode = gen->info().executionMode();
Label uncompiled, thunk, makeCall, end;
// Known-target case is handled by LCallKnown.
JS_ASSERT(!call->hasSingleTarget());
@ -1049,15 +1141,14 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
return false;
// Guard that calleereg is an interpreted function with a JSScript:
masm.branchIfFunctionHasNoScript(calleereg, &invoke);
masm.branchIfFunctionHasNoScript(calleereg, &uncompiled);
// Knowing that calleereg is a non-native function, load the JSScript.
masm.loadPtr(Address(calleereg, offsetof(JSFunction, u.i.script_)), objreg);
ExecutionMode executionMode = gen->info().executionMode();
masm.loadPtr(Address(objreg, ionOffset(executionMode)), objreg);
// Guard that the IonScript has been compiled.
masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &invoke);
masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &uncompiled);
// Nestle the StackPointer up to the argument vector.
masm.freeStack(unusedStack);
@ -1100,9 +1191,18 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
masm.jump(&end);
// Handle uncompiled or native functions.
masm.bind(&invoke);
if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
return false;
masm.bind(&uncompiled);
switch (executionMode) {
case SequentialExecution:
if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
return false;
break;
case ParallelExecution:
if (!emitParCallToUncompiledScript(calleereg))
return false;
break;
}
masm.bind(&end);
@ -1115,10 +1215,30 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
masm.bind(&notPrimitive);
}
if (!checkForParallelBailout())
return false;
dropArguments(call->numStackArgs() + 1);
return true;
}
// Generates a call to ParCallToUncompiledScript() and then bails out.
// |calleeReg| should contain the JSFunction*.
bool
CodeGenerator::emitParCallToUncompiledScript(Register calleeReg)
{
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
masm.movePtr(calleeReg, CallTempReg0);
masm.setupUnalignedABICall(1, CallTempReg1);
masm.passABIArg(CallTempReg0);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCallToUncompiledScript));
masm.jump(bail);
return true;
}
bool
CodeGenerator::visitCallKnown(LCallKnown *call)
{
@ -1127,7 +1247,8 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
Register objreg = ToRegister(call->getTempObject());
uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
RootedFunction target(cx, call->getSingleTarget());
Label end, invoke;
ExecutionMode executionMode = gen->info().executionMode();
Label end, uncompiled;
// Native single targets are handled by LCallNative.
JS_ASSERT(!target->isNative());
@ -1140,10 +1261,13 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
if (target->isInterpretedLazy() && !target->getOrCreateScript(cx))
return false;
// If the function is known to be uncompilable, only emit the call to InvokeFunction.
ExecutionMode executionMode = gen->info().executionMode();
// If the function is known to be uncompilable, just emit the call to
// Invoke in sequential mode, else mark as cannot compile.
RootedScript targetScript(cx, target->nonLazyScript());
if (GetIonScript(targetScript, executionMode) == ION_DISABLED_SCRIPT) {
if (executionMode == ParallelExecution)
return false;
if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
return false;
@ -1163,7 +1287,7 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
masm.loadPtr(Address(objreg, ionOffset(executionMode)), objreg);
// Guard that the IonScript has been compiled.
masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &invoke);
masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &uncompiled);
// Load the start of the target IonCode.
masm.loadPtr(Address(objreg, IonScript::offsetOfMethod()), objreg);
@ -1190,12 +1314,24 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
masm.jump(&end);
// Handle uncompiled functions.
masm.bind(&invoke);
if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
return false;
masm.bind(&uncompiled);
switch (executionMode) {
case SequentialExecution:
if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
return false;
break;
case ParallelExecution:
if (!emitParCallToUncompiledScript(calleereg))
return false;
break;
}
masm.bind(&end);
if (!checkForParallelBailout())
return false;
// If the return value of the constructing function is Primitive,
// replace the return value with the Object from CreateThis.
if (call->mir()->isConstructing()) {
@ -1209,6 +1345,22 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
return true;
}
bool
CodeGenerator::checkForParallelBailout()
{
// In parallel mode, if we call another ion-compiled function and
// it returns JS_ION_ERROR, that indicates a bailout that we have
// to propagate up the stack.
ExecutionMode executionMode = gen->info().executionMode();
if (executionMode == ParallelExecution) {
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
masm.branchTestMagic(Assembler::Equal, JSReturnOperand, bail);
}
return true;
}
bool
CodeGenerator::emitCallInvokeFunction(LApplyArgsGeneric *apply, Register extraStackSize)
{
@ -1579,6 +1731,142 @@ CodeGenerator::visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool)
return true;
}
// Out-of-line path to report over-recursed error and fail.
class ParCheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator>
{
LParCheckOverRecursed *lir_;
public:
ParCheckOverRecursedFailure(LParCheckOverRecursed *lir)
: lir_(lir)
{ }
bool accept(CodeGenerator *codegen) {
return codegen->visitParCheckOverRecursedFailure(this);
}
LParCheckOverRecursed *lir() const {
return lir_;
}
};
bool
CodeGenerator::visitParCheckOverRecursed(LParCheckOverRecursed *lir)
{
// See above: unlike visitCheckOverRecursed(), this code runs in
// parallel mode and hence uses the ionStackLimit from the current
// thread state. Also, we must check the interrupt flags because
// on interrupt or abort, only the stack limit for the main thread
// is reset, not the worker threads. See comment in vm/ForkJoin.h
// for more details.
Register parSliceReg = ToRegister(lir->parSlice());
Register tempReg = ToRegister(lir->getTempReg());
masm.loadPtr(Address(parSliceReg, offsetof(ForkJoinSlice, perThreadData)), tempReg);
masm.loadPtr(Address(tempReg, offsetof(PerThreadData, ionStackLimit)), tempReg);
// Conditional forward (unlikely) branch to failure.
ParCheckOverRecursedFailure *ool = new ParCheckOverRecursedFailure(lir);
if (!addOutOfLineCode(ool))
return false;
masm.branchPtr(Assembler::BelowOrEqual, StackPointer, tempReg, ool->entry());
masm.parCheckInterruptFlags(tempReg, ool->entry());
masm.bind(ool->rejoin());
return true;
}
bool
CodeGenerator::visitParCheckOverRecursedFailure(ParCheckOverRecursedFailure *ool)
{
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
// Avoid saving/restoring the temp register since we will put the
// ReturnReg into it below and we don't want to clobber that
// during PopRegsInMask():
LParCheckOverRecursed *lir = ool->lir();
Register tempReg = ToRegister(lir->getTempReg());
RegisterSet saveSet(lir->safepoint()->liveRegs());
saveSet.maybeTake(tempReg);
masm.PushRegsInMask(saveSet);
masm.movePtr(ToRegister(lir->parSlice()), CallTempReg0);
masm.setupUnalignedABICall(1, CallTempReg1);
masm.passABIArg(CallTempReg0);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCheckOverRecursed));
masm.movePtr(ReturnReg, tempReg);
masm.PopRegsInMask(saveSet);
masm.branchTestBool(Assembler::Zero, tempReg, tempReg, bail);
masm.jump(ool->rejoin());
return true;
}
// Out-of-line path to report over-recursed error and fail.
class OutOfLineParCheckInterrupt : public OutOfLineCodeBase<CodeGenerator>
{
public:
LParCheckInterrupt *const lir;
OutOfLineParCheckInterrupt(LParCheckInterrupt *lir)
: lir(lir)
{ }
bool accept(CodeGenerator *codegen) {
return codegen->visitOutOfLineParCheckInterrupt(this);
}
};
bool
CodeGenerator::visitParCheckInterrupt(LParCheckInterrupt *lir)
{
// First check for slice->shared->interrupt_.
OutOfLineParCheckInterrupt *ool = new OutOfLineParCheckInterrupt(lir);
if (!addOutOfLineCode(ool))
return false;
// We must check two flags:
// - runtime->interrupt
// - runtime->parallelAbort
// See vm/ForkJoin.h for discussion on why we use this design.
Register tempReg = ToRegister(lir->getTempReg());
masm.parCheckInterruptFlags(tempReg, ool->entry());
masm.bind(ool->rejoin());
return true;
}
bool
CodeGenerator::visitOutOfLineParCheckInterrupt(OutOfLineParCheckInterrupt *ool)
{
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
// Avoid saving/restoring the temp register since we will put the
// ReturnReg into it below and we don't want to clobber that
// during PopRegsInMask():
LParCheckInterrupt *lir = ool->lir;
Register tempReg = ToRegister(lir->getTempReg());
RegisterSet saveSet(lir->safepoint()->liveRegs());
saveSet.maybeTake(tempReg);
masm.PushRegsInMask(saveSet);
masm.movePtr(ToRegister(ool->lir->parSlice()), CallTempReg0);
masm.setupUnalignedABICall(1, CallTempReg1);
masm.passABIArg(CallTempReg0);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCheckInterrupt));
masm.movePtr(ReturnReg, tempReg);
masm.PopRegsInMask(saveSet);
masm.branchTestBool(Assembler::Zero, tempReg, tempReg, bail);
masm.jump(ool->rejoin());
return true;
}
IonScriptCounts *
CodeGenerator::maybeCreateScriptCounts()
{
@ -1728,6 +2016,9 @@ CodeGenerator::generateBody()
return false;
}
if (!callTraceLIR(i, *iter))
return false;
if (!iter->accept(this))
return false;
}
@ -1765,6 +2056,8 @@ static const VMFunction NewInitArrayInfo =
bool
CodeGenerator::visitNewArrayCallVM(LNewArray *lir)
{
JS_ASSERT(gen->info().executionMode() == SequentialExecution);
Register objReg = ToRegister(lir->output());
JS_ASSERT(!lir->isCall());
@ -1813,21 +2106,14 @@ CodeGenerator::visitNewSlots(LNewSlots *lir)
bool
CodeGenerator::visitNewArray(LNewArray *lir)
{
JS_ASSERT(gen->info().executionMode() == SequentialExecution);
Register objReg = ToRegister(lir->output());
JSObject *templateObject = lir->mir()->templateObject();
uint32_t count = lir->mir()->count();
JS_ASSERT(count < JSObject::NELEMENTS_LIMIT);
size_t maxArraySlots =
gc::GetGCKindSlots(gc::FINALIZE_OBJECT_LAST) - ObjectElements::VALUES_PER_HEADER;
// Allocate space using the VMCall
// when mir hints it needs to get allocated immediatly,
// but only when data doesn't fit the available array slots.
bool allocating = lir->mir()->isAllocating() && count > maxArraySlots;
if (templateObject->hasSingletonType() || allocating)
if (lir->mir()->shouldUseVM())
return visitNewArrayCallVM(lir);
OutOfLineNewArray *ool = new OutOfLineNewArray(lir);
@ -1875,6 +2161,8 @@ static const VMFunction NewInitObjectInfo = FunctionInfo<NewInitObjectFn>(NewIni
bool
CodeGenerator::visitNewObjectVMCall(LNewObject *lir)
{
JS_ASSERT(gen->info().executionMode() == SequentialExecution);
Register objReg = ToRegister(lir->output());
JS_ASSERT(!lir->isCall());
@ -1894,11 +2182,11 @@ CodeGenerator::visitNewObjectVMCall(LNewObject *lir)
bool
CodeGenerator::visitNewObject(LNewObject *lir)
{
JS_ASSERT(gen->info().executionMode() == SequentialExecution);
Register objReg = ToRegister(lir->output());
JSObject *templateObject = lir->mir()->templateObject();
if (templateObject->hasSingletonType() || templateObject->hasDynamicSlots())
if (lir->mir()->shouldUseVM())
return visitNewObjectVMCall(lir);
OutOfLineNewObject *ool = new OutOfLineNewObject(lir);
@ -1955,7 +2243,7 @@ CodeGenerator::visitNewCallObject(LNewCallObject *lir)
{
Register obj = ToRegister(lir->output());
JSObject *templateObj = lir->mir()->templateObj();
JSObject *templateObj = lir->mir()->templateObject();
// If we have a template object, we can inline call object creation.
OutOfLineCode *ool;
@ -1984,6 +2272,68 @@ CodeGenerator::visitNewCallObject(LNewCallObject *lir)
return true;
}
bool
CodeGenerator::visitParNewCallObject(LParNewCallObject *lir)
{
Register resultReg = ToRegister(lir->output());
Register parSliceReg = ToRegister(lir->parSlice());
Register tempReg1 = ToRegister(lir->getTemp0());
Register tempReg2 = ToRegister(lir->getTemp1());
JSObject *templateObj = lir->mir()->templateObj();
emitParAllocateGCThing(resultReg, parSliceReg, tempReg1, tempReg2, templateObj);
// NB: !lir->slots()->isRegister() implies that there is no slots
// array at all, and the memory is already zeroed when copying
// from the template object
if (lir->slots()->isRegister()) {
Register slotsReg = ToRegister(lir->slots());
JS_ASSERT(slotsReg != resultReg);
masm.storePtr(slotsReg, Address(resultReg, JSObject::offsetOfSlots()));
}
return true;
}
bool
CodeGenerator::visitParNewDenseArray(LParNewDenseArray *lir)
{
Register parSliceReg = ToRegister(lir->parSlice());
Register lengthReg = ToRegister(lir->length());
Register tempReg0 = ToRegister(lir->getTemp0());
Register tempReg1 = ToRegister(lir->getTemp1());
Register tempReg2 = ToRegister(lir->getTemp2());
JSObject *templateObj = lir->mir()->templateObject();
// Allocate the array into tempReg2. Don't use resultReg because it
// may alias parSliceReg etc.
emitParAllocateGCThing(tempReg2, parSliceReg, tempReg0, tempReg1, templateObj);
// Invoke a C helper to allocate the elements. For convenience,
// this helper also returns the array back to us, or NULL, which
// obviates the need to preserve the register across the call. In
// reality, we should probably just have the C helper also
// *allocate* the array, but that would require that it initialize
// the various fields of the object, and I didn't want to
// duplicate the code in initGCThing() that already does such an
// admirable job.
masm.setupUnalignedABICall(3, CallTempReg3);
masm.passABIArg(parSliceReg);
masm.passABIArg(tempReg2);
masm.passABIArg(lengthReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParExtendArray));
Register resultReg = ToRegister(lir->output());
JS_ASSERT(resultReg == ReturnReg);
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
masm.branchTestPtr(Assembler::Zero, resultReg, resultReg, bail);
return true;
}
typedef JSObject *(*NewStringObjectFn)(JSContext *, HandleString);
static const VMFunction NewStringObjectInfo = FunctionInfo<NewStringObjectFn>(NewStringObject);
@ -2018,6 +2368,100 @@ typedef bool(*InitPropFn)(JSContext *cx, HandleObject obj,
static const VMFunction InitPropInfo =
FunctionInfo<InitPropFn>(InitProp);
bool
CodeGenerator::visitParNew(LParNew *lir)
{
Register objReg = ToRegister(lir->output());
Register parSliceReg = ToRegister(lir->parSlice());
Register tempReg1 = ToRegister(lir->getTemp0());
Register tempReg2 = ToRegister(lir->getTemp1());
JSObject *templateObject = lir->mir()->templateObject();
emitParAllocateGCThing(objReg, parSliceReg, tempReg1, tempReg2,
templateObject);
return true;
}
class OutOfLineParNewGCThing : public OutOfLineCodeBase<CodeGenerator>
{
public:
gc::AllocKind allocKind;
Register objReg;
OutOfLineParNewGCThing(gc::AllocKind allocKind, Register objReg)
: allocKind(allocKind), objReg(objReg)
{}
bool accept(CodeGenerator *codegen) {
return codegen->visitOutOfLineParNewGCThing(this);
}
};
bool
CodeGenerator::emitParAllocateGCThing(const Register &objReg,
const Register &parSliceReg,
const Register &tempReg1,
const Register &tempReg2,
JSObject *templateObj)
{
gc::AllocKind allocKind = templateObj->getAllocKind();
OutOfLineParNewGCThing *ool = new OutOfLineParNewGCThing(allocKind, objReg);
if (!ool || !addOutOfLineCode(ool))
return false;
masm.parNewGCThing(objReg, parSliceReg, tempReg1, tempReg2,
templateObj, ool->entry());
masm.bind(ool->rejoin());
masm.initGCThing(objReg, templateObj);
return true;
}
bool
CodeGenerator::visitOutOfLineParNewGCThing(OutOfLineParNewGCThing *ool)
{
// As a fallback for allocation in par. exec. mode, we invoke the
// C helper ParNewGCThing(), which calls into the GC code. If it
// returns NULL, we bail. If returns non-NULL, we rejoin the
// original instruction.
// This saves all caller-save registers, regardless of whether
// they are live. This is wasteful but a simplification, given
// that for some of the LIR that this is used with
// (e.g., LParLambda) there are values in those registers
// that must not be clobbered but which are not technically
// considered live.
RegisterSet saveSet(RegisterSet::Volatile());
// Also preserve the temps we're about to overwrite,
// but don't bother to save the objReg.
saveSet.addUnchecked(CallTempReg0);
saveSet.addUnchecked(CallTempReg1);
saveSet.maybeTake(AnyRegister(ool->objReg));
masm.PushRegsInMask(saveSet);
masm.move32(Imm32(ool->allocKind), CallTempReg0);
masm.setupUnalignedABICall(1, CallTempReg1);
masm.passABIArg(CallTempReg0);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParNewGCThing));
masm.movePtr(ReturnReg, ool->objReg);
masm.PopRegsInMask(saveSet);
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
masm.branchTestPtr(Assembler::Zero, ool->objReg, ool->objReg, bail);
masm.jump(ool->rejoin());
return true;
}
bool
CodeGenerator::visitParBailout(LParBailout *lir)
{
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
masm.jump(bail);
return true;
}
bool
CodeGenerator::visitInitProp(LInitProp *lir)
{
@ -2371,6 +2815,34 @@ CodeGenerator::visitBinaryV(LBinaryV *lir)
}
}
bool
CodeGenerator::visitParCompareS(LParCompareS *lir)
{
JSOp op = lir->mir()->jsop();
Register left = ToRegister(lir->left());
Register right = ToRegister(lir->right());
JS_ASSERT((op == JSOP_EQ || op == JSOP_STRICTEQ) ||
(op == JSOP_NE || op == JSOP_STRICTNE));
masm.setupUnalignedABICall(2, CallTempReg2);
masm.passABIArg(left);
masm.passABIArg(right);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCompareStrings));
masm.and32(Imm32(0xF), ReturnReg); // The C functions return an enum whose size is undef
// Check for cases that we do not currently handle in par exec
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
masm.branch32(Assembler::Equal, ReturnReg, Imm32(ParCompareUnknown), bail);
if (op == JSOP_NE || op == JSOP_STRICTNE)
masm.xor32(Imm32(1), ReturnReg);
return true;
}
typedef bool (*StringCompareFn)(JSContext *, HandleString, HandleString, JSBool *);
static const VMFunction stringsEqualInfo =
FunctionInfo<StringCompareFn>(ion::StringsEqual<true>);
@ -2394,33 +2866,7 @@ CodeGenerator::emitCompareS(LInstruction *lir, JSOp op, Register left, Register
if (!ool)
return false;
Label notPointerEqual;
// Fast path for identical strings
masm.branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
masm.move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), output);
masm.jump(ool->rejoin());
masm.bind(&notPointerEqual);
masm.loadPtr(Address(left, JSString::offsetOfLengthAndFlags()), output);
masm.loadPtr(Address(right, JSString::offsetOfLengthAndFlags()), temp);
Label notAtom;
// We can optimize the equality operation to a pointer compare for
// two atoms.
Imm32 atomBit(JSString::ATOM_BIT);
masm.branchTest32(Assembler::Zero, output, atomBit, &notAtom);
masm.branchTest32(Assembler::Zero, temp, atomBit, &notAtom);
masm.cmpPtr(left, right);
emitSet(JSOpToCondition(op), output);
masm.jump(ool->rejoin());
masm.bind(&notAtom);
// Strings of different length can never be equal.
masm.rshiftPtr(Imm32(JSString::LENGTH_SHIFT), output);
masm.rshiftPtr(Imm32(JSString::LENGTH_SHIFT), temp);
masm.branchPtr(Assembler::Equal, output, temp, ool->entry());
masm.move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), output);
masm.compareStrings(op, left, right, output, temp, ool->entry());
masm.bind(ool->rejoin());
return true;
@ -2584,7 +3030,7 @@ CodeGenerator::visitIsNullOrLikeUndefined(LIsNullOrLikeUndefined *lir)
else
cond = masm.testUndefined(cond, value);
emitSet(cond, output);
masm.emitSet(cond, output);
return true;
}
@ -3130,17 +3576,25 @@ CodeGenerator::visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool)
value = TypedOrValueRegister(valueType, ToAnyRegister(store->value()));
}
// We can bump the initialized length inline if index ==
// initializedLength and index < capacity. Otherwise, we have to
// consider fallback options. In fallback cases, we branch to one
// of two labels because (at least in parallel mode) we can
// recover from index < capacity but not index !=
// initializedLength.
Label indexNotInitLen;
Label indexWouldExceedCapacity;
// If index == initializedLength, try to bump the initialized length inline.
// If index > initializedLength, call a stub. Note that this relies on the
// condition flags sticking from the incoming branch.
Label callStub;
masm.j(Assembler::NotEqual, &callStub);
masm.j(Assembler::NotEqual, &indexNotInitLen);
Int32Key key = ToInt32Key(index);
// Check array capacity.
masm.branchKey(Assembler::BelowOrEqual, Address(elements, ObjectElements::offsetOfCapacity()),
key, &callStub);
key, &indexWouldExceedCapacity);
// Update initialized length. The capacity guard above ensures this won't overflow,
// due to NELEMENTS_LIMIT.
@ -3168,22 +3622,82 @@ CodeGenerator::visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool)
masm.jump(ool->rejoinStore());
}
masm.bind(&callStub);
saveLive(ins);
switch (gen->info().executionMode()) {
case SequentialExecution:
masm.bind(&indexNotInitLen);
masm.bind(&indexWouldExceedCapacity);
saveLive(ins);
pushArg(Imm32(current->mir()->strict()));
pushArg(value);
if (index->isConstant())
pushArg(*index->toConstant());
else
pushArg(TypedOrValueRegister(MIRType_Int32, ToAnyRegister(index)));
pushArg(object);
if (!callVM(SetObjectElementInfo, ins))
return false;
pushArg(Imm32(current->mir()->strict()));
pushArg(value);
if (index->isConstant())
pushArg(*index->toConstant());
else
pushArg(TypedOrValueRegister(MIRType_Int32, ToAnyRegister(index)));
pushArg(object);
if (!callVM(SetObjectElementInfo, ins))
return false;
restoreLive(ins);
masm.jump(ool->rejoin());
return true;
restoreLive(ins);
masm.jump(ool->rejoin());
return true;
case ParallelExecution:
Label *bail;
if (!ensureOutOfLineParallelAbort(&bail))
return false;
//////////////////////////////////////////////////////////////
// If the problem is that we do not have sufficient capacity,
// try to reallocate the elements array and then branch back
// to perform the actual write. Note that we do not want to
// force the reg alloc to assign any particular register, so
// we make space on the stack and pass the arguments that way.
// (Also, outside of the VM call mechanism, it's very hard to
// pass in a Value to a C function!).
masm.bind(&indexWouldExceedCapacity);
// The use of registers here is somewhat subtle. We need to
// save and restore the volatile registers but we also need to
// preserve the ReturnReg. Normally we'd just add a constraint
// to the regalloc, but since this is the slow path of a hot
// instruction we don't want to do that. So instead we push
// the volatile registers but we don't save the register
// `object`. We will copy the ReturnReg into `object`. The
// function we are calling (`ParPush`) agrees to either return
// `object` unchanged or NULL. This way after we restore the
// registers, we can examine `object` to know whether an error
// occurred.
RegisterSet saveSet(ins->safepoint()->liveRegs());
saveSet.maybeTake(object);
masm.PushRegsInMask(saveSet);
masm.reserveStack(sizeof(ParPushArgs));
masm.storePtr(object, Address(StackPointer, offsetof(ParPushArgs, object)));
masm.storeConstantOrRegister(value, Address(StackPointer,
offsetof(ParPushArgs, value)));
masm.movePtr(StackPointer, CallTempReg0);
masm.setupUnalignedABICall(1, CallTempReg1);
masm.passABIArg(CallTempReg0);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParPush));
masm.freeStack(sizeof(ParPushArgs));
masm.movePtr(ReturnReg, object);
masm.PopRegsInMask(saveSet);
masm.branchTestPtr(Assembler::Zero, object, object, bail);
masm.jump(ool->rejoin());
//////////////////////////////////////////////////////////////
// If the problem is that we are trying to write an index that
// is not the initialized length, that would result in a
// sparse array, and since we don't want to think about that
// case right now, we just bail out.
masm.bind(&indexNotInitLen);
masm.jump(bail);
return true;
}
JS_ASSERT(false);
return false;
}
typedef bool (*ArrayPopShiftFn)(JSContext *, HandleObject, MutableHandleValue);
@ -3556,7 +4070,7 @@ CodeGenerator::visitIteratorMore(LIteratorMore *lir)
// Set output to true if props_cursor < props_end.
masm.loadPtr(Address(output, offsetof(NativeIterator, props_end)), temp);
masm.cmpPtr(Address(output, offsetof(NativeIterator, props_cursor)), temp);
emitSet(Assembler::LessThan, output);
masm.emitSet(Assembler::LessThan, output);
masm.bind(ool->rejoin());
return true;
@ -3699,7 +4213,8 @@ CodeGenerator::link()
bailouts_.length(), graph.numConstants(),
safepointIndices_.length(), osiIndices_.length(),
cacheList_.length(), safepoints_.size(),
graph.mir().numScripts());
graph.mir().numScripts(),
executionMode == ParallelExecution ? ForkJoinSlices(cx) : 0);
SetIonScript(script, executionMode, ionScript);
if (!ionScript)
@ -3738,6 +4253,9 @@ CodeGenerator::link()
JS_ASSERT(graph.mir().numScripts() > 0);
ionScript->copyScriptEntries(graph.mir().scripts());
if (executionMode == ParallelExecution)
ionScript->zeroParallelInvalidatedScripts();
linkAbsoluteLabels();
// The correct state for prebarriers is unknown until the end of compilation,
@ -5090,6 +5608,19 @@ CodeGenerator::visitFunctionBoundary(LFunctionBoundary *lir)
}
}
bool
CodeGenerator::visitOutOfLineParallelAbort(OutOfLineParallelAbort *ool)
{
masm.movePtr(ImmWord((void *) current->mir()->info().script()), CallTempReg0);
masm.setupUnalignedABICall(1, CallTempReg1);
masm.passABIArg(CallTempReg0);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParallelAbort));
masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
masm.jump(returnLabel_);
return true;
}
} // namespace ion
} // namespace js

Просмотреть файл

@ -21,15 +21,19 @@
namespace js {
namespace ion {
class OutOfLineNewParallelArray;
class OutOfLineTestObject;
class OutOfLineNewArray;
class OutOfLineNewObject;
class CheckOverRecursedFailure;
class ParCheckOverRecursedFailure;
class OutOfLineParCheckInterrupt;
class OutOfLineUnboxDouble;
class OutOfLineCache;
class OutOfLineStoreElementHole;
class OutOfLineTypeOfV;
class OutOfLineLoadTypedArray;
class OutOfLineParNewGCThing;
class CodeGenerator : public CodeGeneratorSpecific
{
@ -72,6 +76,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitRegExpTest(LRegExpTest *lir);
bool visitLambda(LLambda *lir);
bool visitLambdaForSingleton(LLambdaForSingleton *lir);
bool visitParLambda(LParLambda *lir);
bool visitPointer(LPointer *lir);
bool visitSlots(LSlots *lir);
bool visitStoreSlotV(LStoreSlotV *store);
@ -90,6 +95,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitApplyArgsGeneric(LApplyArgsGeneric *apply);
bool visitDoubleToInt32(LDoubleToInt32 *lir);
bool visitNewSlots(LNewSlots *lir);
bool visitOutOfLineNewParallelArray(OutOfLineNewParallelArray *ool);
bool visitNewArrayCallVM(LNewArray *lir);
bool visitNewArray(LNewArray *lir);
bool visitOutOfLineNewArray(OutOfLineNewArray *ool);
@ -98,7 +104,11 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitOutOfLineNewObject(OutOfLineNewObject *ool);
bool visitNewDeclEnvObject(LNewDeclEnvObject *lir);
bool visitNewCallObject(LNewCallObject *lir);
bool visitParNewCallObject(LParNewCallObject *lir);
bool visitNewStringObject(LNewStringObject *lir);
bool visitParNew(LParNew *lir);
bool visitParNewDenseArray(LParNewDenseArray *lir);
bool visitParBailout(LParBailout *lir);
bool visitInitProp(LInitProp *lir);
bool visitCreateThis(LCreateThis *lir);
bool visitCreateThisWithProto(LCreateThisWithProto *lir);
@ -132,6 +142,7 @@ class CodeGenerator : public CodeGeneratorSpecific
Register output, Register temp);
bool visitCompareS(LCompareS *lir);
bool visitCompareStrictS(LCompareStrictS *lir);
bool visitParCompareS(LParCompareS *lir);
bool visitCompareVM(LCompareVM *lir);
bool visitIsNullOrLikeUndefined(LIsNullOrLikeUndefined *lir);
bool visitIsNullOrLikeUndefinedAndBranch(LIsNullOrLikeUndefinedAndBranch *lir);
@ -141,6 +152,9 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitCharCodeAt(LCharCodeAt *lir);
bool visitFromCharCode(LFromCharCode *lir);
bool visitFunctionEnvironment(LFunctionEnvironment *lir);
bool visitParSlice(LParSlice *lir);
bool visitParWriteGuard(LParWriteGuard *lir);
bool visitParDump(LParDump *lir);
bool visitCallGetProperty(LCallGetProperty *lir);
bool visitCallGetElement(LCallGetElement *lir);
bool visitCallSetElement(LCallSetElement *lir);
@ -196,6 +210,12 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitCheckOverRecursed(LCheckOverRecursed *lir);
bool visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool);
bool visitParCheckOverRecursed(LParCheckOverRecursed *lir);
bool visitParCheckOverRecursedFailure(ParCheckOverRecursedFailure *ool);
bool visitParCheckInterrupt(LParCheckInterrupt *lir);
bool visitOutOfLineParCheckInterrupt(OutOfLineParCheckInterrupt *ool);
bool visitUnboxDouble(LUnboxDouble *lir);
bool visitOutOfLineUnboxDouble(OutOfLineUnboxDouble *ool);
bool visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool);
@ -207,6 +227,10 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitOutOfLineGetNameCache(OutOfLineCache *ool);
bool visitOutOfLineCallsiteCloneCache(OutOfLineCache *ool);
bool visitOutOfLineParNewGCThing(OutOfLineParNewGCThing *ool);
bool visitOutOfLineParallelAbort(OutOfLineParallelAbort *ool);
bool visitGetPropertyCacheV(LGetPropertyCacheV *ins) {
return visitCache(ins);
}
@ -236,9 +260,23 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitCache(LInstruction *load);
bool visitCallSetProperty(LInstruction *ins);
bool checkForParallelBailout();
ConstantOrRegister getSetPropertyValue(LInstruction *ins);
bool generateBranchV(const ValueOperand &value, Label *ifTrue, Label *ifFalse, FloatRegister fr);
bool emitParAllocateGCThing(const Register &objReg,
const Register &threadContextReg,
const Register &tempReg1,
const Register &tempReg2,
JSObject *templateObj);
bool emitParCallToUncompiledScript(Register calleeReg);
void emitLambdaInit(const Register &resultReg,
const Register &scopeChainReg,
JSFunction *fun);
IonScriptCounts *maybeCreateScriptCounts();
// Test whether value is truthy or not and jump to the corresponding label.

Просмотреть файл

@ -17,6 +17,8 @@
#include "EdgeCaseAnalysis.h"
#include "RangeAnalysis.h"
#include "LinearScan.h"
#include "vm/ParallelDo.h"
#include "ParallelArrayAnalysis.h"
#include "jscompartment.h"
#include "vm/ThreadPool.h"
#include "vm/ForkJoin.h"
@ -122,9 +124,6 @@ ion::InitializeIon()
if (status != PR_SUCCESS)
return false;
if (!ForkJoinSlice::Initialize())
return false;
IonTLSInitialized = true;
}
#endif
@ -470,6 +469,7 @@ IonScript::IonScript()
safepointsSize_(0),
scriptList_(0),
scriptEntries_(0),
parallelInvalidatedScriptList_(0),
refcount_(0),
recompileInfo_(),
slowCallCount(0)
@ -482,7 +482,7 @@ IonScript *
IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t snapshotsSize,
size_t bailoutEntries, size_t constants, size_t safepointIndices,
size_t osiIndices, size_t cacheEntries, size_t safepointsSize,
size_t scriptEntries)
size_t scriptEntries, size_t parallelInvalidatedScriptEntries)
{
if (snapshotsSize >= MAX_BUFFER_SIZE ||
(bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
@ -502,6 +502,8 @@ IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t sn
size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(IonCache), DataAlignment);
size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
size_t paddedScriptSize = AlignBytes(scriptEntries * sizeof(RawScript), DataAlignment);
size_t paddedParallelInvalidatedScriptSize =
AlignBytes(parallelInvalidatedScriptEntries * sizeof(RawScript), DataAlignment);
size_t bytes = paddedSnapshotsSize +
paddedBailoutSize +
paddedConstantsSize +
@ -509,7 +511,8 @@ IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t sn
paddedOsiIndicesSize +
paddedCacheEntriesSize +
paddedSafepointSize +
paddedScriptSize;
paddedScriptSize +
paddedParallelInvalidatedScriptSize;
uint8_t *buffer = (uint8_t *)cx->malloc_(sizeof(IonScript) + bytes);
if (!buffer)
return NULL;
@ -551,6 +554,10 @@ IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t sn
script->scriptEntries_ = scriptEntries;
offsetCursor += paddedScriptSize;
script->parallelInvalidatedScriptList_ = offsetCursor;
script->parallelInvalidatedScriptEntries_ = parallelInvalidatedScriptEntries;
offsetCursor += parallelInvalidatedScriptEntries;
script->frameSlots_ = frameSlots;
script->frameSize_ = frameSize;
@ -606,6 +613,13 @@ IonScript::copyScriptEntries(JSScript **scripts)
scriptList()[i] = scripts[i];
}
void
IonScript::zeroParallelInvalidatedScripts()
{
memset(parallelInvalidatedScriptList(), 0,
parallelInvalidatedScriptEntries_ * sizeof(JSScript *));
}
void
IonScript::copySafepointIndices(const SafepointIndex *si, MacroAssembler &masm)
{
@ -772,8 +786,8 @@ ion::ToggleBarriers(JSCompartment *comp, bool needs)
namespace js {
namespace ion {
CodeGenerator *
CompileBackEnd(MIRGenerator *mir)
bool
OptimizeMIR(MIRGenerator *mir)
{
IonSpewPass("BuildSSA");
// Note: don't call AssertGraphCoherency before SplitCriticalEdges,
@ -782,146 +796,146 @@ CompileBackEnd(MIRGenerator *mir)
MIRGraph &graph = mir->graph();
if (mir->shouldCancel("Start"))
return NULL;
return false;
if (!SplitCriticalEdges(graph))
return NULL;
return false;
IonSpewPass("Split Critical Edges");
AssertGraphCoherency(graph);
if (mir->shouldCancel("Split Critical Edges"))
return NULL;
return false;
if (!RenumberBlocks(graph))
return NULL;
return false;
IonSpewPass("Renumber Blocks");
AssertGraphCoherency(graph);
if (mir->shouldCancel("Renumber Blocks"))
return NULL;
return false;
if (!BuildDominatorTree(graph))
return NULL;
return false;
// No spew: graph not changed.
if (mir->shouldCancel("Dominator Tree"))
return NULL;
return false;
// This must occur before any code elimination.
if (!EliminatePhis(mir, graph, AggressiveObservability))
return NULL;
return false;
IonSpewPass("Eliminate phis");
AssertGraphCoherency(graph);
if (mir->shouldCancel("Eliminate phis"))
return NULL;
return false;
if (!BuildPhiReverseMapping(graph))
return NULL;
return false;
AssertExtendedGraphCoherency(graph);
// No spew: graph not changed.
if (mir->shouldCancel("Phi reverse mapping"))
return NULL;
return false;
// This pass also removes copies.
if (!ApplyTypeInformation(mir, graph))
return NULL;
return false;
IonSpewPass("Apply types");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("Apply types"))
return NULL;
return false;
// Alias analysis is required for LICM and GVN so that we don't move
// loads across stores.
if (js_IonOptions.licm || js_IonOptions.gvn) {
AliasAnalysis analysis(mir, graph);
if (!analysis.analyze())
return NULL;
return false;
IonSpewPass("Alias analysis");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("Alias analysis"))
return NULL;
return false;
// Eliminating dead resume point operands requires basic block
// instructions to be numbered. Reuse the numbering computed during
// alias analysis.
if (!EliminateDeadResumePointOperands(mir, graph))
return NULL;
return false;
if (mir->shouldCancel("Eliminate dead resume point operands"))
return NULL;
return false;
}
if (js_IonOptions.gvn) {
ValueNumberer gvn(mir, graph, js_IonOptions.gvnIsOptimistic);
if (!gvn.analyze())
return NULL;
return false;
IonSpewPass("GVN");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("GVN"))
return NULL;
return false;
}
if (js_IonOptions.uce) {
UnreachableCodeElimination uce(mir, graph);
if (!uce.analyze())
return NULL;
return false;
IonSpewPass("UCE");
AssertExtendedGraphCoherency(graph);
}
if (mir->shouldCancel("UCE"))
return NULL;
return false;
if (js_IonOptions.licm) {
LICM licm(mir, graph);
if (!licm.analyze())
return NULL;
return false;
IonSpewPass("LICM");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("LICM"))
return NULL;
return false;
}
if (js_IonOptions.rangeAnalysis) {
RangeAnalysis r(graph);
if (!r.addBetaNobes())
return NULL;
return false;
IonSpewPass("Beta");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("RA Beta"))
return NULL;
return false;
if (!r.analyze())
return NULL;
return false;
IonSpewPass("Range Analysis");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("Range Analysis"))
return NULL;
return false;
if (!r.removeBetaNobes())
return NULL;
return false;
IonSpewPass("De-Beta");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("RA De-Beta"))
return NULL;
return false;
}
if (!EliminateDeadCode(mir, graph))
return NULL;
return false;
IonSpewPass("DCE");
AssertExtendedGraphCoherency(graph);
if (mir->shouldCancel("DCE"))
return NULL;
return false;
// Passes after this point must not move instructions; these analyses
// depend on knowing the final order in which instructions will execute.
@ -929,12 +943,12 @@ CompileBackEnd(MIRGenerator *mir)
if (js_IonOptions.edgeCaseAnalysis) {
EdgeCaseAnalysis edgeCaseAnalysis(mir, graph);
if (!edgeCaseAnalysis.analyzeLate())
return NULL;
return false;
IonSpewPass("Edge Case Analysis (Late)");
AssertGraphCoherency(graph);
if (mir->shouldCancel("Edge Case Analysis (Late)"))
return NULL;
return false;
}
// Note: check elimination has to run after all other passes that move
@ -942,12 +956,17 @@ CompileBackEnd(MIRGenerator *mir)
// motion after this pass could incorrectly move a load or store before its
// bounds check.
if (!EliminateRedundantChecks(graph))
return NULL;
return false;
IonSpewPass("Bounds Check Elimination");
AssertGraphCoherency(graph);
if (mir->shouldCancel("Bounds Check Elimination"))
return NULL;
return true;
}
CodeGenerator *
GenerateLIR(MIRGenerator *mir)
{
MIRGraph &graph = mir->graph();
LIRGraph *lir = mir->temp().lifoAlloc()->new_<LIRGraph>(&graph);
if (!lir)
@ -1028,12 +1047,21 @@ CompileBackEnd(MIRGenerator *mir)
return codegen;
}
CodeGenerator *
CompileBackEnd(MIRGenerator *mir)
{
if (!OptimizeMIR(mir))
return NULL;
return GenerateLIR(mir);
}
class SequentialCompileContext {
public:
ExecutionMode executionMode() {
return SequentialExecution;
}
MethodStatus checkScriptSize(JSContext *cx, UnrootedScript script);
AbortReason compile(IonBuilder *builder, MIRGraph *graph,
ScopedJSDeletePtr<LifoAlloc> &autoDelete);
};
@ -1227,29 +1255,6 @@ SequentialCompileContext::compile(IonBuilder *builder, MIRGraph *graph,
return success ? AbortReason_NoAbort : AbortReason_Disable;
}
MethodStatus
TestIonCompile(JSContext *cx, HandleScript script, HandleFunction fun, jsbytecode *osrPc, bool constructing)
{
SequentialCompileContext compileContext;
AbortReason reason = IonCompile(cx, script, fun, osrPc, constructing, compileContext);
if (reason == AbortReason_Alloc)
return Method_Skipped;
if (reason == AbortReason_Inlining)
return Method_Skipped;
if (reason == AbortReason_Disable) {
if (!cx->isExceptionPending())
ForbidCompilation(cx, script);
return Method_CantCompile;
}
JS_ASSERT(reason == AbortReason_NoAbort);
return Method_Compiled;
}
static bool
CheckFrame(AbstractFramePtr fp)
{
@ -1302,8 +1307,8 @@ CheckScript(UnrootedScript script)
return true;
}
static MethodStatus
CheckScriptSize(JSContext *cx, UnrootedScript script)
MethodStatus
SequentialCompileContext::checkScriptSize(JSContext *cx, UnrootedScript script)
{
if (!js_IonOptions.limitScriptSize)
return Method_Compiled;
@ -1345,8 +1350,10 @@ CheckScriptSize(JSContext *cx, UnrootedScript script)
return Method_Compiled;
}
template <typename CompileContext>
static MethodStatus
Compile(JSContext *cx, JSScript *script, JSFunction *fun, jsbytecode *osrPc, bool constructing)
Compile(JSContext *cx, HandleScript script, HandleFunction fun, jsbytecode *osrPc, bool constructing,
CompileContext &compileContext)
{
JS_ASSERT(ion::IsEnabled(cx));
JS_ASSERT_IF(osrPc != NULL, (JSOp)*osrPc == JSOP_LOOPENTRY);
@ -1361,36 +1368,39 @@ Compile(JSContext *cx, JSScript *script, JSFunction *fun, jsbytecode *osrPc, boo
return Method_CantCompile;
}
MethodStatus status = CheckScriptSize(cx, script);
MethodStatus status = compileContext.checkScriptSize(cx, script);
if (status != Method_Compiled) {
IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename, script->lineno);
return status;
}
if (script->ion) {
if (!script->ion->method())
ExecutionMode executionMode = compileContext.executionMode();
IonScript *scriptIon = GetIonScript(script, executionMode);
if (scriptIon) {
if (!scriptIon->method())
return Method_CantCompile;
return Method_Compiled;
}
if (cx->methodJitEnabled) {
// If JM is enabled we use getUseCount instead of incUseCount to avoid
// bumping the use count twice.
if (script->getUseCount() < js_IonOptions.usesBeforeCompile)
return Method_Skipped;
} else {
if (script->incUseCount() < js_IonOptions.usesBeforeCompileNoJaeger)
return Method_Skipped;
}
if (executionMode == SequentialExecution) {
if (cx->methodJitEnabled) {
// If JM is enabled we use getUseCount instead of incUseCount to avoid
// bumping the use count twice.
SequentialCompileContext compileContext;
if (script->getUseCount() < js_IonOptions.usesBeforeCompile)
return Method_Skipped;
} else {
if (script->incUseCount() < js_IonOptions.usesBeforeCompileNoJaeger)
return Method_Skipped;
}
}
AbortReason reason = IonCompile(cx, script, fun, osrPc, constructing, compileContext);
if (reason == AbortReason_Disable)
return Method_CantCompile;
// Compilation succeeded or we invalidated right away or an inlining/alloc abort
return script->hasIonScript() ? Method_Compiled : Method_Skipped;
return HasIonScript(script, executionMode) ? Method_Compiled : Method_Skipped;
}
} // namespace ion
@ -1428,15 +1438,17 @@ ion::CanEnterAtBranch(JSContext *cx, JSScript *script, AbstractFramePtr fp,
}
// Attempt compilation. Returns Method_Compiled if already compiled.
JSFunction *fun = fp.isFunctionFrame() ? fp.fun() : NULL;
MethodStatus status = Compile(cx, script, fun, pc, isConstructing);
RootedFunction fun(cx, fp.isFunctionFrame() ? fp.fun() : NULL);
SequentialCompileContext compileContext;
RootedScript rscript(cx, script);
MethodStatus status = Compile(cx, rscript, fun, pc, isConstructing, compileContext);
if (status != Method_Compiled) {
if (status == Method_CantCompile)
ForbidCompilation(cx, script);
return status;
}
if (script->ion->osrPc() != pc)
if (script->ion && script->ion->osrPc() != pc)
return Method_Skipped;
return Method_Compiled;
@ -1480,8 +1492,10 @@ ion::CanEnter(JSContext *cx, JSScript *script, AbstractFramePtr fp,
}
// Attempt compilation. Returns Method_Compiled if already compiled.
JSFunction *fun = fp.isFunctionFrame() ? fp.fun() : NULL;
MethodStatus status = Compile(cx, script, fun, NULL, isConstructing);
RootedFunction fun(cx, fp.isFunctionFrame() ? fp.fun() : NULL);
SequentialCompileContext compileContext;
RootedScript rscript(cx, script);
MethodStatus status = Compile(cx, rscript, fun, NULL, isConstructing, compileContext);
if (status != Method_Compiled) {
if (status == Method_CantCompile)
ForbidCompilation(cx, script);
@ -1491,6 +1505,135 @@ ion::CanEnter(JSContext *cx, JSScript *script, AbstractFramePtr fp,
return Method_Compiled;
}
MethodStatus
ParallelCompileContext::checkScriptSize(JSContext *cx, UnrootedScript script)
{
if (!js_IonOptions.limitScriptSize)
return Method_Compiled;
// When compiling for parallel execution we don't have off-thread
// compilation. We also up the max script size of the kernels.
static const uint32_t MAX_SCRIPT_SIZE = 5000;
static const uint32_t MAX_LOCALS_AND_ARGS = 256;
if (script->length > MAX_SCRIPT_SIZE) {
IonSpew(IonSpew_Abort, "Script too large (%u bytes)", script->length);
return Method_CantCompile;
}
uint32_t numLocalsAndArgs = analyze::TotalSlots(script);
if (numLocalsAndArgs > MAX_LOCALS_AND_ARGS) {
IonSpew(IonSpew_Abort, "Too many locals and arguments (%u)", numLocalsAndArgs);
return Method_CantCompile;
}
return Method_Compiled;
}
MethodStatus
ParallelCompileContext::compileTransitively()
{
using parallel::SpewBeginCompile;
using parallel::SpewEndCompile;
if (worklist_.empty())
return Method_Skipped;
RootedFunction fun(cx_);
RootedScript script(cx_);
while (!worklist_.empty()) {
fun = worklist_.back()->toFunction();
script = fun->nonLazyScript();
worklist_.popBack();
SpewBeginCompile(fun);
// If we had invalidations last time the parallel script run, add the
// invalidated scripts to the worklist.
if (script->hasParallelIonScript()) {
IonScript *ion = script->parallelIonScript();
JS_ASSERT(ion->parallelInvalidatedScriptEntries() > 0);
RootedFunction invalidFun(cx_);
for (uint32_t i = 0; i < ion->parallelInvalidatedScriptEntries(); i++) {
if (JSScript *invalid = ion->getAndZeroParallelInvalidatedScript(i)) {
invalidFun = invalid->function();
parallel::Spew(parallel::SpewCompile,
"Adding previously invalidated function %p:%s:%u",
fun.get(), invalid->filename, invalid->lineno);
appendToWorklist(invalidFun);
}
}
}
// Attempt compilation. Returns Method_Compiled if already compiled.
MethodStatus status = Compile(cx_, script, fun, NULL, false, *this);
if (status != Method_Compiled) {
if (status == Method_CantCompile)
ForbidCompilation(cx_, script, ParallelExecution);
return SpewEndCompile(status);
}
// This can GC, so afterward, script->parallelIon is not guaranteed to be valid.
if (!cx_->compartment->ionCompartment()->enterJIT())
return SpewEndCompile(Method_Error);
// Subtle: it is possible for GC to occur during compilation of
// one of the invoked functions, which would cause the earlier
// functions (such as the kernel itself) to be collected. In this
// event, we give up and fallback to sequential for now.
if (!script->hasParallelIonScript()) {
parallel::Spew(parallel::SpewCompile,
"Function %p:%s:%u was garbage-collected or invalidated",
fun.get(), script->filename, script->lineno);
return SpewEndCompile(Method_Skipped);
}
SpewEndCompile(Method_Compiled);
}
return Method_Compiled;
}
AbortReason
ParallelCompileContext::compile(IonBuilder *builder,
MIRGraph *graph,
ScopedJSDeletePtr<LifoAlloc> &autoDelete)
{
JS_ASSERT(!builder->script()->parallelIon);
RootedScript builderScript(cx_, builder->script());
IonSpewNewFunction(graph, builderScript);
if (!builder->build())
return builder->abortReason();
builder->clearForBackEnd();
// For the time being, we do not enable parallel compilation.
if (!OptimizeMIR(builder)) {
IonSpew(IonSpew_Abort, "Failed during back-end compilation.");
return AbortReason_Disable;
}
if (!analyzeAndGrowWorklist(builder, *graph)) {
return AbortReason_Disable;
}
CodeGenerator *codegen = GenerateLIR(builder);
if (!codegen) {
IonSpew(IonSpew_Abort, "Failed during back-end compilation.");
return AbortReason_Disable;
}
bool success = codegen->link();
js_delete(codegen);
IonSpewEndFunction();
return success ? AbortReason_NoAbort : AbortReason_Disable;
}
MethodStatus
ion::CanEnterUsingFastInvoke(JSContext *cx, HandleScript script, uint32_t numActualArgs)
{
@ -1953,38 +2096,60 @@ ion::Invalidate(JSContext *cx, const Vector<types::RecompileInfo> &invalid, bool
}
bool
ion::Invalidate(JSContext *cx, UnrootedScript script, bool resetUses)
ion::Invalidate(JSContext *cx, UnrootedScript script, ExecutionMode mode, bool resetUses)
{
AutoAssertNoGC nogc;
JS_ASSERT(script->hasIonScript());
Vector<types::RecompileInfo> scripts(cx);
if (!scripts.append(script->ionScript()->recompileInfo()))
return false;
switch (mode) {
case SequentialExecution:
JS_ASSERT(script->hasIonScript());
if (!scripts.append(script->ionScript()->recompileInfo()))
return false;
break;
case ParallelExecution:
JS_ASSERT(script->hasParallelIonScript());
if (!scripts.append(script->parallelIonScript()->recompileInfo()))
return false;
break;
}
Invalidate(cx, scripts, resetUses);
return true;
}
bool
ion::Invalidate(JSContext *cx, UnrootedScript script, bool resetUses)
{
return Invalidate(cx, script, SequentialExecution, resetUses);
}
static void
FinishInvalidationOf(FreeOp *fop, UnrootedScript script, IonScript **ionField)
{
// If this script has Ion code on the stack, invalidation() will return
// true. In this case we have to wait until destroying it.
if (!(*ionField)->invalidated()) {
types::TypeCompartment &types = script->compartment()->types;
(*ionField)->recompileInfo().compilerOutput(types)->invalidate();
ion::IonScript::Destroy(fop, *ionField);
}
// In all cases, NULL out script->ion to avoid re-entry.
*ionField = NULL;
}
void
ion::FinishInvalidation(FreeOp *fop, UnrootedScript script)
{
if (!script->hasIonScript())
return;
if (script->hasIonScript())
FinishInvalidationOf(fop, script, &script->ion);
/*
* If this script has Ion code on the stack, invalidation() will return
* true. In this case we have to wait until destroying it.
*/
if (!script->ion->invalidated()) {
types::TypeCompartment &types = script->compartment()->types;
script->ion->recompileInfo().compilerOutput(types)->invalidate();
ion::IonScript::Destroy(fop, script->ion);
}
/* In all cases, NULL out script->ion to avoid re-entry. */
script->ion = NULL;
if (script->hasParallelIonScript())
FinishInvalidationOf(fop, script, &script->parallelIon);
}
void
@ -2002,22 +2167,43 @@ ion::MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
void
ion::ForbidCompilation(JSContext *cx, UnrootedScript script)
{
IonSpew(IonSpew_Abort, "Disabling Ion compilation of script %s:%d",
script->filename, script->lineno);
ForbidCompilation(cx, script, SequentialExecution);
}
void
ion::ForbidCompilation(JSContext *cx, UnrootedScript script, ExecutionMode mode)
{
IonSpew(IonSpew_Abort, "Disabling Ion mode %d compilation of script %s:%d",
mode, script->filename, script->lineno);
CancelOffThreadIonCompile(cx->compartment, script);
if (script->hasIonScript()) {
// It is only safe to modify script->ion if the script is not currently
// running, because IonFrameIterator needs to tell what ionScript to
// use (either the one on the JSScript, or the one hidden in the
// breadcrumbs Invalidation() leaves). Therefore, if invalidation
// fails, we cannot disable the script.
if (!Invalidate(cx, script, false))
return;
switch (mode) {
case SequentialExecution:
if (script->hasIonScript()) {
// It is only safe to modify script->ion if the script is not currently
// running, because IonFrameIterator needs to tell what ionScript to
// use (either the one on the JSScript, or the one hidden in the
// breadcrumbs Invalidation() leaves). Therefore, if invalidation
// fails, we cannot disable the script.
if (!Invalidate(cx, script, mode, false))
return;
}
script->ion = ION_DISABLED_SCRIPT;
return;
case ParallelExecution:
if (script->hasParallelIonScript()) {
if (!Invalidate(cx, script, mode, false))
return;
}
script->parallelIon = ION_DISABLED_SCRIPT;
return;
}
script->ion = ION_DISABLED_SCRIPT;
JS_NOT_REACHED("No such execution mode");
}
uint32_t
@ -2101,7 +2287,7 @@ ion::PurgeCaches(UnrootedScript script, JSCompartment *c) {
script->ion->purgeCaches(c);
if (script->hasParallelIonScript())
script->ion->purgeCaches(c);
script->parallelIon->purgeCaches(c);
}
size_t

Просмотреть файл

@ -11,6 +11,7 @@
#include "jscntxt.h"
#include "jscompartment.h"
#include "IonCode.h"
#include "CompileInfo.h"
#include "jsinfer.h"
#include "jsinterp.h"
@ -18,6 +19,7 @@ namespace js {
namespace ion {
class TempAllocator;
class ParallelCompileContext; // in ParallelArrayAnalysis.h
// Possible register allocators which may be used.
enum IonRegisterAllocator {
@ -173,6 +175,11 @@ struct IonOptions
// Default: 5
uint32_t slowCallIncUseCount;
// How many uses of a parallel kernel before we attempt compilation.
//
// Default: 1
uint32_t usesBeforeCompileParallel;
void setEagerCompilation() {
eagerCompilation = true;
usesBeforeCompile = usesBeforeCompileNoJaeger = 0;
@ -209,7 +216,8 @@ struct IonOptions
inlineUseCountRatio(128),
eagerCompilation(false),
slowCallLimit(512),
slowCallIncUseCount(5)
slowCallIncUseCount(5),
usesBeforeCompileParallel(1)
{
}
};
@ -301,6 +309,7 @@ IonExecStatus FastInvoke(JSContext *cx, HandleFunction fun, CallArgsList &args);
void Invalidate(types::TypeCompartment &types, FreeOp *fop,
const Vector<types::RecompileInfo> &invalid, bool resetUses = true);
void Invalidate(JSContext *cx, const Vector<types::RecompileInfo> &invalid, bool resetUses = true);
bool Invalidate(JSContext *cx, UnrootedScript script, ExecutionMode mode, bool resetUses = true);
bool Invalidate(JSContext *cx, UnrootedScript script, bool resetUses = true);
void MarkValueFromIon(JSRuntime *rt, Value *vp);
@ -315,14 +324,14 @@ class CodeGenerator;
CodeGenerator *CompileBackEnd(MIRGenerator *mir);
void AttachFinishedCompilations(JSContext *cx);
void FinishOffThreadBuilder(IonBuilder *builder);
MethodStatus TestIonCompile(JSContext *cx, HandleScript script, HandleFunction fun, jsbytecode *osrPc, bool constructing);
static inline bool IsEnabled(JSContext *cx)
{
return cx->hasRunOption(JSOPTION_ION) && cx->typeInferenceEnabled();
return cx->hasOption(JSOPTION_ION) && cx->typeInferenceEnabled();
}
void ForbidCompilation(JSContext *cx, UnrootedScript script);
void ForbidCompilation(JSContext *cx, UnrootedScript script, ExecutionMode mode);
uint32_t UsesBeforeIonRecompile(UnrootedScript script, jsbytecode *pc);
void PurgeCaches(UnrootedScript script, JSCompartment *c);

Просмотреть файл

@ -4507,6 +4507,16 @@ IonBuilder::jsop_initprop(HandlePropertyName name)
needsBarrier = false;
}
// In parallel execution, we never require write barriers. See
// forkjoin.cpp for more information.
switch (info().executionMode()) {
case SequentialExecution:
break;
case ParallelExecution:
needsBarrier = false;
break;
}
if (templateObject->isFixedSlot(shape->slot())) {
MStoreFixedSlot *store = MStoreFixedSlot::New(obj, shape->slot(), value);
if (needsBarrier)
@ -5470,8 +5480,8 @@ IonBuilder::jsop_getelem_dense()
return pushTypeBarrier(load, types, barrier);
}
static MInstruction *
GetTypedArrayLength(MDefinition *obj)
MInstruction *
IonBuilder::getTypedArrayLength(MDefinition *obj)
{
if (obj->isConstant()) {
JSObject *array = &obj->toConstant()->value().toObject();
@ -5482,8 +5492,8 @@ GetTypedArrayLength(MDefinition *obj)
return MTypedArrayLength::New(obj);
}
static MInstruction *
GetTypedArrayElements(MDefinition *obj)
MInstruction *
IonBuilder::getTypedArrayElements(MDefinition *obj)
{
if (obj->isConstant()) {
JSObject *array = &obj->toConstant()->value().toObject();
@ -5546,14 +5556,14 @@ IonBuilder::jsop_getelem_typed(int arrayType)
}
// Get the length.
MInstruction *length = GetTypedArrayLength(obj);
MInstruction *length = getTypedArrayLength(obj);
current->add(length);
// Bounds check.
id = addBoundsCheck(id, length);
// Get the elements vector.
MInstruction *elements = GetTypedArrayElements(obj);
MInstruction *elements = getTypedArrayElements(obj);
current->add(elements);
// Load the element.
@ -5723,14 +5733,14 @@ IonBuilder::jsop_setelem_typed(int arrayType)
id = idInt32;
// Get the length.
MInstruction *length = GetTypedArrayLength(obj);
MInstruction *length = getTypedArrayLength(obj);
current->add(length);
// Bounds check.
id = addBoundsCheck(id, length);
// Get the elements vector.
MInstruction *elements = GetTypedArrayElements(obj);
MInstruction *elements = getTypedArrayElements(obj);
current->add(elements);
// Clamp value to [0, 255] for Uint8ClampedArray.
@ -5794,7 +5804,7 @@ IonBuilder::jsop_length_fastPath()
if (sig.inTypes->getTypedArrayType() != TypedArray::TYPE_MAX) {
MDefinition *obj = current->pop();
MInstruction *length = GetTypedArrayLength(obj);
MInstruction *length = getTypedArrayLength(obj);
current->add(length);
current->push(length);
return true;

Просмотреть файл

@ -310,6 +310,10 @@ class IonBuilder : public MIRGenerator
types::StackTypeSet *barrier, types::StackTypeSet *types,
TypeOracle::Unary unary, TypeOracle::UnaryTypes unaryTypes);
// Typed array helpers.
MInstruction *getTypedArrayLength(MDefinition *obj);
MInstruction *getTypedArrayElements(MDefinition *obj);
bool jsop_add(MDefinition *left, MDefinition *right);
bool jsop_bitnot();
bool jsop_bitop(JSOp op);
@ -415,6 +419,18 @@ class IonBuilder : public MIRGenerator
// RegExp natives.
InliningStatus inlineRegExpTest(CallInfo &callInfo);
// Parallel Array.
InliningStatus inlineUnsafeSetElement(CallInfo &callInfo);
bool inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base);
bool inlineUnsafeSetTypedArrayElement(CallInfo &callInfo, uint32_t base, int arrayType);
InliningStatus inlineForceSequentialOrInParallelSection(CallInfo &callInfo);
InliningStatus inlineNewDenseArray(CallInfo &callInfo);
InliningStatus inlineNewDenseArrayForSequentialExecution(CallInfo &callInfo);
InliningStatus inlineNewDenseArrayForParallelExecution(CallInfo &callInfo);
InliningStatus inlineThrowError(CallInfo &callInfo);
InliningStatus inlineDump(CallInfo &callInfo);
InliningStatus inlineNativeCall(CallInfo &callInfo, JSNative native);
// Call functions

Просмотреть файл

@ -211,6 +211,18 @@ struct IonScript
uint32_t scriptList_;
uint32_t scriptEntries_;
// In parallel mode, list of scripts that we call that were invalidated
// last time this script bailed out. These will be recompiled (or tried to
// be) upon next parallel entry of this script.
//
// For non-parallel IonScripts, this is NULL.
//
// For parallel IonScripts, there are as many entries as there are slices,
// since for any single parallel execution, we can only get a single
// invalidation per slice.
uint32_t parallelInvalidatedScriptList_;
uint32_t parallelInvalidatedScriptEntries_;
// Number of references from invalidation records.
size_t refcount_;
@ -244,6 +256,10 @@ struct IonScript
JSScript **scriptList() const {
return (JSScript **)(reinterpret_cast<const uint8_t *>(this) + scriptList_);
}
JSScript **parallelInvalidatedScriptList() {
return (JSScript **)(reinterpret_cast<const uint8_t *>(this) +
parallelInvalidatedScriptList_);
}
private:
void trace(JSTracer *trc);
@ -255,7 +271,8 @@ struct IonScript
static IonScript *New(JSContext *cx, uint32_t frameLocals, uint32_t frameSize,
size_t snapshotsSize, size_t snapshotEntries,
size_t constants, size_t safepointIndexEntries, size_t osiIndexEntries,
size_t cacheEntries, size_t safepointsSize, size_t scriptEntries);
size_t cacheEntries, size_t safepointsSize, size_t scriptEntries,
size_t parallelInvalidatedScriptEntries);
static void Trace(JSTracer *trc, IonScript *script);
static void Destroy(FreeOp *fop, IonScript *script);
@ -339,6 +356,15 @@ struct IonScript
size_t scriptEntries() const {
return scriptEntries_;
}
size_t parallelInvalidatedScriptEntries() const {
return parallelInvalidatedScriptEntries_;
}
RawScript getAndZeroParallelInvalidatedScript(uint32_t i) {
JS_ASSERT(i < parallelInvalidatedScriptEntries_);
RawScript script = parallelInvalidatedScriptList()[i];
parallelInvalidatedScriptList()[i] = NULL;
return script;
}
size_t sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const {
return mallocSizeOf(this);
}
@ -380,6 +406,7 @@ struct IonScript
void copyCacheEntries(const IonCache *caches, MacroAssembler &masm);
void copySafepoints(const SafepointWriter *writer);
void copyScriptEntries(JSScript **scripts);
void zeroParallelInvalidatedScripts();
bool invalidated() const {
return refcount_ != 0;

Просмотреть файл

@ -332,6 +332,60 @@ MacroAssembler::newGCThing(const Register &result,
subPtr(Imm32(thingSize), result);
}
void
MacroAssembler::parNewGCThing(const Register &result,
const Register &threadContextReg,
const Register &tempReg1,
const Register &tempReg2,
JSObject *templateObject,
Label *fail)
{
// Similar to ::newGCThing(), except that it allocates from a
// custom Allocator in the ForkJoinSlice*, rather than being
// hardcoded to the compartment allocator. This requires two
// temporary registers.
//
// Subtle: I wanted to reuse `result` for one of the temporaries,
// but the register allocator was assigning it to the same
// register as `threadContextReg`. Then we overwrite that
// register which messed up the OOL code.
gc::AllocKind allocKind = templateObject->getAllocKind();
uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
// Load the allocator:
// tempReg1 = (Allocator*) forkJoinSlice->allocator
loadPtr(Address(threadContextReg, offsetof(js::ForkJoinSlice, allocator)),
tempReg1);
// Get a pointer to the relevant free list:
// tempReg1 = (FreeSpan*) &tempReg1->arenas.freeLists[(allocKind)]
uint32_t offset = (offsetof(Allocator, arenas) +
js::gc::ArenaLists::getFreeListOffset(allocKind));
addPtr(Imm32(offset), tempReg1);
// Load first item on the list
// tempReg2 = tempReg1->first
loadPtr(Address(tempReg1, offsetof(gc::FreeSpan, first)), tempReg2);
// Check whether list is empty
// if tempReg1->last <= tempReg2, fail
branchPtr(Assembler::BelowOrEqual,
Address(tempReg1, offsetof(gc::FreeSpan, last)),
tempReg2,
fail);
// If not, take first and advance pointer by thingSize bytes.
// result = tempReg2;
// tempReg2 += thingSize;
movePtr(tempReg2, result);
addPtr(Imm32(thingSize), tempReg2);
// Update `first`
// tempReg1->first = tempReg2;
storePtr(tempReg2, Address(tempReg1, offsetof(gc::FreeSpan, first)));
}
void
MacroAssembler::initGCThing(const Register &obj, JSObject *templateObject)
{
@ -378,6 +432,55 @@ MacroAssembler::initGCThing(const Register &obj, JSObject *templateObject)
}
}
void
MacroAssembler::compareStrings(JSOp op, Register left, Register right, Register result,
Register temp, Label *fail)
{
JS_ASSERT(IsEqualityOp(op));
Label done;
Label notPointerEqual;
// Fast path for identical strings.
branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), result);
jump(&done);
bind(&notPointerEqual);
loadPtr(Address(left, JSString::offsetOfLengthAndFlags()), result);
loadPtr(Address(right, JSString::offsetOfLengthAndFlags()), temp);
Label notAtom;
// Optimize the equality operation to a pointer compare for two atoms.
Imm32 atomBit(JSString::ATOM_BIT);
branchTest32(Assembler::Zero, result, atomBit, &notAtom);
branchTest32(Assembler::Zero, temp, atomBit, &notAtom);
cmpPtr(left, right);
emitSet(JSOpToCondition(op), result);
jump(&done);
bind(&notAtom);
// Strings of different length can never be equal.
rshiftPtr(Imm32(JSString::LENGTH_SHIFT), result);
rshiftPtr(Imm32(JSString::LENGTH_SHIFT), temp);
branchPtr(Assembler::Equal, result, temp, fail);
move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);
bind(&done);
}
void
MacroAssembler::parCheckInterruptFlags(const Register &tempReg,
Label *fail)
{
JSCompartment *compartment = GetIonContext()->compartment;
void *interrupt = (void*)&compartment->rt->interrupt;
movePtr(ImmWord(interrupt), tempReg);
load32(Address(tempReg, 0), tempReg);
branchTest32(Assembler::NonZero, tempReg, tempReg, fail);
}
void
MacroAssembler::maybeRemoveOsrFrame(Register scratch)
{

Просмотреть файл

@ -18,6 +18,9 @@
#include "ion/IonCompartment.h"
#include "ion/IonInstrumentation.h"
#include "ion/TypeOracle.h"
#include "ion/ParallelFunctions.h"
#include "vm/ForkJoin.h"
#include "jstypedarray.h"
#include "jscompartment.h"
@ -489,8 +492,24 @@ class MacroAssembler : public MacroAssemblerSpecific
// Inline allocation.
void newGCThing(const Register &result, JSObject *templateObject, Label *fail);
void parNewGCThing(const Register &result,
const Register &threadContextReg,
const Register &tempReg1,
const Register &tempReg2,
JSObject *templateObject,
Label *fail);
void initGCThing(const Register &obj, JSObject *templateObject);
// Compares two strings for equality based on the JSOP.
// This checks for identical pointers, atoms and length and fails for everything else.
void compareStrings(JSOp op, Register left, Register right, Register result,
Register temp, Label *fail);
// Checks the flags that signal that parallel code may need to interrupt or
// abort. Branches to fail in that case.
void parCheckInterruptFlags(const Register &tempReg,
Label *fail);
// If the IonCode that created this assembler needs to transition into the VM,
// we want to store the IonCode on the stack in order to mark it during a GC.
// This is a reference to a patch location where the IonCode* will be written.

Просмотреть файл

@ -236,6 +236,7 @@ ion::CheckLogging()
" pools Literal Pools (ARM only for now)\n"
" cacheflush Instruction Cache flushes (ARM only for now)\n"
" logs C1 and JSON visualization logging\n"
" trace Generate calls to js::ion::Trace() for effectful instructions\n"
" all Everything\n"
"\n"
);
@ -278,6 +279,8 @@ ion::CheckLogging()
EnableChannel(IonSpew_CacheFlush);
if (ContainsFlag(env, "logs"))
EnableIonDebugLogging();
if (ContainsFlag(env, "trace"))
EnableChannel(IonSpew_Trace);
if (ContainsFlag(env, "all"))
LoggingBits = uint32_t(-1);

Просмотреть файл

@ -52,6 +52,8 @@ namespace ion {
_(Safepoints) \
/* Debug info about Pools*/ \
_(Pools) \
/* Calls to js::ion::Trace() */ \
_(Trace) \
/* Debug info about the I$ */ \
_(CacheFlush)

Просмотреть файл

@ -64,11 +64,12 @@ enum MIRType
MIRType_Object,
MIRType_Magic,
MIRType_Value,
MIRType_None, // Invalid, used as a placeholder.
MIRType_Slots, // A slots vector
MIRType_Elements, // An elements vector
MIRType_StackFrame, // StackFrame pointer for OSR.
MIRType_Shape // A Shape pointer.
MIRType_None, // Invalid, used as a placeholder.
MIRType_Slots, // A slots vector
MIRType_Elements, // An elements vector
MIRType_StackFrame, // StackFrame pointer for OSR.
MIRType_Shape, // A Shape pointer.
MIRType_ForkJoinSlice // js::ForkJoinSlice*
};
#ifdef DEBUG

Просмотреть файл

@ -249,6 +249,16 @@ class LNewSlots : public LCallInstructionHelper<1, 0, 3>
}
};
class LNewParallelArray : public LInstructionHelper<1, 0, 0>
{
public:
LIR_HEADER(NewParallelArray);
MNewParallelArray *mir() const {
return mir_->toNewParallelArray();
}
};
class LNewArray : public LInstructionHelper<1, 0, 0>
{
public:
@ -269,6 +279,79 @@ class LNewObject : public LInstructionHelper<1, 0, 0>
}
};
class LParNew : public LInstructionHelper<1, 1, 2>
{
public:
LIR_HEADER(ParNew);
LParNew(const LAllocation &parSlice,
const LDefinition &temp1,
const LDefinition &temp2)
{
setOperand(0, parSlice);
setTemp(0, temp1);
setTemp(1, temp2);
}
MParNew *mir() const {
return mir_->toParNew();
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LAllocation *getTemp0() {
return getTemp(0)->output();
}
const LAllocation *getTemp1() {
return getTemp(1)->output();
}
};
class LParNewDenseArray : public LCallInstructionHelper<1, 2, 3>
{
public:
LIR_HEADER(ParNewDenseArray);
LParNewDenseArray(const LAllocation &parSlice,
const LAllocation &length,
const LDefinition &temp1,
const LDefinition &temp2,
const LDefinition &temp3) {
setOperand(0, parSlice);
setOperand(1, length);
setTemp(0, temp1);
setTemp(1, temp2);
setTemp(2, temp3);
}
MParNewDenseArray *mir() const {
return mir_->toParNewDenseArray();
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LAllocation *length() {
return getOperand(1);
}
const LAllocation *getTemp0() {
return getTemp(0)->output();
}
const LAllocation *getTemp1() {
return getTemp(1)->output();
}
const LAllocation *getTemp2() {
return getTemp(2)->output();
}
};
// Allocates a new DeclEnvObject.
//
// This instruction generates two possible instruction sets:
@ -311,6 +394,64 @@ class LNewCallObject : public LInstructionHelper<1, 1, 0>
}
};
class LParNewCallObject : public LInstructionHelper<1, 2, 2>
{
LParNewCallObject(const LAllocation &parSlice,
const LAllocation &slots,
const LDefinition &temp1,
const LDefinition &temp2) {
setOperand(0, parSlice);
setOperand(1, slots);
setTemp(0, temp1);
setTemp(1, temp2);
}
public:
LIR_HEADER(ParNewCallObject);
static LParNewCallObject *NewWithSlots(const LAllocation &parSlice,
const LAllocation &slots,
const LDefinition &temp1,
const LDefinition &temp2) {
return new LParNewCallObject(parSlice, slots, temp1, temp2);
}
static LParNewCallObject *NewSansSlots(const LAllocation &parSlice,
const LDefinition &temp1,
const LDefinition &temp2) {
LAllocation slots = LConstantIndex::Bogus();
return new LParNewCallObject(parSlice, slots, temp1, temp2);
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LAllocation *slots() {
return getOperand(1);
}
const bool hasDynamicSlots() {
// TO INVESTIGATE: Felix tried using isRegister() method here,
// but for useFixed(_, CallTempN), isRegister() is false (and
// isUse() is true). So for now ignore that and try to match
// the LConstantIndex::Bogus() generated above instead.
return slots() && ! slots()->isConstant();
}
const MParNewCallObject *mir() const {
return mir_->toParNewCallObject();
}
const LAllocation *getTemp0() {
return getTemp(0)->output();
}
const LAllocation *getTemp1() {
return getTemp(1)->output();
}
};
class LNewStringObject : public LInstructionHelper<1, 1, 1>
{
public:
@ -332,6 +473,12 @@ class LNewStringObject : public LInstructionHelper<1, 1, 1>
}
};
class LParBailout : public LInstructionHelper<0, 0, 0>
{
public:
LIR_HEADER(ParBailout);
};
// Takes in an Object and a Value.
class LInitProp : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
{
@ -371,6 +518,48 @@ class LCheckOverRecursed : public LInstructionHelper<0, 0, 1>
}
};
class LParCheckOverRecursed : public LInstructionHelper<0, 1, 1>
{
public:
LIR_HEADER(ParCheckOverRecursed);
LParCheckOverRecursed(const LAllocation &parSlice,
const LDefinition &tempReg)
{
setOperand(0, parSlice);
setTemp(0, tempReg);
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LDefinition *getTempReg() {
return getTemp(0);
}
};
class LParCheckInterrupt : public LInstructionHelper<0, 1, 1>
{
public:
LIR_HEADER(ParCheckInterrupt);
LParCheckInterrupt(const LAllocation &parSlice,
const LDefinition &tempReg)
{
setOperand(0, parSlice);
setTemp(0, tempReg);
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LDefinition *getTempReg() {
return getTemp(0);
}
};
class LDefVar : public LCallInstructionHelper<0, 1, 0>
{
public:
@ -1146,6 +1335,27 @@ class LCompareStrictS : public LInstructionHelper<1, BOX_PIECES + 1, 2>
}
};
class LParCompareS : public LCallInstructionHelper<1, 2, 0>
{
public:
LIR_HEADER(ParCompareS);
LParCompareS(const LAllocation &left, const LAllocation &right) {
setOperand(0, left);
setOperand(1, right);
}
const LAllocation *left() {
return getOperand(0);
}
const LAllocation *right() {
return getOperand(1);
}
MCompare *mir() {
return mir_->toCompare();
}
};
// Used for strict-equality comparisons where one side is a boolean
// and the other is a value. Note that CompareI is used to compare
// two booleans.
@ -2098,6 +2308,37 @@ class LLambda : public LInstructionHelper<1, 1, 0>
}
};
class LParLambda : public LInstructionHelper<1, 2, 2>
{
public:
LIR_HEADER(ParLambda);
LParLambda(const LAllocation &parSlice,
const LAllocation &scopeChain,
const LDefinition &temp1,
const LDefinition &temp2) {
setOperand(0, parSlice);
setOperand(1, scopeChain);
setTemp(0, temp1);
setTemp(1, temp2);
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LAllocation *scopeChain() {
return getOperand(1);
}
const MParLambda *mir() const {
return mir_->toParLambda();
}
const LAllocation *getTemp0() {
return getTemp(0)->output();
}
const LAllocation *getTemp1() {
return getTemp(1)->output();
}
};
// Determines the implicit |this| value for function calls.
class LImplicitThis : public LInstructionHelper<BOX_PIECES, 1, 0>
{
@ -3076,6 +3317,20 @@ class LFunctionEnvironment : public LInstructionHelper<1, 1, 0>
}
};
class LParSlice : public LCallInstructionHelper<1, 0, 1>
{
public:
LIR_HEADER(ParSlice);
LParSlice(const LDefinition &temp1) {
setTemp(0, temp1);
}
const LAllocation *getTempReg() {
return getTemp(0)->output();
}
};
class LCallGetProperty : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
{
public:
@ -3325,6 +3580,48 @@ class LGetArgument : public LInstructionHelper<BOX_PIECES, 1, 0>
}
};
class LParWriteGuard : public LCallInstructionHelper<0, 2, 1>
{
public:
LIR_HEADER(ParWriteGuard);
LParWriteGuard(const LAllocation &parSlice,
const LAllocation &object,
const LDefinition &temp1) {
setOperand(0, parSlice);
setOperand(1, object);
setTemp(0, temp1);
}
bool isCall() const {
return true;
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LAllocation *object() {
return getOperand(1);
}
const LAllocation *getTempReg() {
return getTemp(0)->output();
}
};
class LParDump : public LCallInstructionHelper<0, BOX_PIECES, 0>
{
public:
LIR_HEADER(ParDump);
static const size_t Value = 0;
const LAllocation *value() {
return getOperand(0);
}
};
// Guard that a value is in a TypeSet.
class LTypeBarrier : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 1>
{

Просмотреть файл

@ -546,6 +546,8 @@ class LDefinition
return LDefinition::GENERAL;
case MIRType_StackFrame:
return LDefinition::GENERAL;
case MIRType_ForkJoinSlice:
return LDefinition::GENERAL;
default:
JS_NOT_REACHED("unexpected type");
return LDefinition::GENERAL;

Просмотреть файл

@ -22,14 +22,20 @@
_(TableSwitch) \
_(TableSwitchV) \
_(Goto) \
_(NewParallelArray) \
_(NewArray) \
_(NewObject) \
_(NewSlots) \
_(NewDeclEnvObject) \
_(NewCallObject) \
_(NewStringObject) \
_(ParNew) \
_(ParNewDenseArray) \
_(ParNewCallObject) \
_(ParBailout) \
_(InitProp) \
_(CheckOverRecursed) \
_(ParCheckOverRecursed) \
_(RecompileCheck) \
_(DefVar) \
_(DefFun) \
@ -63,6 +69,7 @@
_(CompareDAndBranch) \
_(CompareS) \
_(CompareStrictS) \
_(ParCompareS) \
_(CompareB) \
_(CompareBAndBranch) \
_(CompareV) \
@ -109,6 +116,7 @@
_(RegExpTest) \
_(Lambda) \
_(LambdaForSingleton) \
_(ParLambda) \
_(ImplicitThis) \
_(Slots) \
_(Elements) \
@ -119,6 +127,8 @@
_(StoreSlotT) \
_(GuardShape) \
_(GuardClass) \
_(ParWriteGuard) \
_(ParDump) \
_(TypeBarrier) \
_(MonitorTypes) \
_(InitializedLength) \
@ -149,6 +159,7 @@
_(StoreFixedSlotV) \
_(StoreFixedSlotT) \
_(FunctionEnvironment) \
_(ParSlice) \
_(GetPropertyCacheV) \
_(GetPropertyCacheT) \
_(GetElementCacheV) \
@ -184,6 +195,7 @@
_(InstanceOfV) \
_(CallInstanceOf) \
_(InterruptCheck) \
_(ParCheckInterrupt) \
_(FunctionBoundary) \
_(GetDOMProperty) \
_(SetDOMProperty) \

Просмотреть файл

@ -629,6 +629,7 @@ LinearScanAllocator::splitBlockingIntervals(LAllocation allocation)
if (fixed->numRanges() > 0) {
CodePosition fixedPos = current->intersect(fixed);
if (fixedPos != CodePosition::MIN) {
JS_ASSERT(fixedPos > current->start());
JS_ASSERT(fixedPos < current->end());
if (!splitInterval(current, fixedPos))
return false;

Просмотреть файл

@ -113,6 +113,19 @@ LIRGenerator::visitCheckOverRecursed(MCheckOverRecursed *ins)
return true;
}
bool
LIRGenerator::visitParCheckOverRecursed(MParCheckOverRecursed *ins)
{
LParCheckOverRecursed *lir = new LParCheckOverRecursed(
useRegister(ins->parSlice()),
temp());
if (!add(lir))
return false;
if (!assignSafepoint(lir, ins))
return false;
return true;
}
bool
LIRGenerator::visitDefVar(MDefVar *ins)
{
@ -143,6 +156,13 @@ LIRGenerator::visitNewSlots(MNewSlots *ins)
return defineReturn(lir, ins);
}
bool
LIRGenerator::visitNewParallelArray(MNewParallelArray *ins)
{
LNewParallelArray *lir = new LNewParallelArray();
return define(lir, ins) && assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitNewArray(MNewArray *ins)
{
@ -183,6 +203,25 @@ LIRGenerator::visitNewCallObject(MNewCallObject *ins)
return true;
}
bool
LIRGenerator::visitParNewCallObject(MParNewCallObject *ins)
{
const LAllocation &parThreadContext = useRegister(ins->parSlice());
const LDefinition &temp1 = temp();
const LDefinition &temp2 = temp();
LParNewCallObject *lir;
if (ins->slots()->type() == MIRType_Slots) {
const LAllocation &slots = useRegister(ins->slots());
lir = LParNewCallObject::NewWithSlots(parThreadContext, slots,
temp1, temp2);
} else {
lir = LParNewCallObject::NewSansSlots(parThreadContext, temp1, temp2);
}
return define(lir, ins);
}
bool
LIRGenerator::visitNewStringObject(MNewStringObject *ins)
{
@ -192,6 +231,13 @@ LIRGenerator::visitNewStringObject(MNewStringObject *ins)
return define(lir, ins) && assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitParBailout(MParBailout *ins)
{
LParBailout *lir = new LParBailout();
return add(lir, ins);
}
bool
LIRGenerator::visitInitProp(MInitProp *ins)
{
@ -580,10 +626,24 @@ LIRGenerator::visitCompare(MCompare *comp)
// LCompareSAndBranch. Doing this now wouldn't be wrong, but doesn't
// make sense and avoids confusion.
if (comp->compareType() == MCompare::Compare_String) {
LCompareS *lir = new LCompareS(useRegister(left), useRegister(right), temp());
if (!define(lir, comp))
return false;
return assignSafepoint(lir, comp);
switch (comp->block()->info().executionMode()) {
case SequentialExecution:
{
LCompareS *lir = new LCompareS(useRegister(left), useRegister(right), temp());
if (!define(lir, comp))
return false;
return assignSafepoint(lir, comp);
}
case ParallelExecution:
{
LParCompareS *lir = new LParCompareS(useFixed(left, CallTempReg0),
useFixed(right, CallTempReg1));
return defineReturn(lir, comp);
}
}
JS_NOT_REACHED("Unexpected execution mode");
}
// Strict compare between value and string
@ -1382,6 +1442,17 @@ LIRGenerator::visitLambda(MLambda *ins)
return define(lir, ins) && assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitParLambda(MParLambda *ins)
{
JS_ASSERT(!ins->fun()->hasSingletonType());
JS_ASSERT(!types::UseNewTypeForClone(ins->fun()));
LParLambda *lir = new LParLambda(useRegister(ins->parSlice()),
useRegister(ins->scopeChain()),
temp(), temp());
return define(lir, ins);
}
bool
LIRGenerator::visitImplicitThis(MImplicitThis *ins)
{
@ -1439,6 +1510,62 @@ LIRGenerator::visitFunctionEnvironment(MFunctionEnvironment *ins)
return define(new LFunctionEnvironment(useRegisterAtStart(ins->function())), ins);
}
bool
LIRGenerator::visitParSlice(MParSlice *ins)
{
LParSlice *lir = new LParSlice(tempFixed(CallTempReg0));
return defineReturn(lir, ins);
}
bool
LIRGenerator::visitParWriteGuard(MParWriteGuard *ins)
{
return add(new LParWriteGuard(useFixed(ins->parSlice(), CallTempReg0),
useFixed(ins->object(), CallTempReg1),
tempFixed(CallTempReg2)));
}
bool
LIRGenerator::visitParCheckInterrupt(MParCheckInterrupt *ins)
{
LParCheckInterrupt *lir = new LParCheckInterrupt(
useRegister(ins->parSlice()),
temp());
if (!add(lir))
return false;
if (!assignSafepoint(lir, ins))
return false;
return true;
}
bool
LIRGenerator::visitParDump(MParDump *ins)
{
LParDump *lir = new LParDump();
useBoxFixed(lir, LParDump::Value, ins->value(), CallTempReg0, CallTempReg1);
return add(lir);
}
bool
LIRGenerator::visitParNew(MParNew *ins)
{
LParNew *lir = new LParNew(useRegister(ins->parSlice()),
temp(), temp());
return define(lir, ins);
}
bool
LIRGenerator::visitParNewDenseArray(MParNewDenseArray *ins)
{
LParNewDenseArray *lir = new LParNewDenseArray(
useFixed(ins->parSlice(), CallTempReg0),
useFixed(ins->length(), CallTempReg1),
tempFixed(CallTempReg2),
tempFixed(CallTempReg3),
tempFixed(CallTempReg4));
return defineReturn(lir, ins);
}
bool
LIRGenerator::visitStoreSlot(MStoreSlot *ins)
{

Просмотреть файл

@ -79,13 +79,19 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitGoto(MGoto *ins);
bool visitTableSwitch(MTableSwitch *tableswitch);
bool visitNewSlots(MNewSlots *ins);
bool visitNewParallelArray(MNewParallelArray *ins);
bool visitNewArray(MNewArray *ins);
bool visitNewObject(MNewObject *ins);
bool visitNewDeclEnvObject(MNewDeclEnvObject *ins);
bool visitNewCallObject(MNewCallObject *ins);
bool visitNewStringObject(MNewStringObject *ins);
bool visitParNew(MParNew *ins);
bool visitParNewCallObject(MParNewCallObject *ins);
bool visitParNewDenseArray(MParNewDenseArray *ins);
bool visitParBailout(MParBailout *ins);
bool visitInitProp(MInitProp *ins);
bool visitCheckOverRecursed(MCheckOverRecursed *ins);
bool visitParCheckOverRecursed(MParCheckOverRecursed *ins);
bool visitDefVar(MDefVar *ins);
bool visitDefFun(MDefFun *ins);
bool visitPrepareCall(MPrepareCall *ins);
@ -136,6 +142,7 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitRegExp(MRegExp *ins);
bool visitRegExpTest(MRegExpTest *ins);
bool visitLambda(MLambda *ins);
bool visitParLambda(MParLambda *ins);
bool visitImplicitThis(MImplicitThis *ins);
bool visitSlots(MSlots *ins);
bool visitElements(MElements *ins);
@ -143,6 +150,10 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitConvertElementsToDoubles(MConvertElementsToDoubles *ins);
bool visitLoadSlot(MLoadSlot *ins);
bool visitFunctionEnvironment(MFunctionEnvironment *ins);
bool visitParSlice(MParSlice *ins);
bool visitParWriteGuard(MParWriteGuard *ins);
bool visitParCheckInterrupt(MParCheckInterrupt *ins);
bool visitParDump(MParDump *ins);
bool visitStoreSlot(MStoreSlot *ins);
bool visitTypeBarrier(MTypeBarrier *ins);
bool visitMonitorTypes(MMonitorTypes *ins);

Просмотреть файл

@ -7,6 +7,8 @@
#include "jslibmath.h"
#include "jsmath.h"
#include "builtin/ParallelArray.h"
#include "builtin/TestingFunctions.h"
#include "MIR.h"
#include "MIRGraph.h"
@ -76,6 +78,22 @@ IonBuilder::inlineNativeCall(CallInfo &callInfo, JSNative native)
if (native == regexp_test)
return inlineRegExpTest(callInfo);
// Parallel Array
if (native == intrinsic_UnsafeSetElement)
return inlineUnsafeSetElement(callInfo);
if (native == testingFunc_inParallelSection)
return inlineForceSequentialOrInParallelSection(callInfo);
if (native == intrinsic_NewDenseArray)
return inlineNewDenseArray(callInfo);
// Self-hosting
if (native == intrinsic_ThrowError)
return inlineThrowError(callInfo);
#ifdef DEBUG
if (native == intrinsic_Dump)
return inlineDump(callInfo);
#endif
return InliningStatus_NotInlined;
}
@ -846,5 +864,286 @@ IonBuilder::inlineRegExpTest(CallInfo &callInfo)
return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineUnsafeSetElement(CallInfo &callInfo)
{
uint32_t argc = callInfo.argc();
if (argc < 3 || (argc % 3) != 0 || callInfo.constructing())
return InliningStatus_NotInlined;
/* Important:
*
* Here we inline each of the stores resulting from a call to
* %UnsafeSetElement(). It is essential that these stores occur
* atomically and cannot be interrupted by a stack or recursion
* check. If this is not true, race conditions can occur.
*/
for (uint32_t base = 0; base < argc; base += 3) {
uint32_t arri = base + 1;
uint32_t idxi = base + 2;
types::StackTypeSet *obj = getInlineArgTypeSet(callInfo, arri);
types::StackTypeSet *id = getInlineArgTypeSet(callInfo, idxi);
int arrayType;
if (!oracle->elementAccessIsDenseNative(obj, id) &&
!oracle->elementAccessIsTypedArray(obj, id, &arrayType))
{
return InliningStatus_NotInlined;
}
}
callInfo.unwrapArgs();
// Push the result first so that the stack depth matches up for
// the potential bailouts that will occur in the stores below.
MConstant *udef = MConstant::New(UndefinedValue());
current->add(udef);
current->push(udef);
for (uint32_t base = 0; base < argc; base += 3) {
uint32_t arri = base + 1;
uint32_t idxi = base + 2;
types::StackTypeSet *obj = getInlineArgTypeSet(callInfo, arri);
types::StackTypeSet *id = getInlineArgTypeSet(callInfo, idxi);
if (oracle->elementAccessIsDenseNative(obj, id)) {
if (!inlineUnsafeSetDenseArrayElement(callInfo, base))
return InliningStatus_Error;
continue;
}
int arrayType;
if (oracle->elementAccessIsTypedArray(obj, id, &arrayType)) {
if (!inlineUnsafeSetTypedArrayElement(callInfo, base, arrayType))
return InliningStatus_Error;
continue;
}
JS_NOT_REACHED("Element access not dense array nor typed array");
}
return InliningStatus_Inlined;
}
bool
IonBuilder::inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base)
{
// Note: we do not check the conditions that are asserted as true
// in intrinsic_UnsafeSetElement():
// - arr is a dense array
// - idx < initialized length
// Furthermore, note that inference should be propagating
// the type of the value to the JSID_VOID property of the array.
uint32_t arri = base + 1;
uint32_t idxi = base + 2;
uint32_t elemi = base + 3;
MElements *elements = MElements::New(callInfo.getArg(arri));
current->add(elements);
MToInt32 *id = MToInt32::New(callInfo.getArg(idxi));
current->add(id);
// We disable the hole check for this store. This implies that if
// there were setters on the prototype, they would not be invoked.
// But this is actually the desired behavior.
MStoreElement *store = MStoreElement::New(elements, id,
callInfo.getArg(elemi),
/* needsHoleCheck = */ false);
store->setRacy();
current->add(store);
if (!resumeAfter(store))
return false;
return true;
}
bool
IonBuilder::inlineUnsafeSetTypedArrayElement(CallInfo &callInfo,
uint32_t base,
int arrayType)
{
// Note: we do not check the conditions that are asserted as true
// in intrinsic_UnsafeSetElement():
// - arr is a typed array
// - idx < length
uint32_t arri = base + 1;
uint32_t idxi = base + 2;
uint32_t elemi = base + 3;
MInstruction *elements = getTypedArrayElements(callInfo.getArg(arri));
current->add(elements);
MToInt32 *id = MToInt32::New(callInfo.getArg(idxi));
current->add(id);
MDefinition *value = callInfo.getArg(elemi);
if (arrayType == TypedArray::TYPE_UINT8_CLAMPED) {
value = MClampToUint8::New(value);
current->add(value->toInstruction());
}
MStoreTypedArrayElement *store = MStoreTypedArrayElement::New(elements, id, value, arrayType);
store->setRacy();
current->add(store);
if (!resumeAfter(store))
return false;
return true;
}
// Inlines ForceSequentialOrInParallelSection(): left un-inlined in
// sequential mode, replaced with the constant |true| in parallel mode so
// that UCE can eliminate the sequential fallback path.
IonBuilder::InliningStatus
IonBuilder::inlineForceSequentialOrInParallelSection(CallInfo &callInfo)
{
    if (callInfo.constructing())
        return InliningStatus_NotInlined;

    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        // In sequential mode, leave as is, because we'd have to
        // access the "in warmup" flag of the runtime.
        return InliningStatus_NotInlined;

      case ParallelExecution:
        // During Parallel Exec, we always force sequential, so
        // replace with true. This permits UCE to eliminate the
        // entire path as dead, which is important.
        callInfo.unwrapArgs();
        MConstant *ins = MConstant::New(BooleanValue(true));
        current->add(ins);
        current->push(ins);
        return InliningStatus_Inlined;
    }

    JS_NOT_REACHED("Invalid execution mode");
}
// Inlines NewDenseArray(length), dispatching on the current execution
// mode. Only the parallel path emits dedicated MIR (see below).
IonBuilder::InliningStatus
IonBuilder::inlineNewDenseArray(CallInfo &callInfo)
{
    // Requires exactly one argument (the length) and a non-constructing call.
    if (callInfo.constructing() || callInfo.argc() != 1)
        return InliningStatus_NotInlined;

    // For now, in seq. mode we just call the C function. In
    // par. mode we use inlined MIR.
    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        return inlineNewDenseArrayForSequentialExecution(callInfo);
      case ParallelExecution:
        return inlineNewDenseArrayForParallelExecution(callInfo);
    }

    JS_NOT_REACHED("unknown ExecutionMode");
}
// Sequential-mode counterpart of inlineNewDenseArray(): intentionally a
// stub that declines to inline, falling back to the C function.
IonBuilder::InliningStatus
IonBuilder::inlineNewDenseArrayForSequentialExecution(CallInfo &callInfo)
{
    // not yet implemented; in seq. mode the C function is not so bad
    return InliningStatus_NotInlined;
}
// Parallel-mode inlining of NewDenseArray(length): emits an
// MParNewDenseArray tied to the current parallel slice.
IonBuilder::InliningStatus
IonBuilder::inlineNewDenseArrayForParallelExecution(CallInfo &callInfo)
{
    // Create the new parallel array object. Parallel arrays have specially
    // constructed type objects, so we can only perform the inlining if we
    // already have one of these type objects.
    types::StackTypeSet *returnTypes = getInlineReturnTypeSet();
    if (returnTypes->getKnownTypeTag() != JSVAL_TYPE_OBJECT)
        return InliningStatus_NotInlined;
    if (returnTypes->getObjectCount() != 1)
        return InliningStatus_NotInlined;
    types::TypeObject *typeObject = returnTypes->getTypeObject(0);

    // Template object: an empty dense array carrying the inferred type
    // (MParNewDenseArray expects an *empty* dense array template).
    RootedObject templateObject(cx, NewDenseAllocatedArray(cx, 0));
    if (!templateObject)
        return InliningStatus_Error;
    templateObject->setType(typeObject);

    MParNewDenseArray *newObject = new MParNewDenseArray(graph().parSlice(),
                                                         callInfo.getArg(1),
                                                         templateObject);
    current->add(newObject);
    current->push(newObject);

    return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineThrowError(CallInfo &callInfo)
{
    // In Parallel Execution, convert %ThrowError() into a bailout.
    if (callInfo.constructing())
        return InliningStatus_NotInlined;

    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        return InliningStatus_NotInlined;
      case ParallelExecution:
        break;
    }

    callInfo.unwrapArgs();

    MParBailout *bailout = new MParBailout();
    if (!bailout)
        return InliningStatus_Error;
    // The bailout is a control instruction: it terminates the current
    // block. Continue building in a fresh block at the same pc.
    current->end(bailout);

    current = newBlock(pc);
    if (!current)
        return InliningStatus_Error;

    // Push |undefined| as the (never actually observed) call result so the
    // stack shape stays consistent for subsequent MIR construction.
    MConstant *udef = MConstant::New(UndefinedValue());
    current->add(udef);
    current->push(udef);

    return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineDump(CallInfo &callInfo)
{
    // In Parallel Execution, call ParDump. We just need a debugging
    // aid!
    if (callInfo.constructing())
        return InliningStatus_NotInlined;

    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        return InliningStatus_NotInlined;
      case ParallelExecution:
        break;
    }

    callInfo.unwrapArgs();

    MParDump *dump = new MParDump(callInfo.getArg(1));
    current->add(dump);

    // The intrinsic's result is |undefined|.
    MConstant *udef = MConstant::New(UndefinedValue());
    current->add(udef);
    current->push(udef);

    return InliningStatus_Inlined;
}
} // namespace ion
} // namespace js

Просмотреть файл

@ -1879,6 +1879,29 @@ MBeta::computeRange()
}
}
bool
MNewObject::shouldUseVM() const
{
return templateObject()->hasSingletonType() ||
templateObject()->hasDynamicSlots();
}
// Returns true if the code generator should call through to the VM for
// this array allocation rather than taking the inline fast path.
bool
MNewArray::shouldUseVM() const
{
    JS_ASSERT(count() < JSObject::NELEMENTS_LIMIT);

    // Largest number of elements that fit inline in the biggest GC kind,
    // after reserving space for the elements header.
    size_t maxArraySlots =
        gc::GetGCKindSlots(gc::FINALIZE_OBJECT_LAST) - ObjectElements::VALUES_PER_HEADER;

    // Allocate space using the VMCall
    // when mir hints it needs to get allocated immediately,
    // but only when data doesn't fit the available array slots.
    bool allocating = isAllocating() && count() > maxArraySlots;

    return templateObject()->hasSingletonType() || allocating;
}
bool
MLoadFixedSlot::mightAlias(MDefinition *store)
{

Просмотреть файл

@ -310,6 +310,7 @@ class MDefinition : public MNode
{ }
virtual Opcode op() const = 0;
virtual const char *opName() const = 0;
void printName(FILE *fp);
static void PrintOpcodeName(FILE *fp, Opcode op);
virtual void printOpcode(FILE *fp);
@ -579,6 +580,9 @@ class MInstruction
Opcode op() const { \
return MDefinition::Op_##opcode; \
} \
const char *opName() const { \
return #opcode; \
} \
bool accept(MInstructionVisitor *visitor) { \
return visitor->visit##opcode(this); \
}
@ -610,6 +614,15 @@ class MAryInstruction : public MInstruction
class MNullaryInstruction : public MAryInstruction<0>
{ };
class MUnaryInstruction : public MAryInstruction<1>
{
protected:
MUnaryInstruction(MDefinition *ins)
{
setOperand(0, ins);
}
};
// Generates an LSnapshot without further effect.
class MStart : public MNullaryInstruction
{
@ -1068,6 +1081,28 @@ class MThrow
}
};
class MNewParallelArray : public MNullaryInstruction
{
CompilerRootObject templateObject_;
MNewParallelArray(JSObject *templateObject)
: templateObject_(templateObject)
{
setResultType(MIRType_Object);
}
public:
INSTRUCTION_HEADER(NewParallelArray);
static MNewParallelArray *New(JSObject *templateObject) {
return new MNewParallelArray(templateObject);
}
JSObject *templateObject() const {
return templateObject_;
}
};
class MNewArray : public MNullaryInstruction
{
public:
@ -1107,6 +1142,10 @@ class MNewArray : public MNullaryInstruction
return allocating_ == NewArray_Allocating;
}
// Returns true if the code generator should call through to the
// VM rather than the fast path.
bool shouldUseVM() const;
// NewArray is marked as non-effectful because all our allocations are
// either lazy when we are using "new Array(length)" or bounded by the
// script or the stack size when we are using "new Array(...)" or "[...]"
@ -1135,11 +1174,54 @@ class MNewObject : public MNullaryInstruction
return new MNewObject(templateObject);
}
// Returns true if the code generator should call through to the
// VM rather than the fast path.
bool shouldUseVM() const;
JSObject *templateObject() const {
return templateObject_;
}
};
// Could be allocating either a new array or a new object.
class MParNew : public MUnaryInstruction
{
CompilerRootObject templateObject_;
public:
INSTRUCTION_HEADER(ParNew);
MParNew(MDefinition *parSlice,
JSObject *templateObject)
: MUnaryInstruction(parSlice),
templateObject_(templateObject)
{
setResultType(MIRType_Object);
}
MDefinition *parSlice() const {
return getOperand(0);
}
JSObject *templateObject() const {
return templateObject_;
}
};
// Could be allocating either a new array or a new object.
class MParBailout : public MAryControlInstruction<0, 0>
{
public:
INSTRUCTION_HEADER(ParBailout);
MParBailout()
: MAryControlInstruction()
{
setResultType(MIRType_Undefined);
setGuard();
}
};
// Slow path for adding a property to an object without a known base.
class MInitProp
: public MAryInstruction<2>,
@ -1360,15 +1442,6 @@ class MApplyArgs
}
};
class MUnaryInstruction : public MAryInstruction<1>
{
protected:
MUnaryInstruction(MDefinition *ins)
{
setOperand(0, ins);
}
};
class MBinaryInstruction : public MAryInstruction<2>
{
protected:
@ -3188,6 +3261,45 @@ class MCheckOverRecursed : public MNullaryInstruction
INSTRUCTION_HEADER(CheckOverRecursed)
};
// Check the current frame for over-recursion past the global stack limit.
// Uses the per-thread recursion limit.
class MParCheckOverRecursed : public MUnaryInstruction
{
public:
INSTRUCTION_HEADER(ParCheckOverRecursed);
MParCheckOverRecursed(MDefinition *parForkJoinSlice)
: MUnaryInstruction(parForkJoinSlice)
{
setResultType(MIRType_None);
setGuard();
setMovable();
}
MDefinition *parSlice() const {
return getOperand(0);
}
};
// Check for an interrupt (or rendezvous) in parallel mode.
class MParCheckInterrupt : public MUnaryInstruction
{
public:
INSTRUCTION_HEADER(ParCheckInterrupt);
MParCheckInterrupt(MDefinition *parForkJoinSlice)
: MUnaryInstruction(parForkJoinSlice)
{
setResultType(MIRType_None);
setGuard();
setMovable();
}
MDefinition *parSlice() const {
return getOperand(0);
}
};
// Check the script's use count and trigger recompilation to inline
// calls when the script becomes hot.
class MRecompileCheck : public MNullaryInstruction
@ -3401,6 +3513,47 @@ class MLambda
}
};
class MParLambda
: public MBinaryInstruction,
public SingleObjectPolicy
{
CompilerRootFunction fun_;
MParLambda(MDefinition *parSlice,
MDefinition *scopeChain, JSFunction *fun)
: MBinaryInstruction(parSlice, scopeChain), fun_(fun)
{
setResultType(MIRType_Object);
}
public:
INSTRUCTION_HEADER(ParLambda);
static MParLambda *New(MDefinition *parSlice,
MDefinition *scopeChain, JSFunction *fun) {
return new MParLambda(parSlice, scopeChain, fun);
}
static MParLambda *New(MDefinition *parSlice,
MLambda *originalInstruction) {
return New(parSlice,
originalInstruction->scopeChain(),
originalInstruction->fun());
}
MDefinition *parSlice() const {
return getOperand(0);
}
MDefinition *scopeChain() const {
return getOperand(1);
}
JSFunction *fun() const {
return fun_;
}
};
// Determines the implicit |this| value for function calls.
class MImplicitThis
: public MUnaryInstruction,
@ -3960,11 +4113,13 @@ class MStoreElementCommon
{
bool needsBarrier_;
MIRType elementType_;
bool racy_; // if true, exempted from normal data race req. during par. exec.
protected:
MStoreElementCommon()
: needsBarrier_(false),
elementType_(MIRType_Value)
elementType_(MIRType_Value),
racy_(false)
{ }
public:
@ -3981,6 +4136,12 @@ class MStoreElementCommon
void setNeedsBarrier() {
needsBarrier_ = true;
}
bool racy() const {
return racy_;
}
void setRacy() {
racy_ = true;
}
};
// Store a value to a dense array slots vector.
@ -4288,9 +4449,12 @@ class MStoreTypedArrayElement
{
int arrayType_;
// See note in MStoreElementCommon.
bool racy_;
MStoreTypedArrayElement(MDefinition *elements, MDefinition *index, MDefinition *value,
int arrayType)
: MTernaryInstruction(elements, index, value), arrayType_(arrayType)
: MTernaryInstruction(elements, index, value), arrayType_(arrayType), racy_(false)
{
setResultType(MIRType_Value);
setMovable();
@ -4334,6 +4498,12 @@ class MStoreTypedArrayElement
AliasSet getAliasSet() const {
return AliasSet::Store(AliasSet::TypedArrayElement);
}
bool racy() const {
return racy_;
}
void setRacy() {
racy_ = true;
}
};
// Clamp input to range [0, 255] for Uint8ClampedArray.
@ -5054,6 +5224,27 @@ class MFunctionEnvironment
}
};
// Loads the current js::ForkJoinSlice*.
// Only applicable in ParallelExecution.
// See MIRGraph::parSlice(), which lazily inserts (and re-finds) this
// instruction in the entry block.
class MParSlice
  : public MNullaryInstruction
{
  public:
    MParSlice()
      : MNullaryInstruction()
    {
        setResultType(MIRType_ForkJoinSlice);
    }

    INSTRUCTION_HEADER(ParSlice);

    AliasSet getAliasSet() const {
        // Indicate that this instruction reads nothing, stores nothing.
        // (For all intents and purposes)
        return AliasSet::None();
    }
};
// Store to vp[slot] (slots that are not inline in an object).
class MStoreSlot
: public MBinaryInstruction,
@ -5895,6 +6086,62 @@ class MGetArgument
}
};
class MParWriteGuard
: public MBinaryInstruction,
public ObjectPolicy<1>
{
MParWriteGuard(MDefinition *parThreadContext,
MDefinition *obj)
: MBinaryInstruction(parThreadContext, obj)
{
setResultType(MIRType_None);
setGuard();
setMovable();
}
public:
INSTRUCTION_HEADER(ParWriteGuard);
static MParWriteGuard *New(MDefinition *parThreadContext, MDefinition *obj) {
return new MParWriteGuard(parThreadContext, obj);
}
MDefinition *parSlice() const {
return getOperand(0);
}
MDefinition *object() const {
return getOperand(1);
}
BailoutKind bailoutKind() const {
return Bailout_Normal;
}
AliasSet getAliasSet() const {
return AliasSet::None();
}
};
class MParDump
: public MUnaryInstruction,
public BoxPolicy<0>
{
public:
INSTRUCTION_HEADER(ParDump);
MParDump(MDefinition *v)
: MUnaryInstruction(v)
{
setResultType(MIRType_None);
}
MDefinition *value() const {
return getOperand(0);
}
TypePolicy *typePolicy() {
return this;
}
};
// Given a value, guard that the value is in a particular TypeSet, then returns
// that value.
class MTypeBarrier : public MUnaryInstruction
@ -6045,7 +6292,7 @@ class MNewCallObject : public MUnaryInstruction
MDefinition *slots() {
return getOperand(0);
}
JSObject *templateObj() {
JSObject *templateObject() {
return templateObj_;
}
AliasSet getAliasSet() const {
@ -6053,6 +6300,51 @@ class MNewCallObject : public MUnaryInstruction
}
};
class MParNewCallObject : public MBinaryInstruction
{
CompilerRootObject templateObj_;
MParNewCallObject(MDefinition *parSlice,
JSObject *templateObj, MDefinition *slots)
: MBinaryInstruction(parSlice, slots),
templateObj_(templateObj)
{
setResultType(MIRType_Object);
}
public:
INSTRUCTION_HEADER(ParNewCallObject);
static MParNewCallObject *New(MDefinition *parSlice,
JSObject *templateObj,
MDefinition *slots) {
return new MParNewCallObject(parSlice, templateObj, slots);
}
static MParNewCallObject *New(MDefinition *parSlice,
MNewCallObject *originalInstruction) {
return New(parSlice,
originalInstruction->templateObject(),
originalInstruction->slots());
}
MDefinition *parSlice() const {
return getOperand(0);
}
MDefinition *slots() const {
return getOperand(1);
}
JSObject *templateObj() const {
return templateObj_;
}
AliasSet getAliasSet() const {
return AliasSet::None();
}
};
class MNewStringObject :
public MUnaryInstruction,
public StringPolicy
@ -6158,6 +6450,38 @@ class MEnclosingScope : public MLoadFixedSlot
}
};
// Creates a dense array of the given length in parallel execution.
//
// Note: the template object should be an *empty* dense array!
class MParNewDenseArray : public MBinaryInstruction
{
    CompilerRootObject templateObject_;

  public:
    INSTRUCTION_HEADER(ParNewDenseArray);

    MParNewDenseArray(MDefinition *parSlice,
                      MDefinition *length,
                      JSObject *templateObject)
      : MBinaryInstruction(parSlice, length),
        templateObject_(templateObject)
    {
        setResultType(MIRType_Object);
    }

    // Operand 0: the current ForkJoinSlice.
    MDefinition *parSlice() const {
        return getOperand(0);
    }

    // Operand 1: the requested array length.
    MDefinition *length() const {
        return getOperand(1);
    }

    JSObject *templateObject() const {
        return templateObject_;
    }
};
// A resume point contains the information needed to reconstruct the interpreter
// state from a position in the JIT. See the big comment near resumeAfter() in
// IonBuilder.cpp.

Просмотреть файл

@ -67,6 +67,37 @@ MIRGraph::unmarkBlocks() {
i->unmark();
}
// Returns the graph's MParSlice instruction, creating one on demand.
// Only valid for graphs compiled in ParallelExecution mode.
MDefinition *
MIRGraph::parSlice() {
    // Search the entry block to find a par slice instruction. If we do not
    // find one, add one after the Start instruction.
    //
    // Note: the original design used a field in MIRGraph to cache the
    // parSlice rather than searching for it again. However, this
    // could become out of date due to DCE. Given that we do not
    // generally have to search very far to find the par slice
    // instruction if it exists, and that we don't look for it that
    // often, I opted to simply eliminate the cache and search anew
    // each time, so that it is that much easier to keep the IR
    // coherent. - nmatsakis
    MBasicBlock *entry = entryBlock();
    JS_ASSERT(entry->info().executionMode() == ParallelExecution);

    MInstruction *start = NULL;
    for (MInstructionIterator ins(entry->begin()); ins != entry->end(); ins++) {
        if (ins->isParSlice())
            return *ins;
        else if (ins->isStart())
            start = *ins;
    }
    JS_ASSERT(start);

    // No existing MParSlice: lazily insert one right after Start.
    MParSlice *parSlice = new MParSlice();
    entry->insertAfter(start, parSlice);
    return parSlice;
}
MBasicBlock *
MBasicBlock::New(MIRGraph &graph, CompileInfo &info,
MBasicBlock *pred, jsbytecode *entryPc, Kind kind)
@ -127,6 +158,22 @@ MBasicBlock::NewSplitEdge(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred)
return MBasicBlock::New(graph, info, pred, pred->pc(), SPLIT_EDGE);
}
// Creates a normal block whose only control instruction is an MParBailout,
// i.e. a block that immediately bails out of parallel execution when
// entered. Returns NULL on allocation failure.
MBasicBlock *
MBasicBlock::NewParBailout(MIRGraph &graph, CompileInfo &info,
                           MBasicBlock *pred, jsbytecode *entryPc)
{
    MBasicBlock *block = MBasicBlock::New(graph, info, pred, entryPc, NORMAL);
    if (!block)
        return NULL;

    MParBailout *bailout = new MParBailout();
    if (!bailout)
        return NULL;

    block->end(bailout);
    return block;
}
MBasicBlock::MBasicBlock(MIRGraph &graph, CompileInfo &info, jsbytecode *pc, Kind kind)
: earlyAbort_(false),
graph_(graph),
@ -730,14 +777,27 @@ MBasicBlock::getSuccessor(size_t index) const
return lastIns()->getSuccessor(index);
}
// Returns the position of |block| within this block's successor list.
// |block| must actually be a successor.
size_t
MBasicBlock::getSuccessorIndex(MBasicBlock *block) const
{
    JS_ASSERT(lastIns());
    for (size_t i = 0; i < numSuccessors(); i++) {
        if (getSuccessor(i) == block)
            return i;
    }
    JS_NOT_REACHED("Invalid successor");
    // NOTE(review): there is no return statement after JS_NOT_REACHED;
    // this relies on the macro being treated as non-returning in all build
    // configurations — confirm for release builds.
}
// Replaces the successor at |pos| with |split|. Used for critical-edge
// splitting and for inserting bailout blocks; the successor being
// replaced must not be the successorWithPhis_.
void
MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock *split)
{
    JS_ASSERT(lastIns());

    // Note, during split-critical-edges, successors-with-phis is not yet set.
    // During PAA, this case is handled before we enter.
    JS_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));

    lastIns()->replaceSuccessor(pos, split);
}
void
@ -793,6 +853,7 @@ MBasicBlock::removePredecessor(MBasicBlock *pred)
predecessors_.erase(ptr);
return;
}
JS_NOT_REACHED("predecessor was not found");
}

Просмотреть файл

@ -79,6 +79,8 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
static MBasicBlock *NewPendingLoopHeader(MIRGraph &graph, CompileInfo &info,
MBasicBlock *pred, jsbytecode *entryPc);
static MBasicBlock *NewSplitEdge(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred);
static MBasicBlock *NewParBailout(MIRGraph &graph, CompileInfo &info,
MBasicBlock *pred, jsbytecode *entryPc);
bool dominates(MBasicBlock *other);
@ -165,8 +167,11 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
void inheritSlots(MBasicBlock *parent);
bool initEntrySlots();
// Replaces an edge for a given block with a new block. This is used for
// critical edge splitting.
// Replaces an edge for a given block with a new block. This is
// used for critical edge splitting and also for inserting
// bailouts during ParallelArrayAnalysis.
//
// Note: If successorWithPhis is set, you must not be replacing it.
void replacePredecessor(MBasicBlock *old, MBasicBlock *split);
void replaceSuccessor(size_t pos, MBasicBlock *split);
@ -394,6 +399,7 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
}
size_t numSuccessors() const;
MBasicBlock *getSuccessor(size_t index) const;
size_t getSuccessorIndex(MBasicBlock *) const;
// Specifies the closest loop header dominating this block.
void setLoopHeader(MBasicBlock *loop) {
@ -608,6 +614,14 @@ class MIRGraph
JSScript **scripts() {
return scripts_.begin();
}
// The ParSlice is an instance of ForkJoinSlice*, it carries
// "per-helper-thread" information. So as not to modify the
// calling convention for parallel code, we obtain the current
// slice from thread-local storage. This helper method will
// lazilly insert an MParSlice instruction in the entry block and
// return the definition.
MDefinition *parSlice();
};
class MDefinitionIterator

Просмотреть файл

@ -72,6 +72,7 @@ namespace ion {
_(TruncateToInt32) \
_(ToString) \
_(NewSlots) \
_(NewParallelArray) \
_(NewArray) \
_(NewObject) \
_(NewDeclEnvObject) \
@ -145,14 +146,32 @@ namespace ion {
_(InterruptCheck) \
_(FunctionBoundary) \
_(GetDOMProperty) \
_(SetDOMProperty)
_(SetDOMProperty) \
_(ParCheckOverRecursed) \
_(ParNewCallObject) \
_(ParNew) \
_(ParNewDenseArray) \
_(ParBailout) \
_(ParLambda) \
_(ParSlice) \
_(ParWriteGuard) \
_(ParDump) \
_(ParCheckInterrupt)
// Forward declarations of MIR types.
#define FORWARD_DECLARE(op) class M##op;
MIR_OPCODE_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE
class MInstructionVisitor
class MInstructionVisitor // interface i.e. pure abstract class
{
public:
#define VISIT_INS(op) virtual bool visit##op(M##op *) = 0;
MIR_OPCODE_LIST(VISIT_INS)
#undef VISIT_INS
};
class MInstructionVisitorWithDefaults : public MInstructionVisitor
{
public:
#define VISIT_INS(op) virtual bool visit##op(M##op *) { JS_NOT_REACHED("NYI: " #op); return false; }

Просмотреть файл

@ -0,0 +1,848 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <stdio.h>
#include "Ion.h"
#include "MIR.h"
#include "MIRGraph.h"
#include "ParallelArrayAnalysis.h"
#include "IonSpewer.h"
#include "UnreachableCodeElimination.h"
#include "IonAnalysis.h"
#include "vm/ParallelDo.h"
#include "vm/Stack.h"
namespace js {
namespace ion {
using parallel::Spew;
using parallel::SpewMIR;
using parallel::SpewCompile;
#define SAFE_OP(op) \
virtual bool visit##op(M##op *prop) { return true; }
#define CUSTOM_OP(op) \
virtual bool visit##op(M##op *prop);
#define DROP_OP(op) \
virtual bool visit##op(M##op *ins) { \
MBasicBlock *block = ins->block(); \
block->discard(ins); \
return true; \
}
#define PERMIT(T) (1 << T)
#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))
#define SPECIALIZED_OP(op, flags) \
virtual bool visit##op(M##op *ins) { \
return visitSpecializedInstruction(ins, ins->specialization(), flags); \
}
#define UNSAFE_OP(op) \
virtual bool visit##op(M##op *ins) { \
SpewMIR(ins, "Unsafe"); \
return markUnsafe(); \
}
#define WRITE_GUARDED_OP(op, obj) \
virtual bool visit##op(M##op *prop) { \
return insertWriteGuard(prop, prop->obj()); \
}
#define MAYBE_WRITE_GUARDED_OP(op, obj) \
virtual bool visit##op(M##op *prop) { \
if (prop->racy()) \
return true; \
return insertWriteGuard(prop, prop->obj()); \
}
class ParallelArrayVisitor : public MInstructionVisitor
{
JSContext *cx_;
ParallelCompileContext &compileContext_;
MIRGraph &graph_;
bool unsafe_;
MDefinition *parSlice_;
bool insertWriteGuard(MInstruction *writeInstruction,
MDefinition *valueBeingWritten);
bool replaceWithParNew(MInstruction *newInstruction,
JSObject *templateObject);
bool replace(MInstruction *oldInstruction,
MInstruction *replacementInstruction);
bool visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);
// Intended for use in a visitXyz() instruction like "return
// markUnsafe()". Sets the unsafe flag and returns true (since
// this does not indicate an unrecoverable compilation failure).
bool markUnsafe() {
JS_ASSERT(!unsafe_);
unsafe_ = true;
return true;
}
public:
AutoObjectVector callTargets;
ParallelArrayVisitor(JSContext *cx, ParallelCompileContext &compileContext,
MIRGraph &graph)
: cx_(cx),
compileContext_(compileContext),
graph_(graph),
unsafe_(false),
parSlice_(NULL),
callTargets(cx)
{ }
void clearUnsafe() { unsafe_ = false; }
bool unsafe() { return unsafe_; }
MDefinition *parSlice() {
if (!parSlice_)
parSlice_ = graph_.parSlice();
return parSlice_;
}
bool convertToBailout(MBasicBlock *block, MInstruction *ins);
// I am taking the policy of blacklisting everything that's not
// obviously safe for now. We can loosen as we need.
SAFE_OP(Constant)
SAFE_OP(Parameter)
SAFE_OP(Callee)
SAFE_OP(TableSwitch)
SAFE_OP(Goto)
CUSTOM_OP(Test)
CUSTOM_OP(Compare)
SAFE_OP(Phi)
SAFE_OP(Beta)
UNSAFE_OP(OsrValue)
UNSAFE_OP(OsrScopeChain)
UNSAFE_OP(ReturnFromCtor)
CUSTOM_OP(CheckOverRecursed)
DROP_OP(RecompileCheck)
UNSAFE_OP(DefVar)
UNSAFE_OP(DefFun)
UNSAFE_OP(CreateThis)
UNSAFE_OP(CreateThisWithTemplate)
UNSAFE_OP(CreateThisWithProto)
SAFE_OP(PrepareCall)
SAFE_OP(PassArg)
CUSTOM_OP(Call)
UNSAFE_OP(ApplyArgs)
SAFE_OP(BitNot)
UNSAFE_OP(TypeOf)
SAFE_OP(ToId)
SAFE_OP(BitAnd)
SAFE_OP(BitOr)
SAFE_OP(BitXor)
SAFE_OP(Lsh)
SAFE_OP(Rsh)
SPECIALIZED_OP(Ursh, PERMIT_NUMERIC)
SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
SAFE_OP(Abs)
SAFE_OP(Sqrt)
SAFE_OP(MathFunction)
SPECIALIZED_OP(Add, PERMIT_NUMERIC)
SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
SPECIALIZED_OP(Div, PERMIT_NUMERIC)
SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
UNSAFE_OP(Concat)
UNSAFE_OP(CharCodeAt)
UNSAFE_OP(FromCharCode)
SAFE_OP(Return)
CUSTOM_OP(Throw)
SAFE_OP(Box) // Boxing just creates a JSVal, doesn't alloc.
SAFE_OP(Unbox)
SAFE_OP(GuardObject)
SAFE_OP(ToDouble)
SAFE_OP(ToInt32)
SAFE_OP(TruncateToInt32)
UNSAFE_OP(ToString)
SAFE_OP(NewSlots)
CUSTOM_OP(NewArray)
CUSTOM_OP(NewObject)
CUSTOM_OP(NewCallObject)
CUSTOM_OP(NewParallelArray)
UNSAFE_OP(InitProp)
SAFE_OP(Start)
UNSAFE_OP(OsrEntry)
SAFE_OP(Nop)
UNSAFE_OP(RegExp)
CUSTOM_OP(Lambda)
UNSAFE_OP(ImplicitThis)
SAFE_OP(Slots)
SAFE_OP(Elements)
SAFE_OP(ConstantElements)
SAFE_OP(LoadSlot)
WRITE_GUARDED_OP(StoreSlot, slots)
SAFE_OP(FunctionEnvironment) // just a load of func env ptr
SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
UNSAFE_OP(GetPropertyCache)
UNSAFE_OP(GetElementCache)
UNSAFE_OP(BindNameCache)
SAFE_OP(GuardShape)
SAFE_OP(GuardClass)
SAFE_OP(ArrayLength)
SAFE_OP(TypedArrayLength)
SAFE_OP(TypedArrayElements)
SAFE_OP(InitializedLength)
WRITE_GUARDED_OP(SetInitializedLength, elements)
SAFE_OP(Not)
SAFE_OP(BoundsCheck)
SAFE_OP(BoundsCheckLower)
SAFE_OP(LoadElement)
SAFE_OP(LoadElementHole)
MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
WRITE_GUARDED_OP(StoreElementHole, elements)
UNSAFE_OP(ArrayPopShift)
UNSAFE_OP(ArrayPush)
SAFE_OP(LoadTypedArrayElement)
SAFE_OP(LoadTypedArrayElementHole)
MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
UNSAFE_OP(ClampToUint8)
SAFE_OP(LoadFixedSlot)
WRITE_GUARDED_OP(StoreFixedSlot, object)
UNSAFE_OP(CallGetProperty)
UNSAFE_OP(GetNameCache)
SAFE_OP(CallGetIntrinsicValue) // Bails in parallel mode
UNSAFE_OP(CallsiteCloneCache)
UNSAFE_OP(CallGetElement)
UNSAFE_OP(CallSetElement)
UNSAFE_OP(CallSetProperty)
UNSAFE_OP(DeleteProperty)
UNSAFE_OP(SetPropertyCache)
UNSAFE_OP(IteratorStart)
UNSAFE_OP(IteratorNext)
UNSAFE_OP(IteratorMore)
UNSAFE_OP(IteratorEnd)
SAFE_OP(StringLength)
UNSAFE_OP(ArgumentsLength)
UNSAFE_OP(GetArgument)
SAFE_OP(Floor)
SAFE_OP(Round)
UNSAFE_OP(InstanceOf)
CUSTOM_OP(InterruptCheck)
SAFE_OP(ParSlice)
SAFE_OP(ParNew)
SAFE_OP(ParNewDenseArray)
SAFE_OP(ParNewCallObject)
SAFE_OP(ParLambda)
SAFE_OP(ParDump)
SAFE_OP(ParBailout)
UNSAFE_OP(ArrayConcat)
UNSAFE_OP(GetDOMProperty)
UNSAFE_OP(SetDOMProperty)
UNSAFE_OP(NewStringObject)
UNSAFE_OP(Random)
UNSAFE_OP(Pow)
UNSAFE_OP(PowHalf)
UNSAFE_OP(RegExpTest)
UNSAFE_OP(CallInstanceOf)
UNSAFE_OP(FunctionBoundary)
UNSAFE_OP(GuardString)
UNSAFE_OP(NewDeclEnvObject)
UNSAFE_OP(In)
UNSAFE_OP(InArray)
SAFE_OP(ParWriteGuard)
SAFE_OP(ParCheckInterrupt)
SAFE_OP(ParCheckOverRecursed)
SAFE_OP(PolyInlineDispatch)
// It looks like this could easily be made safe:
UNSAFE_OP(ConvertElementsToDoubles)
};
// Adds |fun| to the parallel-compilation worklist unless it is known that
// compiling it would be useless (native, disabled, already compiling
// off-thread, expected to bail out, or not yet warmed up). Duplicate
// entries are suppressed. Returns false only if appending to the
// worklist fails.
bool
ParallelCompileContext::appendToWorklist(HandleFunction fun)
{
    JS_ASSERT(fun);

    // Native functions have no script to compile.
    if (!fun->isInterpreted())
        return true;

    RootedScript script(cx_, fun->nonLazyScript());

    // Skip if we're disabled.
    if (!script->canParallelIonCompile()) {
        Spew(SpewCompile, "Skipping %p:%s:%u, canParallelIonCompile() is false",
             fun.get(), script->filename, script->lineno);
        return true;
    }

    // Skip if we're compiling off thread.
    if (script->parallelIon == ION_COMPILING_SCRIPT) {
        Spew(SpewCompile, "Skipping %p:%s:%u, off-main-thread compilation in progress",
             fun.get(), script->filename, script->lineno);
        return true;
    }

    // Skip if the code is expected to result in a bailout.
    if (script->parallelIon && script->parallelIon->bailoutExpected()) {
        Spew(SpewCompile, "Skipping %p:%s:%u, bailout expected",
             fun.get(), script->filename, script->lineno);
        return true;
    }

    // Skip if we haven't warmed up to get some type info. We're betting
    // that the parallel kernel will be non-branchy for the most part, so
    // this threshold is usually very low (1).
    if (script->getUseCount() < js_IonOptions.usesBeforeCompileParallel) {
        Spew(SpewCompile, "Skipping %p:%s:%u, use count %u < %u",
             fun.get(), script->filename, script->lineno,
             script->getUseCount(), js_IonOptions.usesBeforeCompileParallel);
        return true;
    }

    // Suppress duplicates already on the worklist.
    for (uint32_t i = 0; i < worklist_.length(); i++) {
        if (worklist_[i]->toFunction() == fun)
            return true;
    }

    // Note that we add all possibly compilable functions to the worklist,
    // even if they're already compiled. This is so that we can return
    // Method_Compiled and not Method_Skipped if we have a worklist full of
    // already-compiled functions.
    return worklist_.append(fun);
}
// Analyzes |graph| for parallel-execution safety. Reachable blocks are
// visited in RPO; a block containing an unsafe instruction is converted
// into a bailout block, and unreachable blocks are then removed by UCE.
// Call targets discovered during the walk are appended to the worklist.
// Returns false if the graph cannot be compiled for parallel execution,
// or on cancellation/failure.
bool
ParallelCompileContext::analyzeAndGrowWorklist(MIRGenerator *mir, MIRGraph &graph)
{
    // Walk the basic blocks in a DFS. When we encounter a block with an
    // unsafe instruction, then we know that this block will bailout when
    // executed. Therefore, we replace the block.
    //
    // We don't need a worklist, though, because the graph is sorted
    // in RPO. Therefore, we just use the marked flags to tell us
    // when we visited some predecessor of the current block.
    ParallelArrayVisitor visitor(cx_, *this, graph);
    graph.entryBlock()->mark(); // Note: in par. exec., we never enter from OSR.
    uint32_t marked = 0;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (mir->shouldCancel("ParallelArrayAnalysis"))
            return false;

        if (block->isMarked()) {
            // Iterate through and transform the instructions. Stop
            // if we encounter an inherently unsafe operation, in
            // which case we will transform this block into a bailout
            // block.
            MInstruction *instr = NULL;
            for (MInstructionIterator ins(block->begin());
                 ins != block->end() && !visitor.unsafe();)
            {
                if (mir->shouldCancel("ParallelArrayAnalysis"))
                    return false;

                // We may be removing or replacing the current
                // instruction, so advance `ins` now. Remember the
                // last instr. we looked at for use later if it should
                // prove unsafe.
                instr = *ins++;

                if (!instr->accept(&visitor))
                    return false;
            }

            if (!visitor.unsafe()) {
                // Count the number of reachable blocks.
                marked++;

                // Block consists of only safe instructions. Visit its successors.
                for (uint32_t i = 0; i < block->numSuccessors(); i++)
                    block->getSuccessor(i)->mark();
            } else {
                // Block contains an unsafe instruction. That means that once
                // we enter this block, we are guaranteed to bailout.

                // If this is the entry block, then there is no point
                // in even trying to execute this function as it will
                // always bailout.
                if (*block == graph.entryBlock()) {
                    Spew(SpewCompile, "Entry block contains unsafe MIR");
                    return false;
                }

                // Otherwise, create a replacement that will.
                if (!visitor.convertToBailout(*block, instr))
                    return false;

                JS_ASSERT(!block->isMarked());
            }
        }
    }

    // Append newly discovered outgoing callgraph edges to the worklist.
    // NOTE(review): the return value of appendToWorklist() is ignored here,
    // so an append failure would be silently dropped — confirm intended.
    RootedFunction target(cx_);
    for (uint32_t i = 0; i < visitor.callTargets.length(); i++) {
        target = visitor.callTargets[i]->toFunction();
        appendToWorklist(target);
    }

    Spew(SpewCompile, "Safe");
    IonSpewPass("ParallelArrayAnalysis");

    UnreachableCodeElimination uce(mir, graph);
    if (!uce.removeUnmarkedBlocks(marked))
        return false;
    IonSpewPass("UCEAfterParallelArrayAnalysis");
    AssertExtendedGraphCoherency(graph);

    if (!removeResumePointOperands(mir, graph))
        return false;
    IonSpewPass("RemoveResumePointOperands");
    AssertExtendedGraphCoherency(graph);

    if (!EliminateDeadCode(mir, graph))
        return false;
    IonSpewPass("DCEAfterParallelArrayAnalysis");
    AssertExtendedGraphCoherency(graph);

    return true;
}
// Neuters all resume points in |graph| by replacing their operands with a
// single shared |undefined| constant (created right after the Start
// instruction). Always returns true.
bool
ParallelCompileContext::removeResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    // In parallel exec mode, nothing is effectful, therefore we do
    // not need to reconstruct interpreter state and can simply
    // bailout by returning a special code. Ideally we'd either
    // remove the unused resume points or else never generate them in
    // the first place, but I encountered various assertions and
    // crashes attempting to do that, so for the time being I simply
    // replace their operands with undefined. This prevents them from
    // interfering with DCE and other optimizations. It is also *necessary*
    // to handle cases like this:
    //
    //     foo(a, b, c.bar())
    //
    // where `foo` was deemed to be an unsafe function to call. This
    // is because without neutering the ResumePoints, they would still
    // refer to the MPassArg nodes generated for the call to foo().
    // But the call to foo() is dead and has been removed, leading to
    // an inconsistent IR and assertions at codegen time.
    MConstant *udef = NULL;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (udef)
            replaceOperandsOnResumePoint(block->entryResumePoint(), udef);

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
            if (ins->isStart()) {
                // Create the shared constant immediately after Start; |udef|
                // stays NULL (and resume points untouched) until Start is seen.
                JS_ASSERT(udef == NULL);
                udef = MConstant::New(UndefinedValue());
                block->insertAfter(*ins, udef);
            } else if (udef) {
                if (MResumePoint *resumePoint = ins->resumePoint())
                    replaceOperandsOnResumePoint(resumePoint, udef);
            }
        }
    }
    return true;
}
void
ParallelCompileContext::replaceOperandsOnResumePoint(MResumePoint *resumePoint,
                                                     MDefinition *withDef)
{
    // Overwrite every operand of `resumePoint` with `withDef`
    // (used to neuter resume points with `undefined`).
    size_t operandCount = resumePoint->numOperands();
    for (size_t opIndex = 0; opIndex < operandCount; opIndex++)
        resumePoint->replaceOperand(opIndex, withDef);
}
bool
ParallelArrayVisitor::visitTest(MTest *)
{
    // Branches are always safe in parallel execution; nothing to rewrite.
    return true;
}
bool
ParallelArrayVisitor::visitCompare(MCompare *compare)
{
    // Only comparisons specialized to int32, double, or string are
    // permitted in parallel code; any other specialization is unsafe.
    switch (compare->compareType()) {
      case MCompare::Compare_Int32:
      case MCompare::Compare_Double:
      case MCompare::Compare_String:
        return true;
      default:
        return false;
    }
}
bool
ParallelArrayVisitor::convertToBailout(MBasicBlock *block, MInstruction *ins)
{
    JS_ASSERT(unsafe()); // `block` must have contained unsafe items
    JS_ASSERT(block->isMarked()); // `block` must have been reachable to get here
    // Clear the unsafe flag for subsequent blocks.
    clearUnsafe();
    // This block is no longer reachable.
    block->unmark();
    // Determine the best PC to use for the bailouts we'll be creating.
    jsbytecode *pc = block->pc();
    // FIXME(review): this fallback re-reads the same block->pc() and is
    // therefore a no-op; presumably a different PC source was intended
    // (e.g. a predecessor's pc) -- confirm.
    if (!pc)
        pc = block->pc();
    // Create a bailout block for each predecessor. In principle, we
    // only need one bailout block--in fact, only one per graph! But I
    // found this approach easier to implement given the design of the
    // MIR Graph construction routines. Besides, most often `block`
    // has only one predecessor. Also, using multiple blocks helps to
    // keep the PC information more accurate (though replacing `block`
    // with exactly one bailout would be just as good).
    for (size_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock *pred = block->getPredecessor(i);
        // We only care about incoming edges from reachable predecessors.
        if (!pred->isMarked())
            continue;
        // create bailout block to insert on this edge
        MBasicBlock *bailBlock = MBasicBlock::NewParBailout(graph_, block->info(), pred, pc);
        if (!bailBlock)
            return false;
        // if `block` had phis, we are replacing it with `bailBlock` which does not
        if (pred->successorWithPhis() == block)
            pred->setSuccessorWithPhis(NULL, 0);
        // redirect the predecessor to the bailout block
        uint32_t succIdx = pred->getSuccessorIndex(block);
        pred->replaceSuccessor(succIdx, bailBlock);
        // Insert the bailout block after `block` in the execution
        // order. This should satisfy the RPO requirements and
        // moreover ensures that we will visit this block in our outer
        // walk, thus allowing us to keep the count of marked blocks
        // accurate.
        graph_.insertBlockAfter(block, bailBlock);
        bailBlock->mark();
    }
    return true;
}
/////////////////////////////////////////////////////////////////////////////
// Memory allocation
//
// Simple memory allocation opcodes---those which ultimately compile
// down to a (possibly inlined) invocation of NewGCThing()---are
// replaced with MParNew, which is supplied with the thread context.
// These allocations will take place using per-helper-thread arenas.
bool
ParallelArrayVisitor::visitNewParallelArray(MNewParallelArray *ins)
{
    // Swap the sequential allocation for its parallel counterpart,
    // which allocates from the per-thread arena via the slice.
    replace(ins, new MParNew(parSlice(), ins->templateObject()));
    return true;
}
bool
ParallelArrayVisitor::visitNewCallObject(MNewCallObject *ins)
{
    // fast path: replace with the thread-local ParNewCallObject op
    replace(ins, MParNewCallObject::New(parSlice(), ins));
    return true;
}
bool
ParallelArrayVisitor::visitLambda(MLambda *ins)
{
    // Cloning a singleton-typed function (or one needing a new type per
    // clone) requires VM-side work we cannot do on a worker thread.
    bool requiresVM = ins->fun()->hasSingletonType() ||
                      types::UseNewTypeForClone(ins->fun());
    if (requiresVM) {
        // slow path: bail on parallel execution.
        return markUnsafe();
    }
    // fast path: replace with ParLambda op
    replace(ins, MParLambda::New(parSlice(), ins));
    return true;
}
bool
ParallelArrayVisitor::visitNewObject(MNewObject *newInstruction)
{
    // Objects that must be allocated through the VM cannot be created
    // on a worker thread; mark the block unsafe in that case.
    if (!newInstruction->shouldUseVM())
        return replaceWithParNew(newInstruction, newInstruction->templateObject());
    SpewMIR(newInstruction, "should use VM");
    return markUnsafe();
}
bool
ParallelArrayVisitor::visitNewArray(MNewArray *newInstruction)
{
    // Mirrors visitNewObject: VM-only allocations are unsafe in
    // parallel code; everything else becomes a per-thread MParNew.
    if (!newInstruction->shouldUseVM())
        return replaceWithParNew(newInstruction, newInstruction->templateObject());
    SpewMIR(newInstruction, "should use VM");
    return markUnsafe();
}
bool
ParallelArrayVisitor::replaceWithParNew(MInstruction *newInstruction,
                                        JSObject *templateObject)
{
    // Substitute a per-thread allocation (MParNew) for the sequential
    // allocation instruction.
    MParNew *replacement = new MParNew(parSlice(), templateObject);
    replace(newInstruction, replacement);
    return true;
}
bool
ParallelArrayVisitor::replace(MInstruction *oldInstruction,
                              MInstruction *replacementInstruction)
{
    // Splice `replacementInstruction` into the block just before
    // `oldInstruction`, forward all uses to it, then drop the original.
    // The insertion must precede replaceAllUsesWith()/discard() so that
    // the replacement is part of the graph when uses are redirected.
    MBasicBlock *block = oldInstruction->block();
    block->insertBefore(oldInstruction, replacementInstruction);
    oldInstruction->replaceAllUsesWith(replacementInstruction);
    block->discard(oldInstruction);
    return true;
}
/////////////////////////////////////////////////////////////////////////////
// Write Guards
//
// We only want to permit writes to locally guarded objects.
// Furthermore, we want to avoid PICs and other non-thread-safe things
// (though perhaps we should support PICs at some point). If we
// cannot determine the origin of an object, we can insert a write
// guard which will check whether the object was allocated from the
// per-thread-arena or not.
bool
ParallelArrayVisitor::insertWriteGuard(MInstruction *writeInstruction,
                                       MDefinition *valueBeingWritten)
{
    // Many of the write operations do not take the JS object
    // but rather something derived from it, such as the elements.
    // So we need to identify the JS object:
    MDefinition *object;
    switch (valueBeingWritten->type()) {
      case MIRType_Object:
        object = valueBeingWritten;
        break;
      case MIRType_Slots:
        // Trace a slots pointer back to the object that owns it.
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Slots:
            object = valueBeingWritten->toSlots()->object();
            break;
          case MDefinition::Op_NewSlots:
            // Values produced by new slots will ALWAYS be
            // thread-local.
            return true;
          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;
      case MIRType_Elements:
        // Trace an elements pointer back to the owning (typed) array.
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Elements:
            object = valueBeingWritten->toElements()->object();
            break;
          case MDefinition::Op_TypedArrayElements:
            object = valueBeingWritten->toTypedArrayElements()->object();
            break;
          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;
      default:
        // Unknown provenance: we cannot guard it, so reject the write.
        SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
                valueBeingWritten->type());
        return markUnsafe();
    }
    // Look through unboxing to the underlying object definition.
    if (object->isUnbox())
        object = object->toUnbox()->input();
    switch (object->op()) {
      case MDefinition::Op_ParNew:
        // MParNew will always be creating something thread-local, omit the guard
        SpewMIR(writeInstruction, "write to ParNew prop does not require guard");
        return true;
      default:
        break;
    }
    // Otherwise, emit a dynamic guard that checks at runtime whether
    // `object` lives in the current thread's arena.
    MBasicBlock *block = writeInstruction->block();
    MParWriteGuard *writeGuard = MParWriteGuard::New(parSlice(), object);
    block->insertBefore(writeInstruction, writeGuard);
    // NOTE(review): adjustInputs() is passed the guard itself; this looks
    // intentional (re-type-check the guard's own operands) but confirm
    // against MParWriteGuard's definition.
    writeGuard->adjustInputs(writeGuard);
    return true;
}
/////////////////////////////////////////////////////////////////////////////
// Calls
//
// We only support calls to interpreted functions that that have already been
// Ion compiled. If a function has no IonScript, we bail out. The compilation
// is done during warmup of the parallel kernel, see js::RunScript.
// Collects into `targets` every interpreted function that type inference
// says `calleeTypes` could refer to at (script, pc). Returns false only
// on OOM/clone failure; an unknown callee set simply leaves `targets`
// empty (the caller treats that as "no known targets").
static bool
GetPossibleCallees(JSContext *cx, HandleScript script, jsbytecode *pc,
                   types::StackTypeSet *calleeTypes, AutoObjectVector &targets)
{
    JS_ASSERT(calleeTypes);
    // Non-zero base flags mean the callee set includes primitive/unknown
    // values; we cannot enumerate targets then.
    if (calleeTypes->baseFlags() != 0)
        return true;
    unsigned objCount = calleeTypes->getObjectCount();
    if (objCount == 0)
        return true;
    RootedFunction fun(cx);
    for (unsigned i = 0; i < objCount; i++) {
        // Each entry is either a singleton object or a type object.
        RawObject obj = calleeTypes->getSingleObject(i);
        if (obj && obj->isFunction()) {
            fun = obj->toFunction();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            if (!typeObj)
                continue;
            fun = typeObj->interpretedFunction;
            if (!fun)
                continue;
        }
        // Callsite-cloned functions must be resolved to the specific
        // clone for this callsite before being recorded.
        if (fun->isCloneAtCallsite()) {
            fun = CloneFunctionAtCallsite(cx, fun, script, pc);
            if (!fun)
                return false;
        }
        if (!targets.append(fun))
            return false;
    }
    return true;
}
bool
ParallelArrayVisitor::visitCall(MCall *ins)
{
    JS_ASSERT(ins->getSingleTarget() || ins->calleeTypes());
    // DOM? Scary.
    if (ins->isDOMFunction()) {
        SpewMIR(ins, "call to dom function");
        return markUnsafe();
    }
    RootedFunction target(cx_, ins->getSingleTarget());
    if (target) {
        // Native? Scary.
        if (target->isNative()) {
            SpewMIR(ins, "call to native function");
            return markUnsafe();
        }
        // Known interpreted target: queue it for transitive compilation.
        return callTargets.append(target);
    }
    // A `new` with an unknown target cannot be analyzed; reject.
    if (ins->isConstructing()) {
        SpewMIR(ins, "call to unknown constructor");
        return markUnsafe();
    }
    // Otherwise enumerate the possible callees from type information
    // and queue them all.
    RootedScript script(cx_, ins->block()->info().script());
    return GetPossibleCallees(cx_, script, ins->resumePoint()->pc(),
                              ins->calleeTypes(), callTargets);
}
/////////////////////////////////////////////////////////////////////////////
// Stack limit, interrupts
//
// In sequential Ion code, the stack limit is stored in the JSRuntime.
// We store it in the thread context. We therefore need a separate
// instruction to access it, one parameterized by the thread context.
// Similar considerations apply to checking for interrupts.
bool
ParallelArrayVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
{
    // The stack limit lives on the per-thread slice in parallel mode,
    // so swap in the slice-aware recursion check.
    return replace(ins, new MParCheckOverRecursed(parSlice()));
}
bool
ParallelArrayVisitor::visitInterruptCheck(MInterruptCheck *ins)
{
    // Interrupt flags are also per-slice; use the parallel check op.
    return replace(ins, new MParCheckInterrupt(parSlice()));
}
/////////////////////////////////////////////////////////////////////////////
// Specialized ops
//
// Some ops, like +, can be specialized to ints/doubles. Anything
// else is terrifying.
//
// TODO---Eventually, we should probably permit arbitrary + but bail
// if the operands are not both integers/floats.
bool
ParallelArrayVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec,
                                                  uint32_t flags)
{
    // Accept the instruction only if its specialization `spec` is one
    // of the permitted types recorded in the `flags` bitmask.
    if ((flags & (1 << spec)) != 0)
        return true;
    SpewMIR(ins, "specialized to unacceptable type %d", spec);
    return markUnsafe();
}
/////////////////////////////////////////////////////////////////////////////
// Throw
bool
ParallelArrayVisitor::visitThrow(MThrow *thr)
{
    // `throw` cannot be executed in parallel; replace the block's
    // terminator with a bailout so the slice aborts instead.
    MBasicBlock *block = thr->block();
    JS_ASSERT(block->lastIns() == thr);
    // The throw must be removed before the new terminator is attached.
    block->discardLastIns();
    MParBailout *bailout = new MParBailout();
    if (!bailout)
        return false;
    block->end(bailout);
    return true;
}
}
}

Просмотреть файл

@ -0,0 +1,61 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jsion_parallel_array_analysis_h__
#define jsion_parallel_array_analysis_h__
#include "MIR.h"
#include "CompileInfo.h"
namespace js {
class StackFrame;
namespace ion {
class MIRGraph;
class AutoDestroyAllocator;
// Drives parallel (ForkJoin) compilation: starting from a set of root
// functions, compiles each one for parallel execution and transitively
// compiles every call target discovered during analysis.
class ParallelCompileContext
{
  private:
    JSContext *cx_;
    // Compilation is transitive from some set of root(s): call targets
    // discovered during analysis are appended here and compiled too.
    AutoObjectVector worklist_;
    // Is a function compilable for parallel execution?
    bool analyzeAndGrowWorklist(MIRGenerator *mir, MIRGraph &graph);
    // Neuters resume-point operands (parallel code never reconstructs
    // interpreter state; see the .cpp for the full rationale).
    bool removeResumePointOperands(MIRGenerator *mir, MIRGraph &graph);
    void replaceOperandsOnResumePoint(MResumePoint *resumePoint, MDefinition *withDef);
  public:
    ParallelCompileContext(JSContext *cx)
      : cx_(cx),
        worklist_(cx)
    { }
    // Should we append a function to the worklist?
    bool appendToWorklist(HandleFunction fun);
    ExecutionMode executionMode() {
        return ParallelExecution;
    }
    // Defined in Ion.cpp, so that they can make use of static fns defined there
    MethodStatus checkScriptSize(JSContext *cx, UnrootedScript script);
    MethodStatus compileTransitively();
    AbortReason compile(IonBuilder *builder, MIRGraph *graph,
                        ScopedJSDeletePtr<LifoAlloc> &autoDelete);
};
} // namespace ion
} // namespace js
#endif // jsion_parallel_array_analysis_h__

Просмотреть файл

@ -0,0 +1,225 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jsinterp.h"
#include "ParallelFunctions.h"
#include "IonSpewer.h"
#include "jsinterpinlines.h"
#include "jscompartmentinlines.h"
#include "vm/ParallelDo.h"
using namespace js;
using namespace ion;
using parallel::Spew;
using parallel::SpewBailouts;
using parallel::SpewBailoutIR;
// Load the current thread context.
ForkJoinSlice *
ion::ParForkJoinSlice()
{
    // Thin ABI wrapper so jitted code can fetch the TLS slice via a call.
    return ForkJoinSlice::Current();
}
// ParNewGCThing() is called in place of NewGCThing() when executing
// parallel code. It uses the ArenaLists for the current thread and
// allocates from there.
JSObject *
ion::ParNewGCThing(gc::AllocKind allocKind)
{
    // Parallel-mode replacement for NewGCThing(): allocate from the
    // current worker thread's arenas via its slice allocator.
    ForkJoinSlice *currentSlice = ForkJoinSlice::Current();
    uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
    void *thing = currentSlice->allocator->parallelNewGCThing(allocKind, thingSize);
    return static_cast<JSObject *>(thing);
}
// Check that the object was created by the current thread
// (and hence is writable).
bool
ion::ParWriteGuard(ForkJoinSlice *slice, JSObject *object)
{
    JS_ASSERT(ForkJoinSlice::Current() == slice);
    // Writable iff the object's arena belongs to this slice's
    // thread-local allocator, i.e. this thread created the object.
    return slice->allocator->arenas.containsArena(slice->runtime(),
                                                  object->arenaHeader());
}
#ifdef DEBUG
// Dumps the most recently cached LIR trace record to stderr.
static void
printTrace(const char *prefix, struct IonLIRTraceData *cached)
{
    fprintf(stderr, "%s / Block %3u / LIR %3u / Mode %u / LIR %s\n",
            prefix,
            cached->bblock, cached->lir, cached->execModeInt, cached->lirOpName);
}
// Trace record for sequential (non-parallel) execution; parallel
// slices keep their own record in ForkJoinSlice::traceData.
struct IonLIRTraceData seqTraceData;
#endif
// Debug tracing hook invoked before every LIR when IONFLAGS contains
// "trace". Caches the current (block, lir, mode, names, script, pc) so
// that a later bailout (bblock == 0xDEADBEEF) can report the last LIR
// executed. No-op in non-DEBUG builds.
void
ion::TraceLIR(uint32_t bblock, uint32_t lir, uint32_t execModeInt,
              const char *lirOpName, const char *mirOpName,
              JSScript *script, jsbytecode *pc)
{
#ifdef DEBUG
    static enum { NotSet, All, Bailouts } traceMode;
    // If you set IONFLAGS=trace, this function will be invoked before every LIR.
    //
    // You can either modify it to do whatever you like, or use gdb scripting.
    // For example:
    //
    // break ParTrace
    // commands
    // continue
    // exit
    if (traceMode == NotSet) {
        // Racy, but that's ok.
        const char *env = getenv("IONFLAGS");
        // BUG FIX: getenv() returns NULL when IONFLAGS is unset, and the
        // previous code passed that straight to strstr(), which is
        // undefined behavior. Fall back to bailout-only tracing then.
        if (env && strstr(env, "trace-all"))
            traceMode = All;
        else
            traceMode = Bailouts;
    }
    // Sequential execution uses the global record; parallel slices each
    // have their own, so concurrent workers don't clobber one another.
    IonLIRTraceData *cached;
    if (execModeInt == 0)
        cached = &seqTraceData;
    else
        cached = &ForkJoinSlice::Current()->traceData;
    // The sentinel block index signals a bailout: report the *previous*
    // (cached) record, which identifies the LIR that bailed.
    if (bblock == 0xDEADBEEF) {
        if (execModeInt == 0)
            printTrace("BAILOUT", cached);
        else
            SpewBailoutIR(cached->bblock, cached->lir,
                          cached->lirOpName, cached->mirOpName,
                          cached->script, cached->pc);
    }
    cached->bblock = bblock;
    cached->lir = lir;
    cached->execModeInt = execModeInt;
    cached->lirOpName = lirOpName;
    cached->mirOpName = mirOpName;
    cached->script = script;
    cached->pc = pc;
    if (traceMode == All)
        printTrace("Exec", cached);
#endif
}
bool
ion::ParCheckOverRecursed(ForkJoinSlice *slice)
{
    JS_ASSERT(ForkJoinSlice::Current() == slice);
    // When an interrupt is triggered, we currently overwrite the
    // stack limit with a sentinel value that brings us here.
    // Therefore, we must check whether this is really a stack overrun
    // and, if not, check whether an interrupt is needed.
    if (slice->isMainThread()) {
        int stackDummy_;
        if (!JS_CHECK_STACK_SIZE(js::GetNativeStackLimit(slice->runtime()), &stackDummy_))
            return false;
        // Not a true overrun: the sentinel meant "interrupt requested".
        return ParCheckInterrupt(slice);
    } else {
        // FIXME---we don't overwrite the stack limit for worker
        // threads, which means that technically they can recurse
        // forever---or at least a long time---without ever checking
        // the interrupt. It also means that if we get here on a
        // worker thread, this is a real stack overrun!
        return false;
    }
}
bool
ion::ParCheckInterrupt(ForkJoinSlice *slice)
{
    JS_ASSERT(ForkJoinSlice::Current() == slice);
    // check() reports whether execution may proceed; propagate it as-is.
    return slice->check();
}
void
ion::ParDumpValue(Value *v)
{
    // Debug-only helper callable from jitted code to dump a Value;
    // compiles to a no-op in release builds.
#ifdef DEBUG
    js_DumpValue(*v);
#endif
}
JSObject*
ion::ParPush(ParPushArgs *args)
{
    // It is awkward to have the MIR pass the current slice in, so
    // just fetch it from TLS. Extending the array is kind of the
    // slow path anyhow as it reallocates the elements vector.
    ForkJoinSlice *slice = js::ForkJoinSlice::Current();
    JSObject::EnsureDenseResult res =
        args->object->parExtendDenseElements(slice->allocator,
                                             &args->value, 1);
    // NULL signals failure to the jitted caller; on success the object
    // itself is returned, which is convenient during code generation.
    if (res != JSObject::ED_OK)
        return NULL;
    return args->object;
}
JSObject *
ion::ParExtendArray(ForkJoinSlice *slice, JSObject *array, uint32_t length)
{
    // Grow `array` by `length` holes using the slice's thread-local
    // allocator; NULL signals failure to the jitted caller.
    JSObject::EnsureDenseResult result =
        array->parExtendDenseElements(slice->allocator, NULL, length);
    return (result == JSObject::ED_OK) ? array : NULL;
}
ParCompareResult
ion::ParCompareStrings(JSString *str1, JSString *str2)
{
    // NYI---the rope case: ropes would need flattening, which may
    // allocate, so report "unknown" and let the caller bail.
    if (!str1->isLinear() || !str2->isLinear())
        return ParCompareUnknown;
    bool equal = EqualStrings(&str1->asLinear(), &str2->asLinear());
    return equal ? ParCompareEq : ParCompareNe;
}
void
ion::ParallelAbort(JSScript *script)
{
    JS_ASSERT(InParallelSection());
    ForkJoinSlice *slice = ForkJoinSlice::Current();
    Spew(SpewBailouts, "Parallel abort in %p:%s:%d", script, script->filename, script->lineno);
    // Record only the first script to abort; later aborts on this slice
    // keep the original blame.
    if (!slice->abortedScript)
        slice->abortedScript = script;
}
void
ion::ParCallToUncompiledScript(JSFunction *func)
{
    // Diagnostic hook: reached when parallel code calls a target that
    // has no IonScript (the caller will then bail out).
    JS_ASSERT(InParallelSection());
#ifdef DEBUG
    RawScript script = func->nonLazyScript();
    Spew(SpewBailouts, "Call to uncompiled script: %p:%s:%d", script, script->filename, script->lineno);
#endif
}

Просмотреть файл

@ -0,0 +1,63 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jsion_parallel_functions_h__
#define jsion_parallel_functions_h__
#include "vm/ThreadPool.h"
#include "vm/ForkJoin.h"
#include "gc/Heap.h"
namespace js {
namespace ion {
// Fetches the current thread's ForkJoinSlice from TLS (callable from jit code).
ForkJoinSlice *ParForkJoinSlice();
// Parallel-mode replacement for NewGCThing(): allocates from the
// current worker thread's arenas.
JSObject *ParNewGCThing(gc::AllocKind allocKind);
// Returns true iff `object` was allocated by the current thread's
// allocator and is therefore safe to write to in parallel code.
bool ParWriteGuard(ForkJoinSlice *context, JSObject *object);
void ParBailout(uint32_t id);
// Distinguishes a real stack overrun from an interrupt-triggered
// sentinel stack limit; see the .cpp for details.
bool ParCheckOverRecursed(ForkJoinSlice *slice);
// Returns whether the slice may keep executing (no abort requested).
bool ParCheckInterrupt(ForkJoinSlice *context);
// Debug-only helper to dump a Value from jitted code.
void ParDumpValue(Value *v);
// We pass the arguments to ParPush in a structure because, in code
// gen, it is convenient to store them on the stack to avoid
// constraining the reg alloc for the slow path.
struct ParPushArgs {
    JSObject *object;
    Value value;
};
// Extends the given object with the given value (like `Array.push`).
// Returns NULL on failure or else `args->object`, which is convenient
// during code generation.
JSObject* ParPush(ParPushArgs *args);
// Extends the given array with `length` new holes. Returns NULL on
// failure or else `array`, which is convenient during code
// generation.
JSObject *ParExtendArray(ForkJoinSlice *slice, JSObject *array, uint32_t length);
// Three-valued string comparison result; Unknown is reported when the
// comparison cannot be performed in parallel (e.g. ropes).
enum ParCompareResult {
    ParCompareNe = false,
    ParCompareEq = true,
    ParCompareUnknown = 2
};
ParCompareResult ParCompareStrings(JSString *str1, JSString *str2);
// Records that the parallel section must abort, blaming `script`.
void ParallelAbort(JSScript *script);
// Debug tracing hook invoked before every LIR when IONFLAGS=trace.
void TraceLIR(uint32_t bblock, uint32_t lir, uint32_t execModeInt,
              const char *lirOpName, const char *mirOpName,
              JSScript *script, jsbytecode *pc);
// Diagnostic hook for parallel calls to targets lacking an IonScript.
void ParCallToUncompiledScript(JSFunction *func);
} // namespace ion
} // namespace js
#endif // jsion_parallel_functions_h__

Просмотреть файл

@ -290,32 +290,19 @@ TypeInferenceOracle::inArrayIsPacked(UnrootedScript script, jsbytecode *pc)
bool
TypeInferenceOracle::elementReadIsDenseNative(RawScript script, jsbytecode *pc)
{
// Check whether the object is a dense array and index is int32 or double.
StackTypeSet *obj = script->analysis()->poppedTypes(pc, 1);
StackTypeSet *id = script->analysis()->poppedTypes(pc, 0);
JSValueType idType = id->getKnownTypeTag();
if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)
return false;
Class *clasp = obj->getKnownClass();
return clasp && clasp->isNative();
return elementAccessIsDenseNative(script->analysis()->poppedTypes(pc, 1),
script->analysis()->poppedTypes(pc, 0));
}
bool
TypeInferenceOracle::elementReadIsTypedArray(HandleScript script, jsbytecode *pc, int *arrayType)
{
// Check whether the object is a typed array and index is int32 or double.
StackTypeSet *obj = script->analysis()->poppedTypes(pc, 1);
StackTypeSet *id = DropUnrooted(script)->analysis()->poppedTypes(pc, 0);
JSValueType idType = id->getKnownTypeTag();
if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)
return false;
*arrayType = obj->getTypedArrayType();
if (*arrayType == TypedArray::TYPE_MAX)
if (!elementAccessIsTypedArray(script->analysis()->poppedTypes(pc, 1),
script->analysis()->poppedTypes(pc, 0),
arrayType))
{
return false;
}
JS_ASSERT(*arrayType >= 0 && *arrayType < TypedArray::TYPE_MAX);
@ -404,10 +391,13 @@ TypeInferenceOracle::elementReadGeneric(UnrootedScript script, jsbytecode *pc, b
bool
TypeInferenceOracle::elementWriteIsDenseNative(HandleScript script, jsbytecode *pc)
{
// Check whether the object is a dense array and index is int32 or double.
StackTypeSet *obj = script->analysis()->poppedTypes(pc, 2);
StackTypeSet *id = script->analysis()->poppedTypes(pc, 1);
return elementAccessIsDenseNative(script->analysis()->poppedTypes(pc, 2),
script->analysis()->poppedTypes(pc, 1));
}
bool
TypeInferenceOracle::elementAccessIsDenseNative(StackTypeSet *obj, StackTypeSet *id)
{
JSValueType idType = id->getKnownTypeTag();
if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)
return false;
@ -422,9 +412,15 @@ TypeInferenceOracle::elementWriteIsDenseNative(HandleScript script, jsbytecode *
bool
TypeInferenceOracle::elementWriteIsTypedArray(RawScript script, jsbytecode *pc, int *arrayType)
{
// Check whether the object is a dense array and index is int32 or double.
StackTypeSet *obj = script->analysis()->poppedTypes(pc, 2);
StackTypeSet *id = script->analysis()->poppedTypes(pc, 1);
return elementAccessIsTypedArray(script->analysis()->poppedTypes(pc, 2),
script->analysis()->poppedTypes(pc, 1),
arrayType);
}
bool
TypeInferenceOracle::elementAccessIsTypedArray(StackTypeSet *obj, StackTypeSet *id, int *arrayType)
{
// Check whether the object is a typed array and index is int32 or double.
JSValueType idType = id->getKnownTypeTag();
if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)

Просмотреть файл

@ -13,6 +13,7 @@
namespace js {
namespace ion {
enum LazyArgumentsType {
MaybeArguments = 0,
DefinitelyArguments,
@ -121,6 +122,12 @@ class TypeOracle
virtual bool elementWriteIsPacked(UnrootedScript script, jsbytecode *pc) {
return false;
}
virtual bool elementAccessIsDenseNative(types::StackTypeSet *obj, types::StackTypeSet *id) {
return false;
}
virtual bool elementAccessIsTypedArray(types::StackTypeSet *obj, types::StackTypeSet *id, int *arrayType) {
return false;
}
virtual bool arrayResultShouldHaveDoubleConversion(UnrootedScript script, jsbytecode *pc) {
return false;
}
@ -251,7 +258,9 @@ class TypeInferenceOracle : public TypeOracle
bool elementReadIsPacked(UnrootedScript script, jsbytecode *pc);
void elementReadGeneric(UnrootedScript script, jsbytecode *pc, bool *cacheable, bool *monitorResult);
bool elementWriteIsDenseNative(HandleScript script, jsbytecode *pc);
bool elementAccessIsDenseNative(types::StackTypeSet *obj, types::StackTypeSet *id);
bool elementWriteIsTypedArray(RawScript script, jsbytecode *pc, int *arrayType);
bool elementAccessIsTypedArray(types::StackTypeSet *obj, types::StackTypeSet *id, int *arrayType);
bool elementWriteNeedsDoubleConversion(UnrootedScript script, jsbytecode *pc);
bool elementWriteHasExtraIndexedProperty(UnrootedScript script, jsbytecode *pc);
bool elementWriteIsPacked(UnrootedScript script, jsbytecode *pc);
@ -360,6 +369,8 @@ StringFromMIRType(MIRType type)
return "Elements";
case MIRType_StackFrame:
return "StackFrame";
case MIRType_ForkJoinSlice:
return "ForkJoinSlice";
default:
JS_NOT_REACHED("Unknown MIRType.");
return "";

Просмотреть файл

@ -35,17 +35,36 @@ UnreachableCodeElimination::analyze()
// Pass 1: Identify unreachable blocks (if any).
if (!prunePointlessBranchesAndMarkReachableBlocks())
return false;
return removeUnmarkedBlocksAndCleanup();
}
bool
UnreachableCodeElimination::removeUnmarkedBlocks(size_t marked)
{
    // Entry point for callers (e.g. ParallelArrayAnalysis) that have
    // already marked the reachable blocks themselves; `marked` is the
    // number of blocks they marked.
    marked_ = marked;
    return removeUnmarkedBlocksAndCleanup();
}
bool
UnreachableCodeElimination::removeUnmarkedBlocksAndCleanup()
{
// Everything is reachable, no work required.
JS_ASSERT(marked_ <= graph_.numBlocks());
if (marked_ == graph_.numBlocks()) {
// Everything is reachable.
graph_.unmarkBlocks();
return true;
}
// Pass 2: Remove unmarked blocks.
// Pass 2: Remove unmarked blocks (see analyze() above).
if (!removeUnmarkedBlocksAndClearDominators())
return false;
graph_.unmarkBlocks();
AssertGraphCoherency(graph_);
IonSpewPass("UCEMidPoint");
// Pass 3: Recompute dominators and tweak phis.
BuildDominatorTree(graph_);
if (redundantPhis_ && !EliminatePhis(mir_, graph_, ConservativeObservability))

Просмотреть файл

@ -26,6 +26,7 @@ class UnreachableCodeElimination
bool prunePointlessBranchesAndMarkReachableBlocks();
void removeUsesFromUnmarkedBlocks(MDefinition *instr);
bool removeUnmarkedBlocksAndClearDominators();
bool removeUnmarkedBlocksAndCleanup();
public:
UnreachableCodeElimination(MIRGenerator *mir, MIRGraph &graph)
@ -35,7 +36,13 @@ class UnreachableCodeElimination
redundantPhis_(false)
{}
// Walks the graph and discovers what is reachable. Removes everything else.
bool analyze();
// Removes any blocks that are not marked. Assumes that these blocks are not
// reachable. The parameter |marked| should be the number of blocks that
// are marked.
bool removeUnmarkedBlocks(size_t marked);
};
} /* namespace ion */

Просмотреть файл

@ -13,6 +13,8 @@
#include "vm/StringObject-inl.h"
#include "builtin/ParallelArray.h"
#include "jsboolinlines.h"
#include "jsinterpinlines.h"

Просмотреть файл

@ -103,13 +103,6 @@ CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch *test)
return true;
}
void
CodeGeneratorARM::emitSet(Assembler::Condition cond, const Register &dest)
{
masm.ma_mov(Imm32(0), dest);
masm.ma_mov(Imm32(1), dest, NoSetCond, cond);
}
bool
CodeGeneratorARM::visitCompare(LCompare *comp)
{
@ -1236,7 +1229,7 @@ CodeGeneratorARM::visitCompareD(LCompareD *comp)
Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
masm.compareDouble(lhs, rhs);
emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
return true;
}
@ -1270,7 +1263,7 @@ CodeGeneratorARM::visitCompareB(LCompareB *lir)
masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
else
masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
emitSet(JSOpToCondition(mir->jsop()), output);
masm.emitSet(JSOpToCondition(mir->jsop()), output);
masm.jump(&done);
}
@ -1322,7 +1315,7 @@ CodeGeneratorARM::visitCompareV(LCompareV *lir)
masm.j(Assembler::NotEqual, &notEqual);
{
masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
emitSet(cond, output);
masm.emitSet(cond, output);
masm.jump(&done);
}
masm.bind(&notEqual);
@ -1363,7 +1356,7 @@ CodeGeneratorARM::visitNotI(LNotI *ins)
{
// It is hard to optimize !x, so just do it the basic way for now.
masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
emitSet(Assembler::Equal, ToRegister(ins->output()));
masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
return true;
}

Просмотреть файл

@ -57,9 +57,6 @@ class CodeGeneratorARM : public CodeGeneratorShared
void emitRoundDouble(const FloatRegister &src, const Register &dest, Label *fail);
// Emits a conditional set.
void emitSet(Assembler::Condition cond, const Register &dest);
// Emits a branch that directs control flow to the true block if |cond| is
// true, and the false block if |cond| is false.
void emitBranch(Assembler::Condition cond, MBasicBlock *ifTrue, MBasicBlock *ifFalse);

Просмотреть файл

@ -1486,6 +1486,12 @@ MacroAssemblerARMCompat::add32(Imm32 imm, Register dest)
ma_add(imm, dest, SetCond);
}
void
MacroAssemblerARMCompat::xor32(Imm32 imm, Register dest)
{
    // dest ^= imm. NOTE(review): emitted with SetCond, so the condition
    // flags are updated, matching add32/sub32 in this file -- confirm
    // callers rely on (or at least tolerate) the flag update.
    ma_eor(imm, dest, SetCond);
}
void
MacroAssemblerARMCompat::add32(Imm32 imm, const Address &dest)
{

Просмотреть файл

@ -713,6 +713,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_ldr(Operand(address.base, address.offset), ScratchRegister);
branchTest32(cond, ScratchRegister, imm, label);
}
void branchTestBool(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
branchTest32(cond, lhs, rhs, label);
}
void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
branchTest32(cond, lhs, rhs, label);
}
@ -929,6 +932,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void add32(Imm32 imm, Register dest);
void add32(Imm32 imm, const Address &dest);
void sub32(Imm32 imm, Register dest);
void xor32(Imm32 imm, Register dest);
void and32(Imm32 imm, Register dest);
void and32(Imm32 imm, const Address &dest);
@ -1061,6 +1065,13 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_lsl(imm, dest, dest);
}
void
emitSet(Assembler::Condition cond, const Register &dest)
{
    // Materialize a boolean: dest = (cond holds) ? 1 : 0. The
    // unconditional clear uses NoSetCond implicitly-equivalent encoding
    // ordering: it runs first and the conditional move of 1 is emitted
    // with NoSetCond, so the tested flags are never disturbed.
    ma_mov(Imm32(0), dest);
    ma_mov(Imm32(1), dest, NoSetCond, cond);
}
// Setup a call to C/C++ code, given the number of general arguments it
// takes. Note that this only supports cdecl.
//

Просмотреть файл

@ -14,6 +14,8 @@
#include "CodeGenerator-shared-inl.h"
#include "ion/IonSpewer.h"
#include "ion/IonMacroAssembler.h"
#include "ion/ParallelFunctions.h"
#include "builtin/ParallelArray.h"
using namespace js;
using namespace js::ion;
@ -25,6 +27,7 @@ namespace ion {
CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph)
: oolIns(NULL),
oolParallelAbort_(NULL),
masm(&sps_),
gen(gen),
graph(*graph),
@ -501,5 +504,89 @@ CodeGeneratorShared::markArgumentSlots(LSafepoint *safepoint)
return true;
}
bool
CodeGeneratorShared::ensureOutOfLineParallelAbort(Label **result)
{
    // Lazily create the single shared out-of-line parallel-abort stub
    // and return its entry label through `result`.
    if (!oolParallelAbort_) {
        oolParallelAbort_ = new OutOfLineParallelAbort();
        if (!addOutOfLineCode(oolParallelAbort_))
            return false;
    }
    *result = oolParallelAbort_->entry();
    return true;
}
bool
OutOfLineParallelAbort::generate(CodeGeneratorShared *codegen)
{
    // Emit the bailout trace marker (block index 0xDEADBEEF signals a
    // bailout to TraceLIR) before the codegen-specific abort path.
    codegen->callTraceLIR(0xDEADBEEF, NULL, "ParallelBailout");
    return codegen->visitOutOfLineParallelAbort(this);
}
// Emits a call to ion::TraceLIR describing the current LIR (or, when
// `lir` is NULL, a named bailout). All registers are saved/restored
// around the ABI call, so this can be sprinkled anywhere. No-op unless
// IonSpew_Trace is enabled.
bool
CodeGeneratorShared::callTraceLIR(uint32_t blockIndex, LInstruction *lir,
                                  const char *bailoutName)
{
    JS_ASSERT_IF(!lir, bailoutName);
    uint32_t emi = (uint32_t) gen->info().executionMode();
    if (!IonSpewEnabled(IonSpew_Trace))
        return true;
    masm.PushRegsInMask(RegisterSet::All());
    RegisterSet regSet(RegisterSet::All());
    // Grab seven scratch registers, one per TraceLIR argument.
    Register blockIndexReg = regSet.takeGeneral();
    Register lirIndexReg = regSet.takeGeneral();
    Register emiReg = regSet.takeGeneral();
    Register lirOpNameReg = regSet.takeGeneral();
    Register mirOpNameReg = regSet.takeGeneral();
    Register scriptReg = regSet.takeGeneral();
    Register pcReg = regSet.takeGeneral();
    // This first move is here so that when you scan the disassembly,
    // you can easily pick out where each instruction begins.  The
    // next few items indicate to you the Basic Block / LIR.
    masm.move32(Imm32(0xDEADBEEF), blockIndexReg);
    if (lir) {
        masm.move32(Imm32(blockIndex), blockIndexReg);
        masm.move32(Imm32(lir->id()), lirIndexReg);
        masm.move32(Imm32(emi), emiReg);
        masm.movePtr(ImmWord(lir->opName()), lirOpNameReg);
        if (MDefinition *mir = lir->mirRaw()) {
            masm.movePtr(ImmWord(mir->opName()), mirOpNameReg);
            masm.movePtr(ImmWord((void *)mir->block()->info().script()), scriptReg);
            masm.movePtr(ImmWord(mir->trackedPc()), pcReg);
        } else {
            // No MIR attached to this LIR: pass NULLs for the MIR info.
            masm.movePtr(ImmWord((void *)NULL), mirOpNameReg);
            masm.movePtr(ImmWord((void *)NULL), scriptReg);
            masm.movePtr(ImmWord((void *)NULL), pcReg);
        }
    } else {
        // Bailout marker: sentinel indices plus the bailout's name in
        // both op-name slots.
        masm.move32(Imm32(0xDEADBEEF), blockIndexReg);
        masm.move32(Imm32(0xDEADBEEF), lirIndexReg);
        masm.move32(Imm32(emi), emiReg);
        masm.movePtr(ImmWord(bailoutName), lirOpNameReg);
        masm.movePtr(ImmWord(bailoutName), mirOpNameReg);
        masm.movePtr(ImmWord((void *)NULL), scriptReg);
        masm.movePtr(ImmWord((void *)NULL), pcReg);
    }
    masm.setupUnalignedABICall(7, CallTempReg4);
    masm.passABIArg(blockIndexReg);
    masm.passABIArg(lirIndexReg);
    masm.passABIArg(emiReg);
    masm.passABIArg(lirOpNameReg);
    masm.passABIArg(mirOpNameReg);
    masm.passABIArg(scriptReg);
    masm.passABIArg(pcReg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, TraceLIR));
    masm.PopRegsInMask(RegisterSet::All());
    return true;
}
} // namespace ion
} // namespace js

Просмотреть файл

@ -25,6 +25,7 @@ namespace ion {
class OutOfLineCode;
class CodeGenerator;
class MacroAssembler;
class OutOfLineParallelAbort;
template <class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM;
@ -34,6 +35,7 @@ class CodeGeneratorShared : public LInstructionVisitor
{
js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
OutOfLineCode *oolIns;
OutOfLineParallelAbort *oolParallelAbort_;
public:
MacroAssembler masm;
@ -293,6 +295,15 @@ class CodeGeneratorShared : public LInstructionVisitor
bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);
bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);
public:
// When compiling parallel code, all bailouts just abort funnel to
// this same point and hence abort execution altogether:
virtual bool visitOutOfLineParallelAbort(OutOfLineParallelAbort *ool) = 0;
bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = NULL);
protected:
bool ensureOutOfLineParallelAbort(Label **result);
};
// Wrapper around Label, on the heap, to avoid a bogus assert with OOM.
@ -541,6 +552,17 @@ CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>
return true;
}
// An out-of-line parallel abort thunk.
class OutOfLineParallelAbort : public OutOfLineCode
{
public:
OutOfLineParallelAbort()
{ }
bool generate(CodeGeneratorShared *codegen);
};
} // namespace ion
} // namespace js

Просмотреть файл

@ -15,6 +15,7 @@
#include "ion/IonFrames.h"
#include "ion/MoveEmitter.h"
#include "ion/IonCompartment.h"
#include "ion/ParallelFunctions.h"
using namespace js;
using namespace js::ion;
@ -119,42 +120,6 @@ CodeGeneratorX86Shared::visitTestDAndBranch(LTestDAndBranch *test)
return true;
}
void
CodeGeneratorX86Shared::emitSet(Assembler::Condition cond, const Register &dest,
Assembler::NaNCond ifNaN)
{
if (GeneralRegisterSet(Registers::SingleByteRegs).has(dest)) {
// If the register we're defining is a single byte register,
// take advantage of the setCC instruction
masm.setCC(cond, dest);
masm.movzxbl(dest, dest);
if (ifNaN != Assembler::NaN_Unexpected) {
Label noNaN;
masm.j(Assembler::NoParity, &noNaN);
if (ifNaN == Assembler::NaN_IsTrue)
masm.movl(Imm32(1), dest);
else
masm.xorl(dest, dest);
masm.bind(&noNaN);
}
} else {
Label end;
Label ifFalse;
if (ifNaN == Assembler::NaN_IsFalse)
masm.j(Assembler::Parity, &ifFalse);
masm.movl(Imm32(1), dest);
masm.j(cond, &end);
if (ifNaN == Assembler::NaN_IsTrue)
masm.j(Assembler::Parity, &end);
masm.bind(&ifFalse);
masm.xorl(dest, dest);
masm.bind(&end);
}
}
void
CodeGeneratorX86Shared::emitCompare(MCompare::CompareType type, const LAllocation *left, const LAllocation *right)
{
@ -175,7 +140,7 @@ bool
CodeGeneratorX86Shared::visitCompare(LCompare *comp)
{
emitCompare(comp->mir()->compareType(), comp->left(), comp->right());
emitSet(JSOpToCondition(comp->jsop()), ToRegister(comp->output()));
masm.emitSet(JSOpToCondition(comp->jsop()), ToRegister(comp->output()));
return true;
}
@ -196,7 +161,7 @@ CodeGeneratorX86Shared::visitCompareD(LCompareD *comp)
Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
masm.compareDouble(cond, lhs, rhs);
emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()),
masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()),
Assembler::NaNCondFromDoubleCondition(cond));
return true;
}
@ -205,7 +170,7 @@ bool
CodeGeneratorX86Shared::visitNotI(LNotI *ins)
{
masm.cmpl(ToRegister(ins->input()), Imm32(0));
emitSet(Assembler::Equal, ToRegister(ins->output()));
masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
return true;
}
@ -216,7 +181,7 @@ CodeGeneratorX86Shared::visitNotD(LNotD *ins)
masm.xorpd(ScratchFloatReg, ScratchFloatReg);
masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, ScratchFloatReg);
emitSet(Assembler::Equal, ToRegister(ins->output()), Assembler::NaN_IsTrue);
masm.emitSet(Assembler::Equal, ToRegister(ins->output()), Assembler::NaN_IsTrue);
return true;
}
@ -290,6 +255,20 @@ class BailoutLabel {
template <typename T> bool
CodeGeneratorX86Shared::bailout(const T &binder, LSnapshot *snapshot)
{
CompileInfo &info = snapshot->mir()->block()->info();
switch (info.executionMode()) {
case ParallelExecution: {
// in parallel mode, make no attempt to recover, just signal an error.
Label *ool;
if (!ensureOutOfLineParallelAbort(&ool))
return false;
binder(masm, ool);
return true;
}
case SequentialExecution: break;
}
if (!encode(snapshot))
return false;

Просмотреть файл

@ -64,11 +64,6 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
void emitCompare(MCompare::CompareType type, const LAllocation *left, const LAllocation *right);
// Emits a conditional set.
void emitSet(Assembler::Condition cond, const Register &dest,
Assembler::NaNCond ifNaN = Assembler::NaN_Unexpected);
void emitSet(Assembler::DoubleCondition cond, const Register &dest);
// Emits a branch that directs control flow to the true block if |cond| is
// true, and the false block if |cond| is false.
void emitBranch(Assembler::Condition cond, MBasicBlock *ifTrue, MBasicBlock *ifFalse,

Просмотреть файл

@ -25,7 +25,7 @@ class MDefinition;
class MInstruction;
class LOsiPoint;
class LIRGeneratorShared : public MInstructionVisitor
class LIRGeneratorShared : public MInstructionVisitorWithDefaults
{
protected:
MIRGenerator *gen;

Просмотреть файл

@ -111,6 +111,9 @@ class MacroAssemblerX86Shared : public Assembler
void sub32(Imm32 imm, Register dest) {
subl(imm, dest);
}
void xor32(Imm32 imm, Register dest) {
xorl(imm, dest);
}
void branch32(Condition cond, const Address &lhs, const Register &rhs, Label *label) {
cmpl(Operand(lhs), rhs);
@ -140,6 +143,10 @@ class MacroAssemblerX86Shared : public Assembler
testl(Operand(address), imm);
j(cond, label);
}
void branchTestBool(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
testb(lhs, rhs);
j(cond, label);
}
// The following functions are exposed for use in platform-shared code.
template <typename T>
@ -344,7 +351,7 @@ class MacroAssemblerX86Shared : public Assembler
}
bool maybeInlineDouble(uint64_t u, const FloatRegister &dest) {
// This implements parts of "13.4 Generating constants" of
// This implements parts of "13.4 Generating constants" of
// "2. Optimizing subroutines in assembly language" by Agner Fog.
switch (u) {
case 0x0000000000000000ULL: // 0.0
@ -384,6 +391,40 @@ class MacroAssemblerX86Shared : public Assembler
return true;
}
void emitSet(Assembler::Condition cond, const Register &dest,
Assembler::NaNCond ifNaN = Assembler::NaN_Unexpected) {
if (GeneralRegisterSet(Registers::SingleByteRegs).has(dest)) {
// If the register we're defining is a single byte register,
// take advantage of the setCC instruction
setCC(cond, dest);
movzxbl(dest, dest);
if (ifNaN != Assembler::NaN_Unexpected) {
Label noNaN;
j(Assembler::NoParity, &noNaN);
if (ifNaN == Assembler::NaN_IsTrue)
movl(Imm32(1), dest);
else
xorl(dest, dest);
bind(&noNaN);
}
} else {
Label end;
Label ifFalse;
if (ifNaN == Assembler::NaN_IsFalse)
j(Assembler::Parity, &ifFalse);
movl(Imm32(1), dest);
j(cond, &end);
if (ifNaN == Assembler::NaN_IsTrue)
j(Assembler::Parity, &end);
bind(&ifFalse);
xorl(dest, dest);
bind(&end);
}
}
// Emit a JMP that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
CodeOffsetLabel toggledJump(Label *label) {
CodeOffsetLabel offset(size());

Просмотреть файл

@ -347,7 +347,7 @@ CodeGeneratorX64::visitCompareB(LCompareB *lir)
// Perform the comparison.
masm.cmpq(lhs.valueReg(), ScratchReg);
emitSet(JSOpToCondition(mir->jsop()), output);
masm.emitSet(JSOpToCondition(mir->jsop()), output);
return true;
}
@ -380,11 +380,10 @@ CodeGeneratorX64::visitCompareV(LCompareV *lir)
const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
const Register output = ToRegister(lir->output());
JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
JS_ASSERT(IsEqualityOp(mir->jsop()));
masm.cmpq(lhs.valueReg(), rhs.valueReg());
emitSet(JSOpToCondition(mir->jsop()), output);
masm.emitSet(JSOpToCondition(mir->jsop()), output);
return true;
}

Просмотреть файл

@ -16,6 +16,7 @@
#include "vm/Shape.h"
#include "jsscriptinlines.h"
#include "ion/ExecutionModeInlines.h"
using namespace js;
using namespace js::ion;
@ -141,12 +142,14 @@ CodeGeneratorX86::visitUnbox(LUnbox *unbox)
void
CodeGeneratorX86::linkAbsoluteLabels()
{
ExecutionMode executionMode = gen->info().executionMode();
UnrootedScript script = gen->info().script();
IonCode *method = script->ion->method();
IonScript *ionScript = GetIonScript(script, executionMode);
IonCode *method = ionScript->method();
for (size_t i = 0; i < deferredDoubles_.length(); i++) {
DeferredDouble *d = deferredDoubles_[i];
const Value &v = script->ion->getConstant(d->index());
const Value &v = ionScript->getConstant(d->index());
MacroAssembler::Bind(method, d->label(), &v);
}
}
@ -348,7 +351,7 @@ CodeGeneratorX86::visitCompareB(LCompareB *lir)
masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
else
masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
emitSet(JSOpToCondition(mir->jsop()), output);
masm.emitSet(JSOpToCondition(mir->jsop()), output);
masm.jump(&done);
}
masm.bind(&notBoolean);
@ -391,15 +394,14 @@ CodeGeneratorX86::visitCompareV(LCompareV *lir)
const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
const Register output = ToRegister(lir->output());
JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
JS_ASSERT(IsEqualityOp(mir->jsop()));
Label notEqual, done;
masm.cmp32(lhs.typeReg(), rhs.typeReg());
masm.j(Assembler::NotEqual, &notEqual);
{
masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
emitSet(cond, output);
masm.emitSet(cond, output);
masm.jump(&done);
}
masm.bind(&notEqual);

Просмотреть файл

@ -1,4 +1,5 @@
assertEq("a".localeCompare(), 0);
assertEq("undefined".localeCompare(), 0);
assertEq("a".localeCompare(), "a".localeCompare("undefined"));
assertEq("a".localeCompare("b"), -1);
assertEq("a".localeCompare("b", "a"), -1);
assertEq("b".localeCompare("a"), 1);

Просмотреть файл

@ -133,9 +133,6 @@ class AutoVersionAPI
JSVersion oldDefaultVersion;
bool oldHasVersionOverride;
JSVersion oldVersionOverride;
#ifdef DEBUG
unsigned oldCompileOptions;
#endif
JSVersion newVersion;
public:
@ -144,9 +141,6 @@ class AutoVersionAPI
oldDefaultVersion(cx->getDefaultVersion()),
oldHasVersionOverride(cx->isVersionOverridden()),
oldVersionOverride(oldHasVersionOverride ? cx->findVersion() : JSVERSION_UNKNOWN)
#ifdef DEBUG
, oldCompileOptions(cx->getCompileOptions())
#endif
{
this->newVersion = newVersion;
cx->clearVersionOverride();
@ -159,7 +153,6 @@ class AutoVersionAPI
cx->overrideVersion(oldVersionOverride);
else
cx->clearVersionOverride();
JS_ASSERT(oldCompileOptions == cx->getCompileOptions());
}
/* The version that this scoped-entity establishes. */
@ -882,6 +875,7 @@ JSRuntime::JSRuntime(JSUseHelperThreads useHelperThreads)
ionPcScriptCache(NULL),
threadPool(this),
ctypesActivityCallback(NULL),
parallelWarmup(0),
ionReturnOverride_(MagicValue(JS_ARG_POISON)),
useHelperThreads_(useHelperThreads),
requestedHelperThreadCount(-1),
@ -1125,6 +1119,9 @@ JS_NewRuntime(uint32_t maxbytes, JSUseHelperThreads useHelperThreads)
return NULL;
#endif
if (!ForkJoinSlice::InitializeTLS())
return NULL;
if (!rt->init(maxbytes)) {
JS_DestroyRuntime(rt);
return NULL;
@ -1306,9 +1303,6 @@ JS_SetVersion(JSContext *cx, JSVersion newVersion)
JS_ASSERT(!VersionHasFlags(newVersion));
JSVersion newVersionNumber = newVersion;
#ifdef DEBUG
unsigned coptsBefore = cx->getCompileOptions();
#endif
JSVersion oldVersion = cx->findVersion();
JSVersion oldVersionNumber = VersionNumber(oldVersion);
if (oldVersionNumber == newVersionNumber)
@ -1316,7 +1310,6 @@ JS_SetVersion(JSContext *cx, JSVersion newVersion)
VersionCopyFlags(&newVersion, oldVersion);
cx->maybeOverrideVersion(newVersion);
JS_ASSERT(cx->getCompileOptions() == coptsBefore);
return oldVersionNumber;
}
@ -1369,18 +1362,16 @@ JS_GetOptions(JSContext *cx)
* We may have been synchronized with a script version that was formerly on
* the stack, but has now been popped.
*/
return cx->allOptions();
return cx->options();
}
static unsigned
SetOptionsCommon(JSContext *cx, unsigned options)
{
JS_ASSERT((options & JSALLOPTION_MASK) == options);
unsigned oldopts = cx->allOptions();
unsigned newropts = options & JSRUNOPTION_MASK;
unsigned newcopts = options & JSCOMPILEOPTION_MASK;
cx->setRunOptions(newropts);
cx->setCompileOptions(newcopts);
JS_ASSERT((options & JSOPTION_MASK) == options);
unsigned oldopts = cx->options();
unsigned newopts = options & JSOPTION_MASK;
cx->setOptions(newopts);
cx->updateJITEnabled();
return oldopts;
}
@ -1394,7 +1385,7 @@ JS_SetOptions(JSContext *cx, uint32_t options)
JS_PUBLIC_API(uint32_t)
JS_ToggleOptions(JSContext *cx, uint32_t options)
{
unsigned oldopts = cx->allOptions();
unsigned oldopts = cx->options();
unsigned newopts = oldopts ^ options;
return SetOptionsCommon(cx, newopts);
}
@ -5054,7 +5045,7 @@ struct AutoLastFrameCheck
~AutoLastFrameCheck() {
if (cx->isExceptionPending() &&
!JS_IsRunning(cx) &&
!cx->hasRunOption(JSOPTION_DONT_REPORT_UNCAUGHT)) {
!cx->hasOption(JSOPTION_DONT_REPORT_UNCAUGHT)) {
js_ReportUncaughtException(cx);
}
}
@ -5153,8 +5144,8 @@ JS::CompileOptions::CompileOptions(JSContext *cx)
utf8(false),
filename(NULL),
lineno(1),
compileAndGo(cx->hasRunOption(JSOPTION_COMPILE_N_GO)),
noScriptRval(cx->hasRunOption(JSOPTION_NO_SCRIPT_RVAL)),
compileAndGo(cx->hasOption(JSOPTION_COMPILE_N_GO)),
noScriptRval(cx->hasOption(JSOPTION_NO_SCRIPT_RVAL)),
selfHostingMode(false),
userBit(false),
sourcePolicy(SAVE_SOURCE)

Просмотреть файл

@ -2153,11 +2153,7 @@ JS_StringToVersion(const char *string);
#define JSOPTION_ION JS_BIT(20) /* IonMonkey */
/* Options which reflect compile-time properties of scripts. */
#define JSCOMPILEOPTION_MASK 0
#define JSRUNOPTION_MASK (JS_BITMASK(21) & ~JSCOMPILEOPTION_MASK)
#define JSALLOPTION_MASK (JSCOMPILEOPTION_MASK | JSRUNOPTION_MASK)
#define JSOPTION_MASK JS_BITMASK(21)
extern JS_PUBLIC_API(uint32_t)
JS_GetOptions(JSContext *cx);

Просмотреть файл

@ -1416,7 +1416,7 @@ js::array_sort(JSContext *cx, unsigned argc, Value *vp)
}
} else {
/* array.sort() cannot currently be used from parallel code */
JS_ASSERT(!ForkJoinSlice::InParallelSection());
JS_ASSERT(!InParallelSection());
FastInvokeGuard fig(cx, fval);
if (!MergeSort(vec.begin(), n, vec.begin() + n,
SortComparatorFunction(cx, fval, fig))) {
@ -2205,7 +2205,7 @@ array_map(JSContext *cx, unsigned argc, Value *vp)
/* Step 8. */
RootedValue kValue(cx);
JS_ASSERT(!ForkJoinSlice::InParallelSection());
JS_ASSERT(!InParallelSection());
FastInvokeGuard fig(cx, ObjectValue(*callable));
InvokeArgsGuard &ag = fig.args();
while (k < len) {
@ -2286,7 +2286,7 @@ array_filter(JSContext *cx, unsigned argc, Value *vp)
uint32_t to = 0;
/* Step 9. */
JS_ASSERT(!ForkJoinSlice::InParallelSection());
JS_ASSERT(!InParallelSection());
FastInvokeGuard fig(cx, ObjectValue(*callable));
InvokeArgsGuard &ag = fig.args();
RootedValue kValue(cx);

Просмотреть файл

@ -1168,7 +1168,7 @@ JSContext::JSContext(JSRuntime *rt)
hasVersionOverride(false),
throwing(false),
exception(UndefinedValue()),
runOptions(0),
options_(0),
defaultLocale(NULL),
reportGranularity(JS_DEFAULT_JITREPORT_GRANULARITY),
localeCallbacks(NULL),
@ -1493,7 +1493,7 @@ void
JSContext::updateJITEnabled()
{
#ifdef JS_METHODJIT
methodJitEnabled = (runOptions & JSOPTION_METHODJIT) && !IsJITBrokenHere();
methodJitEnabled = (options_ & JSOPTION_METHODJIT) && !IsJITBrokenHere();
#endif
}
@ -1514,7 +1514,7 @@ JSContext::mark(JSTracer *trc)
/* Stack frames and slots are traced by StackSpace::mark. */
/* Mark other roots-by-definition in the JSContext. */
if (defaultCompartmentObject_ && !hasRunOption(JSOPTION_UNROOTED_GLOBAL))
if (defaultCompartmentObject_ && !hasOption(JSOPTION_UNROOTED_GLOBAL))
MarkObjectRoot(trc, &defaultCompartmentObject_, "default compartment object");
if (isExceptionPending())
MarkValueRoot(trc, &exception, "exception");

Просмотреть файл

@ -1155,6 +1155,10 @@ struct JSRuntime : js::RuntimeFriendFields,
js::CTypesActivityCallback ctypesActivityCallback;
// Non-zero if this is a parallel warmup execution. See
// js::parallel::Do() for more information.
uint32_t parallelWarmup;
private:
// In certain cases, we want to optimize certain opcodes to typed instructions,
// to avoid carrying an extra register to feed into an unbox. Unfortunately,
@ -1297,7 +1301,6 @@ struct AutoResolving;
*/
namespace VersionFlags {
static const unsigned MASK = 0x0FFF; /* see JSVersion in jspubtd.h */
static const unsigned FULL_MASK = 0x0FFF;
} /* namespace VersionFlags */
static inline JSVersion
@ -1324,20 +1327,6 @@ VersionHasFlags(JSVersion version)
return !!VersionExtractFlags(version);
}
static inline unsigned
VersionFlagsToOptions(JSVersion version)
{
unsigned copts = 0;
JS_ASSERT((copts & JSCOMPILEOPTION_MASK) == copts);
return copts;
}
static inline JSVersion
OptionFlagsToVersion(unsigned options, JSVersion version)
{
return version;
}
static inline bool
VersionIsKnown(JSVersion version)
{
@ -1377,8 +1366,8 @@ struct JSContext : js::ContextFriendFields,
bool throwing; /* is there a pending exception? */
js::Value exception; /* most-recently-thrown exception */
/* Per-context run options. */
unsigned runOptions; /* see jsapi.h for JSOPTION_* */
/* Per-context options. */
unsigned options_; /* see jsapi.h for JSOPTION_* */
/* Default locale for Internationalization API */
char *defaultLocale;
@ -1565,26 +1554,21 @@ struct JSContext : js::ContextFriendFields,
*/
inline JSVersion findVersion() const;
void setRunOptions(unsigned ropts) {
JS_ASSERT((ropts & JSRUNOPTION_MASK) == ropts);
runOptions = ropts;
void setOptions(unsigned opts) {
JS_ASSERT((opts & JSOPTION_MASK) == opts);
options_ = opts;
}
/* Note: may override the version. */
inline void setCompileOptions(unsigned newcopts);
unsigned options() const { return options_; }
unsigned getRunOptions() const { return runOptions; }
inline unsigned getCompileOptions() const;
inline unsigned allOptions() const;
bool hasRunOption(unsigned ropt) const {
JS_ASSERT((ropt & JSRUNOPTION_MASK) == ropt);
return !!(runOptions & ropt);
bool hasOption(unsigned opt) const {
JS_ASSERT((opt & JSOPTION_MASK) == opt);
return !!(options_ & opt);
}
bool hasStrictOption() const { return hasRunOption(JSOPTION_STRICT); }
bool hasWErrorOption() const { return hasRunOption(JSOPTION_WERROR); }
bool hasAtLineOption() const { return hasRunOption(JSOPTION_ATLINE); }
bool hasStrictOption() const { return hasOption(JSOPTION_STRICT); }
bool hasWErrorOption() const { return hasOption(JSOPTION_WERROR); }
bool hasAtLineOption() const { return hasOption(JSOPTION_ATLINE); }
js::LifoAlloc &tempLifoAlloc() { return runtime->tempLifoAlloc; }
inline js::LifoAlloc &analysisLifoAlloc();
@ -2261,6 +2245,16 @@ class ContextAllocPolicy
void reportAllocOverflow() const { js_ReportAllocationOverflow(cx); }
};
JSBool intrinsic_ThrowError(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_NewDenseArray(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_UnsafeSetElement(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_ForceSequential(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_NewParallelArray(JSContext *cx, unsigned argc, Value *vp);
#ifdef DEBUG
JSBool intrinsic_Dump(JSContext *cx, unsigned argc, Value *vp);
#endif
} /* namespace js */
#ifdef _MSC_VER

Просмотреть файл

@ -468,23 +468,6 @@ JSContext::maybeOverrideVersion(JSVersion newVersion)
return true;
}
inline unsigned
JSContext::getCompileOptions() const { return js::VersionFlagsToOptions(findVersion()); }
inline unsigned
JSContext::allOptions() const { return getRunOptions() | getCompileOptions(); }
inline void
JSContext::setCompileOptions(unsigned newcopts)
{
JS_ASSERT((newcopts & JSCOMPILEOPTION_MASK) == newcopts);
if (JS_LIKELY(getCompileOptions() == newcopts))
return;
JSVersion version = findVersion();
JSVersion newVersion = js::OptionFlagsToVersion(newcopts, version);
maybeOverrideVersion(newVersion);
}
inline js::LifoAlloc &
JSContext::analysisLifoAlloc()
{

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше