Mirror of https://github.com/mozilla/gecko-dev.git

Merge mozilla-central to fx-team
Commit 153097fa77

@@ -352,11 +352,11 @@ pref("dom.w3c_touch_events.safetyX", 0); // escape borders in units of 1/240"
pref("dom.w3c_touch_events.safetyY", 120); // escape borders in units of 1/240"

#ifdef MOZ_SAFE_BROWSING
pref("browser.safebrowsing.enabled", false);
pref("browser.safebrowsing.enabled", true);
// Prevent loading of pages identified as malware
pref("browser.safebrowsing.malware.enabled", false);
pref("browser.safebrowsing.downloads.enabled", false);
pref("browser.safebrowsing.downloads.remote.enabled", false);
pref("browser.safebrowsing.malware.enabled", true);
pref("browser.safebrowsing.downloads.enabled", true);
pref("browser.safebrowsing.downloads.remote.enabled", true);
pref("browser.safebrowsing.downloads.remote.timeout_ms", 10000);
pref("browser.safebrowsing.debug", false);

@@ -388,7 +388,7 @@ pref("urlclassifier.max-complete-age", 2700);

// Tracking protection
pref("privacy.trackingprotection.enabled", false);
pref("privacy.trackingprotection.pbmode.enabled", false);
pref("privacy.trackingprotection.pbmode.enabled", true);

#endif

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="8d83715f08b7849f16a0dfc88f78d5c3a89c0a54">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="fake-qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="939b377d55a2f081d94029a30a75d05e5a20daf3"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="8d83715f08b7849f16a0dfc88f78d5c3a89c0a54">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="fake-qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="939b377d55a2f081d94029a30a75d05e5a20daf3"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>

@@ -19,7 +19,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="5ef30994f4778b4052e58a4383dbe7890048c87e"/>
<project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cb4604d5a578efd027277059ce3e0f6e3af59bd1"/>

@@ -17,7 +17,7 @@
</project>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="5ef30994f4778b4052e58a4383dbe7890048c87e"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="ac7e9ae8a24ab4a3f3da801ca53f95f39a32b89f"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="94bbf7890326d37f03fd2a6822b6618b08bec8e2"/>

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="8d83715f08b7849f16a0dfc88f78d5c3a89c0a54">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="1b3591a50ed352fc6ddb77462b7b35d0bfa555a3"/>

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="c9d4fe680662ee44a4bdea42ae00366f5df399cf">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="1b3591a50ed352fc6ddb77462b7b35d0bfa555a3"/>

@@ -19,7 +19,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="5ef30994f4778b4052e58a4383dbe7890048c87e"/>
<project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cb4604d5a578efd027277059ce3e0f6e3af59bd1"/>

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="8d83715f08b7849f16a0dfc88f78d5c3a89c0a54">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="fake-qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="939b377d55a2f081d94029a30a75d05e5a20daf3"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>

@@ -1,9 +1,9 @@
{
"git": {
"git_revision": "28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5",
"git_revision": "cba7e4b86361af31b153cfebaf99900e0b860f7b",
"remote": "https://git.mozilla.org/releases/gaia.git",
"branch": ""
},
"revision": "f1fbe09c3fd9e47b2ca2709b471b2c9cfd7a733f",
"revision": "d02e42f3a17e4c37ff6c2b56e792e7d762732676",
"repo_path": "integration/gaia-central"
}

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="8d83715f08b7849f16a0dfc88f78d5c3a89c0a54">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="fake-qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="939b377d55a2f081d94029a30a75d05e5a20daf3"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>

@@ -18,7 +18,7 @@
<project name="rilproxy" path="rilproxy" remote="b2g" revision="5ef30994f4778b4052e58a4383dbe7890048c87e"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="fake-qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="939b377d55a2f081d94029a30a75d05e5a20daf3"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="ac7e9ae8a24ab4a3f3da801ca53f95f39a32b89f"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="94bbf7890326d37f03fd2a6822b6618b08bec8e2"/>

@@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="c9d4fe680662ee44a4bdea42ae00366f5df399cf">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="28d63cf3bdc4417f7ad8cab2230f096bf9f6d3b5"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="cba7e4b86361af31b153cfebaf99900e0b860f7b"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="fake-qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="939b377d55a2f081d94029a30a75d05e5a20daf3"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="9a58f2e395da17c252f61f28900b5b09aeb813bd"/>

@@ -43,6 +43,7 @@ let RemoteAboutNewTab = {
* Initialize the RemotePageManager and add all message listeners for this page
*/
init: function() {
RemoteNewTabLocation.init();
this.pageListener = new RemotePages("about:remote-newtab");
this.pageListener.addMessageListener("NewTab:InitializeGrid", this.initializeGrid.bind(this));
this.pageListener.addMessageListener("NewTab:UpdateGrid", this.updateGrid.bind(this));

@@ -292,6 +293,7 @@ let RemoteAboutNewTab = {
Ci.nsISupportsWeakReference]),

uninit: function() {
RemoteNewTabLocation.uninit();
this._removeObservers();
this.pageListener.destroy();
this.pageListener = null;

@@ -1,17 +1,49 @@
/* globals Services */
/* globals Services, UpdateUtils, XPCOMUtils, URL, NewTabPrefsProvider, Locale */
/* exported RemoteNewTabLocation */

"use strict";

this.EXPORTED_SYMBOLS = ["RemoteNewTabLocation"];

Components.utils.import("resource://gre/modules/Services.jsm");
Components.utils.importGlobalProperties(["URL"]);
const {utils: Cu} = Components;
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.importGlobalProperties(["URL"]);

// TODO: will get dynamically set in bug 1210478
const DEFAULT_PAGE_LOCATION = "https://newtab.cdn.mozilla.net/v0/nightly/en-US/index.html";
XPCOMUtils.defineLazyModuleGetter(this, "UpdateUtils",
"resource://gre/modules/UpdateUtils.jsm");
XPCOMUtils.defineLazyModuleGetter(this, "NewTabPrefsProvider",
"resource:///modules/NewTabPrefsProvider.jsm");
XPCOMUtils.defineLazyModuleGetter(this, "Locale",
"resource://gre/modules/Locale.jsm");

this.RemoteNewTabLocation = {
_url: new URL(DEFAULT_PAGE_LOCATION),
// The preference that tells whether to match the OS locale
const PREF_MATCH_OS_LOCALE = "intl.locale.matchOS";

// The preference that tells what locale the user selected
const PREF_SELECTED_LOCALE = "general.useragent.locale";

const DEFAULT_PAGE_LOCATION = "https://newtab.cdn.mozilla.net/" +
"v%VERSION%/%CHANNEL%/%LOCALE%/index.html";

const VALID_CHANNELS = new Set(["esr", "release", "beta", "aurora", "nightly"]);

const NEWTAB_VERSION = "0";

let RemoteNewTabLocation = {
/*
* Generate a default url based on locale and update channel
*/
_generateDefaultURL() {
let releaseName = this._releaseFromUpdateChannel(UpdateUtils.UpdateChannel);
let uri = DEFAULT_PAGE_LOCATION
.replace("%VERSION%", this.version)
.replace("%LOCALE%", Locale.getLocale())
.replace("%CHANNEL%", releaseName);
return new URL(uri);
},

_url: null,
_overridden: false,

get href() {

@@ -26,17 +58,84 @@ this.RemoteNewTabLocation = {
return this._overridden;
},

override: function(newURL) {
this._url = new URL(newURL);
this._overridden = true;
Services.obs.notifyObservers(null, "remote-new-tab-location-changed",
this._url.href);
get version() {
return NEWTAB_VERSION;
},

reset: function() {
this._url = new URL(DEFAULT_PAGE_LOCATION);
get channels() {
return VALID_CHANNELS;
},

/**
* Returns the release name from an Update Channel name
*
* @return {String} a release name based on the update channel. Defaults to nightly
*/
_releaseFromUpdateChannel(channel) {
let result = "nightly";
if (VALID_CHANNELS.has(channel)) {
result = channel;
}
return result;
},

/*
* Updates the location when the page is not overriden.
* Useful when there is a pref change
*/
_updateMaybe() {
if (!this.overridden) {
let url = this._generateDefaultURL();
if (url.href !== this._url.href) {
this._url = url;
Services.obs.notifyObservers(null, "remote-new-tab-location-changed",
this._url.href);
}
}
},

/*
* Override the Remote newtab page location.
*/
override(newURL) {
let url = new URL(newURL);
if (url.href !== this._url.href) {
this._overridden = true;
this._url = url;
Services.obs.notifyObservers(null, "remote-new-tab-location-changed",
this._url.href);
}
},

/*
* Reset the newtab page location to the default value
*/
reset() {
let url = this._generateDefaultURL();
if (url.href !== this._url.href) {
this._url = url;
this._overridden = false;
Services.obs.notifyObservers(null, "remote-new-tab-location-changed",
this._url.href);
}
},

init() {
NewTabPrefsProvider.prefs.on(
PREF_SELECTED_LOCALE,
this._updateMaybe.bind(this));

NewTabPrefsProvider.prefs.on(
PREF_MATCH_OS_LOCALE,
this._updateMaybe.bind(this));

this._url = this._generateDefaultURL();
},

uninit() {
this._url = null;
this._overridden = false;
Services.obs.notifyObservers(null, "remote-new-tab-location-changed",
this._url.href);
NewTabPrefsProvider.prefs.off(PREF_SELECTED_LOCALE, this._updateMaybe);
NewTabPrefsProvider.prefs.off(PREF_MATCH_OS_LOCALE, this._updateMaybe);
}
};

@@ -1,39 +1,140 @@
/* globals ok, equal, RemoteNewTabLocation, Services */
/* globals ok, equal, RemoteNewTabLocation, NewTabPrefsProvider, Services, Preferences */
/* jscs:disable requireCamelCaseOrUpperCaseIdentifiers */
"use strict";

Components.utils.import("resource:///modules/RemoteNewTabLocation.jsm");
Components.utils.import("resource:///modules/NewTabPrefsProvider.jsm");
Components.utils.import("resource://gre/modules/Services.jsm");
Components.utils.import("resource://gre/modules/Preferences.jsm");
Components.utils.importGlobalProperties(["URL"]);

add_task(function* () {
var notificationPromise;
let defaultHref = RemoteNewTabLocation.href;
RemoteNewTabLocation.init();
const DEFAULT_HREF = RemoteNewTabLocation.href;
RemoteNewTabLocation.uninit();

add_task(function* test_defaults() {
RemoteNewTabLocation.init();
ok(RemoteNewTabLocation.href, "Default location has an href");
ok(RemoteNewTabLocation.origin, "Default location has an origin");
ok(!RemoteNewTabLocation.overridden, "Default location is not overridden");
RemoteNewTabLocation.uninit();
});

/**
* Tests the overriding of the default URL
*/
add_task(function* test_overrides() {
RemoteNewTabLocation.init();
let testURL = new URL("https://example.com/");
let notificationPromise;

notificationPromise = changeNotificationPromise(testURL.href);
notificationPromise = nextChangeNotificationPromise(
testURL.href, "Remote Location should change");
RemoteNewTabLocation.override(testURL.href);
yield notificationPromise;
ok(RemoteNewTabLocation.overridden, "Remote location should be overridden");
equal(RemoteNewTabLocation.href, testURL.href, "Remote href should be the custom URL");
equal(RemoteNewTabLocation.origin, testURL.origin, "Remote origin should be the custom URL");
equal(RemoteNewTabLocation.href, testURL.href,
"Remote href should be the custom URL");
equal(RemoteNewTabLocation.origin, testURL.origin,
"Remote origin should be the custom URL");

notificationPromise = changeNotificationPromise(defaultHref);
notificationPromise = nextChangeNotificationPromise(
DEFAULT_HREF, "Remote href should be reset");
RemoteNewTabLocation.reset();
yield notificationPromise;
ok(!RemoteNewTabLocation.overridden, "Newtab URL should not be overridden");
equal(RemoteNewTabLocation.href, defaultHref, "Remote href should be reset");
RemoteNewTabLocation.uninit();
});

function changeNotificationPromise(aNewURL) {
/**
* Tests how RemoteNewTabLocation responds to updates to prefs
*/
add_task(function* test_updates() {
RemoteNewTabLocation.init();
let notificationPromise;
let expectedHref = "https://newtab.cdn.mozilla.net" +
`/v${RemoteNewTabLocation.version}` +
"/nightly" +
"/en-GB" +
"/index.html";
Preferences.set("intl.locale.matchOS", true);
Preferences.set("general.useragent.locale", "en-GB");
NewTabPrefsProvider.prefs.init();

// test update checks for prefs
notificationPromise = nextChangeNotificationPromise(
expectedHref, "Remote href should be updated");
Preferences.set("intl.locale.matchOS", false);
yield notificationPromise;

notificationPromise = nextChangeNotificationPromise(
DEFAULT_HREF, "Remote href changes back to default");
Preferences.set("general.useragent.locale", "en-US");

yield notificationPromise;

// test update fires on override and reset
let testURL = new URL("https://example.com/");
notificationPromise = nextChangeNotificationPromise(
testURL.href, "a notification occurs on override");
RemoteNewTabLocation.override(testURL.href);
yield notificationPromise;

// from overridden to default
notificationPromise = nextChangeNotificationPromise(
DEFAULT_HREF, "a notification occurs on reset");
RemoteNewTabLocation.reset();
yield notificationPromise;

// override to default URL from default URL
notificationPromise = nextChangeNotificationPromise(
testURL.href, "a notification only occurs for a change in overridden urls");
RemoteNewTabLocation.override(DEFAULT_HREF);
RemoteNewTabLocation.override(testURL.href);
yield notificationPromise;

// reset twice, only one notification for default URL
notificationPromise = nextChangeNotificationPromise(
DEFAULT_HREF, "reset occurs");
RemoteNewTabLocation.reset();
yield notificationPromise;

notificationPromise = nextChangeNotificationPromise(
testURL.href, "a notification only occurs for a change in reset urls");
RemoteNewTabLocation.reset();
RemoteNewTabLocation.override(testURL.href);
yield notificationPromise;

NewTabPrefsProvider.prefs.uninit();
RemoteNewTabLocation.uninit();
});

/**
* Verifies that RemoteNewTabLocation's _releaseFromUpdateChannel
* Returns the correct release names
*/
add_task(function* test_release_names() {
RemoteNewTabLocation.init();
let valid_channels = RemoteNewTabLocation.channels;
let invalid_channels = new Set(["default", "invalid"]);

for (let channel of valid_channels) {
equal(channel, RemoteNewTabLocation._releaseFromUpdateChannel(channel),
"release == channel name when valid");
}

for (let channel of invalid_channels) {
equal("nightly", RemoteNewTabLocation._releaseFromUpdateChannel(channel),
"release == nightly when invalid");
}
RemoteNewTabLocation.uninit();
});

function nextChangeNotificationPromise(aNewURL, testMessage) {
return new Promise(resolve => {
Services.obs.addObserver(function observer(aSubject, aTopic, aData) { // jshint ignore:line
Services.obs.removeObserver(observer, aTopic);
equal(aData, aNewURL, "remote-new-tab-location-changed data should be new URL.");
equal(aData, aNewURL, testMessage);
resolve();
}, "remote-new-tab-location-changed", false);
});

@@ -1,12 +1,10 @@
[DEFAULT]
support-files = file_pdfjs_test.pdf
support-files =
file_pdfjs_test.pdf
head.js

[browser_pdfjs_main.js]
skip-if = e10s # Bug 1159385
[browser_pdfjs_navigation.js]
skip-if = e10s # Bug 1159385
[browser_pdfjs_savedialog.js]
[browser_pdfjs_views.js]
skip-if = e10s # Bug 1159385
[browser_pdfjs_zoom.js]
skip-if = e10s # Bug 1159385

@@ -4,9 +4,7 @@
const RELATIVE_DIR = "browser/extensions/pdfjs/test/";
const TESTROOT = "http://example.com/browser/" + RELATIVE_DIR;

function test() {
var tab;

add_task(function* test() {
let handlerService = Cc["@mozilla.org/uriloader/handler-service;1"].getService(Ci.nsIHandlerService);
let mimeService = Cc["@mozilla.org/mime;1"].getService(Ci.nsIMIMEService);
let handlerInfo = mimeService.getFromTypeAndExtension('application/pdf', 'pdf');

@@ -17,82 +15,53 @@ function test() {

info('Pref action: ' + handlerInfo.preferredAction);

waitForExplicitFinish();
registerCleanupFunction(function() {
gBrowser.removeTab(tab);
});
yield BrowserTestUtils.withNewTab({ gBrowser: gBrowser, url: "about:blank" },
function* (newTabBrowser) {
yield waitForPdfJS(newTabBrowser, TESTROOT + "file_pdfjs_test.pdf");

tab = gBrowser.addTab(TESTROOT + "file_pdfjs_test.pdf");
var newTabBrowser = gBrowser.getBrowserForTab(tab);
newTabBrowser.addEventListener("load", function eventHandler() {
newTabBrowser.removeEventListener("load", eventHandler, true);
ok(gBrowser.isFindBarInitialized(), "Browser FindBar initialized!");

var document = newTabBrowser.contentDocument,
window = newTabBrowser.contentWindow;
yield ContentTask.spawn(newTabBrowser, null, function* () {
//
// Overall sanity tests
//
ok(content.document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in content.wrappedJSObject, "window content has PDFJS object");

// Runs tests after all 'load' event handlers have fired off
window.addEventListener("documentload", function() {
runTests(document, window, tab, function () {
closePDFViewer(window, finish);
//
// Sidebar: open
//
var sidebar = content.document.querySelector('button#sidebarToggle'),
outerContainer = content.document.querySelector('div#outerContainer');

sidebar.click();
ok(outerContainer.classList.contains('sidebarOpen'), "sidebar opens on click");

//
// Sidebar: close
//
sidebar.click();
ok(!outerContainer.classList.contains('sidebarOpen'), "sidebar closes on click");

//
// Page change from prev/next buttons
//
var prevPage = content.document.querySelector('button#previous'),
nextPage = content.document.querySelector('button#next');

var pgNumber = content.document.querySelector('input#pageNumber').value;
is(parseInt(pgNumber, 10), 1, 'initial page is 1');

//
// Bookmark button
//
var viewBookmark = content.document.querySelector('a#viewBookmark');
viewBookmark.click();

ok(viewBookmark.href.length > 0, "viewBookmark button has href");

var viewer = content.wrappedJSObject.PDFViewerApplication;
yield viewer.close();
});
}, false, true);
}, true);
}

function runTests(document, window, tab, callback) {

//
// Overall sanity tests
//
ok(document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in window.wrappedJSObject, "window content has PDFJS object");
ok('PDFViewerApplication' in window.wrappedJSObject,
"window content has viewer object");

//
// Browser Find
//
ok(gBrowser.isFindBarInitialized(tab), "Browser FindBar initialized!");

//
// Sidebar: open
//
var sidebar = document.querySelector('button#sidebarToggle'),
outerContainer = document.querySelector('div#outerContainer');

sidebar.click();
ok(outerContainer.classList.contains('sidebarOpen'), 'sidebar opens on click');

//
// Sidebar: close
//
sidebar.click();
ok(!outerContainer.classList.contains('sidebarOpen'), 'sidebar closes on click');

//
// Page change from prev/next buttons
//
var prevPage = document.querySelector('button#previous'),
nextPage = document.querySelector('button#next');

var pageNumber = document.querySelector('input#pageNumber');
is(parseInt(pageNumber.value), 1, 'initial page is 1');

//
// Bookmark button
//
var viewBookmark = document.querySelector('a#viewBookmark');
viewBookmark.click();
ok(viewBookmark.href.length > 0, 'viewBookmark button has href');

callback();
}

/**
* Destroys PDF.js viewer opened document.
*/
function closePDFViewer(window, callback) {
var viewer = window.wrappedJSObject.PDFViewerApplication;
viewer.close().then(callback);
}
});
});

@@ -139,9 +139,7 @@ const TESTS = [
}
];

function test() {
var tab;

add_task(function* test() {
let mimeService = Cc["@mozilla.org/mime;1"].getService(Ci.nsIMIMEService);
let handlerInfo = mimeService.getFromTypeAndExtension('application/pdf', 'pdf');

@@ -151,51 +149,73 @@ function test() {

info('Pref action: ' + handlerInfo.preferredAction);

waitForExplicitFinish();
registerCleanupFunction(function() {
gBrowser.removeTab(tab);
});
yield BrowserTestUtils.withNewTab({ gBrowser, url: "about:blank" },
function* (newTabBrowser) {
yield waitForPdfJS(newTabBrowser, TESTROOT + "file_pdfjs_test.pdf");

tab = gBrowser.addTab(TESTROOT + "file_pdfjs_test.pdf");
gBrowser.selectedTab = tab;

var newTabBrowser = gBrowser.getBrowserForTab(tab);
newTabBrowser.addEventListener("load", function eventHandler() {
newTabBrowser.removeEventListener("load", eventHandler, true);

var document = newTabBrowser.contentDocument,
window = newTabBrowser.contentWindow;

// Runs tests after all 'load' event handlers have fired off
window.addEventListener("documentload", function() {
runTests(document, window, function () {
var pageNumber = document.querySelector('input#pageNumber');
is(pageNumber.value, pageNumber.max, "Document is left on the last page");
finish();
yield ContentTask.spawn(newTabBrowser, null, function* () {
// Check if PDF is opened with internal viewer
ok(content.document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in content.wrappedJSObject, "window content has PDFJS object");
});
}, false, true);
}, true);
}

function runTests(document, window, finish) {
// Check if PDF is opened with internal viewer
ok(document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in window.wrappedJSObject, "window content has PDFJS object");
yield ContentTask.spawn(newTabBrowser, null, contentSetUp);

// Wait for outline items, the start the navigation actions
waitForOutlineItems(document).then(function () {
// The key navigation has to happen in page-fit, otherwise it won't scroll
// trough a complete page
setZoomToPageFit(document).then(function () {
runNextTest(document, window, finish);
}, function () {
ok(false, "Current scale has been set to 'page-fit'");
finish();
yield Task.spawn(runTests(newTabBrowser));

yield ContentTask.spawn(newTabBrowser, null, function*() {
let pageNumber = content.document.querySelector('input#pageNumber');
is(pageNumber.value, pageNumber.max, "Document is left on the last page");
});
});
}, function () {
ok(false, "Outline items have been found");
finish();
});
});

function* contentSetUp() {
/**
* Outline Items gets appended to the document later on we have to
* wait for them before we start to navigate though document
*
* @param document
* @returns {deferred.promise|*}
*/
function waitForOutlineItems(document) {
return new Promise((resolve, reject) => {
document.addEventListener("outlineloaded", function outlineLoaded(evt) {
document.removeEventListener("outlineloaded", outlineLoaded);
var outlineCount = evt.detail.outlineCount;

if (document.querySelectorAll(".outlineItem").length === outlineCount) {
resolve();
} else {
reject();
}
});
});
}

/**
* The key navigation has to happen in page-fit, otherwise it won't scroll
* through a complete page
*
* @param document
* @returns {deferred.promise|*}
*/
function setZoomToPageFit(document) {
return new Promise((resolve) => {
document.addEventListener("pagerendered", function onZoom(e) {
document.removeEventListener("pagerendered", onZoom);
document.querySelector("#viewer").click();
resolve();
});

var select = document.querySelector("select#scaleSelect");
select.selectedIndex = 2;
select.dispatchEvent(new Event("change"));
});
}

yield waitForOutlineItems(content.document);
yield setZoomToPageFit(content.document);
}

/**

@@ -207,99 +227,55 @@ function runTests(document, window, finish) {
* @param test
* @param callback
*/
function runNextTest(document, window, endCallback) {
var test = TESTS.shift(),
deferred = Promise.defer(),
pageNumber = document.querySelector('input#pageNumber');
function* runTests(browser) {
yield ContentTask.spawn(browser, TESTS, function* (TESTS) {
let window = content;
let document = window.document;

// Add an event-listener to wait for page to change, afterwards resolve the promise
var timeout = window.setTimeout(() => deferred.reject(), 5000);
window.addEventListener('pagechange', function pageChange() {
if (pageNumber.value == test.expectedPage) {
window.removeEventListener('pagechange', pageChange);
window.clearTimeout(timeout);
deferred.resolve(pageNumber.value);
for (let test of TESTS) {
let deferred = {};
deferred.promise = new Promise((resolve, reject) => {
deferred.resolve = resolve;
deferred.reject = reject;
});

let pageNumber = document.querySelector('input#pageNumber');

// Add an event-listener to wait for page to change, afterwards resolve the promise
let timeout = window.setTimeout(() => deferred.reject(), 5000);
window.addEventListener('pagechange', function pageChange() {
if (pageNumber.value == test.expectedPage) {
window.removeEventListener('pagechange', pageChange);
window.clearTimeout(timeout);
deferred.resolve(+pageNumber.value);
}
});

// Get the element and trigger the action for changing the page
var el = document.querySelector(test.action.selector);
ok(el, "Element '" + test.action.selector + "' has been found");

// The value option is for input case
if (test.action.value)
el.value = test.action.value;

// Dispatch the event for changing the page
if (test.action.event == "keydown") {
var ev = document.createEvent("KeyboardEvent");
ev.initKeyEvent("keydown", true, true, null, false, false, false, false,
test.action.keyCode, 0);
el.dispatchEvent(ev);
}
else {
var ev = new Event(test.action.event);
}
el.dispatchEvent(ev);

let pgNumber = yield deferred.promise;
is(pgNumber, test.expectedPage, test.message);
}
});

// Get the element and trigger the action for changing the page
var el = document.querySelector(test.action.selector);
ok(el, "Element '" + test.action.selector + "' has been found");

// The value option is for input case
if (test.action.value)
el.value = test.action.value;

// Dispatch the event for changing the page
if (test.action.event == "keydown") {
var ev = document.createEvent("KeyboardEvent");
ev.initKeyEvent("keydown", true, true, null, false, false, false, false,
test.action.keyCode, 0);
el.dispatchEvent(ev);
}
else {
var ev = new Event(test.action.event);
}
el.dispatchEvent(ev);


// When the promise gets resolved we call the next test if there are any left
// or else we call the final callback which will end the test
deferred.promise.then(function (pgNumber) {
is(pgNumber, test.expectedPage, test.message);

if (TESTS.length)
runNextTest(document, window, endCallback);
else
endCallback();
}, function () {
ok(false, "Test '" + test.message + "' failed with timeout.");
endCallback();
var viewer = content.wrappedJSObject.PDFViewerApplication;
yield viewer.close();
});
}

/**
* Outline Items gets appended to the document latter on we have to
* wait for them before we start to navigate though document
*
* @param document
* @returns {deferred.promise|*}
*/
function waitForOutlineItems(document) {
var deferred = Promise.defer();
document.addEventListener("outlineloaded", function outlineLoaded(evt) {
document.removeEventListener("outlineloaded", outlineLoaded);
var outlineCount = evt.detail.outlineCount;

if (document.querySelectorAll(".outlineItem").length === outlineCount) {
deferred.resolve();
} else {
deferred.reject();
}
});

return deferred.promise;
}

/**
* The key navigation has to happen in page-fit, otherwise it won't scroll
* trough a complete page
*
* @param document
* @returns {deferred.promise|*}
*/
function setZoomToPageFit(document) {
var deferred = Promise.defer();
document.addEventListener("pagerendered", function onZoom(e) {
document.removeEventListener("pagerendered", onZoom);
document.querySelector("#viewer").click();
deferred.resolve();
});

var select = document.querySelector("select#scaleSelect");
select.selectedIndex = 2;
select.dispatchEvent(new Event("change"));

return deferred.promise;
}

@@ -4,9 +4,7 @@
const RELATIVE_DIR = "browser/extensions/pdfjs/test/";
const TESTROOT = "http://example.com/browser/" + RELATIVE_DIR;

function test() {
var tab;

add_task(function* test() {
let handlerService = Cc["@mozilla.org/uriloader/handler-service;1"].getService(Ci.nsIHandlerService);
let mimeService = Cc["@mozilla.org/mime;1"].getService(Ci.nsIMIMEService);
let handlerInfo = mimeService.getFromTypeAndExtension('application/pdf', 'pdf');

@@ -17,70 +15,47 @@ function test() {

info('Pref action: ' + handlerInfo.preferredAction);

waitForExplicitFinish();
registerCleanupFunction(function() {
gBrowser.removeTab(tab);
});
yield BrowserTestUtils.withNewTab({ gBrowser, url: "about:blank" },
function* (browser) {
// check that PDF is opened with internal viewer
yield waitForPdfJS(browser, TESTROOT + "file_pdfjs_test.pdf");

tab = gBrowser.addTab(TESTROOT + "file_pdfjs_test.pdf");
var newTabBrowser = gBrowser.getBrowserForTab(tab);
newTabBrowser.addEventListener("load", function eventHandler() {
newTabBrowser.removeEventListener("load", eventHandler, true);
yield ContentTask.spawn(browser, null, function* () {
ok(content.document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in content.wrappedJSObject, "window content has PDFJS object");

var document = newTabBrowser.contentDocument,
window = newTabBrowser.contentWindow;
//open sidebar
var sidebar = content.document.querySelector('button#sidebarToggle');
var outerContainer = content.document.querySelector('div#outerContainer');

// Runs tests after all 'load' event handlers have fired off
window.addEventListener("documentload", function() {
runTests(document, window, function () {
closePDFViewer(window, finish);
sidebar.click();
ok(outerContainer.classList.contains('sidebarOpen'), 'sidebar opens on click');

// check that thumbnail view is open
var thumbnailView = content.document.querySelector('div#thumbnailView');
var outlineView = content.document.querySelector('div#outlineView');

is(thumbnailView.getAttribute('class'), null, 'Initial view is thumbnail view');
is(outlineView.getAttribute('class'), 'hidden', 'Outline view is hidden initially');

//switch to outline view
var viewOutlineButton = content.document.querySelector('button#viewOutline');
viewOutlineButton.click();

is(thumbnailView.getAttribute('class'), 'hidden', 'Thumbnail view is hidden when outline is selected');
is(outlineView.getAttribute('class'), '', 'Outline view is visible when selected');

//switch back to thumbnail view
var viewThumbnailButton = content.document.querySelector('button#viewThumbnail');
viewThumbnailButton.click();

is(thumbnailView.getAttribute('class'), '', 'Thumbnail view is visible when selected');
is(outlineView.getAttribute('class'), 'hidden', 'Outline view is hidden when thumbnail is selected');

sidebar.click();

var viewer = content.wrappedJSObject.PDFViewerApplication;
yield viewer.close();
});
}, false, true);
}, true);
}

function runTests(document, window, callback) {
// check that PDF is opened with internal viewer
ok(document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in window.wrappedJSObject, "window content has PDFJS object");

//open sidebar
var sidebar = document.querySelector('button#sidebarToggle');
var outerContainer = document.querySelector('div#outerContainer');

sidebar.click();
ok(outerContainer.classList.contains('sidebarOpen'), 'sidebar opens on click');

// check that thumbnail view is open
var thumbnailView = document.querySelector('div#thumbnailView');
var outlineView = document.querySelector('div#outlineView');

is(thumbnailView.getAttribute('class'), null, 'Initial view is thumbnail view');
is(outlineView.getAttribute('class'), 'hidden', 'Outline view is hidden initially');

//switch to outline view
var viewOutlineButton = document.querySelector('button#viewOutline');
viewOutlineButton.click();

is(outlineView.getAttribute('class'), '', 'Outline view is visible when selected');
is(thumbnailView.getAttribute('class'), 'hidden', 'Thumbnail view is hidden when outline is selected');

//switch back to thumbnail view
var viewThumbnailButton = document.querySelector('button#viewThumbnail');
viewThumbnailButton.click();

is(thumbnailView.getAttribute('class'), '', 'Thumbnail view is visible when selected');
is(outlineView.getAttribute('class'), 'hidden', 'Outline view is hidden when thumbnail is selected');

sidebar.click();

callback();
}

/**
* Destroys PDF.js viewer opened document.
*/
function closePDFViewer(window, callback) {
var viewer = window.wrappedJSObject.PDFViewerApplication;
viewer.close().then(callback);
}
});
});

@@ -28,6 +28,7 @@ const TESTS = [
{
action: {
keyboard: true,
keyCode: 61,
event: "+"
},
expectedZoom: 1, // 1 - zoom in

@@ -37,6 +38,7 @@ const TESTS = [
{
action: {
keyboard: true,
keyCode: 109,
event: "-"
},
expectedZoom: -1, // -1 - zoom out

@@ -54,11 +56,7 @@ const TESTS = [
}
];

var initialWidth; // the initial width of the PDF document
var previousWidth; // the width of the PDF document at previous step/test

function test() {
var tab;
add_task(function* test() {
let handlerService = Cc["@mozilla.org/uriloader/handler-service;1"]
.getService(Ci.nsIHandlerService);
let mimeService = Cc["@mozilla.org/mime;1"].getService(Ci.nsIMIMEService);

@@ -72,114 +70,83 @@ function test() {

info('Pref action: ' + handlerInfo.preferredAction);

waitForExplicitFinish();
registerCleanupFunction(function() {
gBrowser.removeTab(tab);
});
yield BrowserTestUtils.withNewTab({ gBrowser, url: "about:blank" },
function* (newTabBrowser) {
yield waitForPdfJS(newTabBrowser, TESTROOT + "file_pdfjs_test.pdf" + "#zoom=100");

tab = gBrowser.selectedTab = gBrowser.addTab(TESTROOT + "file_pdfjs_test.pdf");
var newTabBrowser = gBrowser.getBrowserForTab(tab);
yield ContentTask.spawn(newTabBrowser, TESTS, function* (TESTS) {
let document = content.document;

newTabBrowser.addEventListener("load", function eventHandler() {
newTabBrowser.removeEventListener("load", eventHandler, true);
function waitForRender() {
return new Promise((resolve) => {
document.addEventListener("pagerendered", function onPageRendered(e) {
if(e.detail.pageNumber !== 1) {
return;
}

var document = newTabBrowser.contentDocument,
window = newTabBrowser.contentWindow;
document.removeEventListener("pagerendered", onPageRendered, true);
resolve();
}, true);
});
}

// Runs tests after all 'load' event handlers have fired off
window.addEventListener("documentload", function() {
initialWidth = parseInt(document.querySelector("div#pageContainer1").style.width);
previousWidth = initialWidth;
runTests(document, window, function () {
closePDFViewer(window, finish);
// check that PDF is opened with internal viewer
ok(content.document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in content.wrappedJSObject, "window content has PDFJS object");

let initialWidth, previousWidth;
initialWidth = previousWidth =
parseInt(content.document.querySelector("div#pageContainer1").style.width);

for (let test of TESTS) {
// We zoom using an UI element
var ev;
if (test.action.selector) {
// Get the element and trigger the action for changing the zoom
var el = document.querySelector(test.action.selector);
ok(el, "Element '" + test.action.selector + "' has been found");

if (test.action.index){
el.selectedIndex = test.action.index;
}

// Dispatch the event for changing the zoom
ev = new Event(test.action.event);
}
// We zoom using keyboard
else {
// Simulate key press
ev = new content.KeyboardEvent("keydown",
{ key: test.action.event,
keyCode: test.action.keyCode,
ctrlKey: true });
el = content;
}

el.dispatchEvent(ev);
yield waitForRender();

var pageZoomScale = content.document.querySelector('select#scaleSelect');

// The zoom value displayed in the zoom select
var zoomValue = pageZoomScale.options[pageZoomScale.selectedIndex].innerHTML;

let pageContainer = content.document.querySelector('div#pageContainer1');
let actualWidth = parseInt(pageContainer.style.width);

// the actual zoom of the PDF document
let computedZoomValue = parseInt(((actualWidth/initialWidth).toFixed(2))*100) + "%";
is(computedZoomValue, zoomValue, "Content has correct zoom");

// Check that document zooms in the expected way (in/out)
let zoom = (actualWidth - previousWidth) * test.expectedZoom;
ok(zoom > 0, test.message);

previousWidth = actualWidth;
}

var viewer = content.wrappedJSObject.PDFViewerApplication;
yield viewer.close();
});
}, false, true);
}, true);
}

function runTests(document, window, callback) {
// check that PDF is opened with internal viewer
ok(document.querySelector('div#viewer'), "document content has viewer UI");
ok('PDFJS' in window.wrappedJSObject, "window content has PDFJS object");

// Start the zooming tests after the document is loaded
waitForDocumentLoad(document).then(function () {
zoomPDF(document, window, TESTS.shift(), callback);
});
}

function waitForDocumentLoad(document) {
var deferred = Promise.defer();
var interval = setInterval(function () {
if (document.querySelector("div#pageContainer1") != null){
clearInterval(interval);
deferred.resolve();
}
}, 500);

return deferred.promise;
}

function zoomPDF(document, window, test, endCallback) {
var renderedPage;

document.addEventListener("pagerendered", function onPageRendered(e) {
if(e.detail.pageNumber !== 1) {
return;
}

document.removeEventListener("pagerendered", onPageRendered, true);

var pageZoomScale = document.querySelector('select#scaleSelect');

// The zoom value displayed in the zoom select
var zoomValue = pageZoomScale.options[pageZoomScale.selectedIndex].innerHTML;

let pageContainer = document.querySelector('div#pageContainer1');
let actualWidth = parseInt(pageContainer.style.width);

// the actual zoom of the PDF document
let computedZoomValue = parseInt(((actualWidth/initialWidth).toFixed(2))*100) + "%";
is(computedZoomValue, zoomValue, "Content has correct zoom");

// Check that document zooms in the expected way (in/out)
let zoom = (actualWidth - previousWidth) * test.expectedZoom;
ok(zoom > 0, test.message);

// Go to next test (if there is any) or finish
var nextTest = TESTS.shift();
if (nextTest) {
previousWidth = actualWidth;
zoomPDF(document, window, nextTest, endCallback);
}
else
endCallback();
}, true);

// We zoom using an UI element
if (test.action.selector) {
// Get the element and trigger the action for changing the zoom
var el = document.querySelector(test.action.selector);
ok(el, "Element '" + test.action.selector + "' has been found");

if (test.action.index){
el.selectedIndex = test.action.index;
}

// Dispatch the event for changing the zoom
el.dispatchEvent(new Event(test.action.event));
}
// We zoom using keyboard
else {
// Simulate key press
EventUtils.synthesizeKey(test.action.event, { ctrlKey: true });
}
}

/**
* Destroys PDF.js viewer opened document.
*/
function closePDFViewer(window, callback) {
var viewer = window.wrappedJSObject.PDFViewerApplication;
viewer.close().then(callback);
}
});
});

@@ -0,0 +1,15 @@
function waitForPdfJS(browser, url) {
// Runs tests after all 'load' event handlers have fired off
return ContentTask.spawn(browser, url, function* (url) {
yield new Promise((resolve) => {
// NB: Add the listener to the global object so that we receive the
// event fired from the new window.
addEventListener("documentload", function listener() {
removeEventListener("documentload", listener, false);
resolve();
}, false, true);

content.location = url;
});
});
}

@@ -396,33 +396,6 @@ SerializeURI(nsIURI* aURI,
aURI->GetOriginCharset(aSerializedURI.charset);
}

static PLDHashOperator
EnumerateOverride(nsIURI* aURIKey,
nsIURI* aURI,
void* aArg)
{
nsTArray<OverrideMapping>* overrides =
static_cast<nsTArray<OverrideMapping>*>(aArg);

SerializedURI chromeURI, overrideURI;

SerializeURI(aURIKey, chromeURI);
SerializeURI(aURI, overrideURI);

OverrideMapping override = {
chromeURI, overrideURI
};
overrides->AppendElement(override);
return (PLDHashOperator)PL_DHASH_NEXT;
}

struct EnumerationArgs
{
InfallibleTArray<ChromePackage>& packages;
const nsCString& selectedLocale;
const nsCString& selectedSkin;
};

void
nsChromeRegistryChrome::SendRegisteredChrome(
mozilla::dom::PContentParent* aParent)

@@ -431,10 +404,12 @@ nsChromeRegistryChrome::SendRegisteredChrome(
InfallibleTArray<SubstitutionMapping> resources;
InfallibleTArray<OverrideMapping> overrides;

EnumerationArgs args = {
packages, mSelectedLocale, mSelectedSkin
};
mPackagesHash.EnumerateRead(CollectPackages, &args);
for (auto iter = mPackagesHash.Iter(); !iter.Done(); iter.Next()) {
ChromePackage chromePackage;
ChromePackageFromPackageEntry(iter.Key(), iter.UserData(), &chromePackage,
mSelectedLocale, mSelectedSkin);
packages.AppendElement(chromePackage);
}

// If we were passed a parent then a new child process has been created and
// has requested all of the chrome so send it the resources too. Otherwise

@@ -452,7 +427,15 @@ nsChromeRegistryChrome::SendRegisteredChrome(
rph->CollectSubstitutions(resources);
}

mOverrideTable.EnumerateRead(&EnumerateOverride, &overrides);
for (auto iter = mOverrideTable.Iter(); !iter.Done(); iter.Next()) {
SerializedURI chromeURI, overrideURI;

SerializeURI(iter.Key(), chromeURI);
SerializeURI(iter.UserData(), overrideURI);

OverrideMapping override = { chromeURI, overrideURI };
overrides.AppendElement(override);
}

if (aParent) {
bool success = aParent->SendRegisterChrome(packages, resources, overrides,

@@ -490,20 +473,6 @@ nsChromeRegistryChrome::ChromePackageFromPackageEntry(const nsACString& aPackage
aChromePackage->flags = aPackage->flags;
}

PLDHashOperator
nsChromeRegistryChrome::CollectPackages(const nsACString &aKey,
PackageEntry *package,
void *arg)
{
EnumerationArgs* args = static_cast<EnumerationArgs*>(arg);

ChromePackage chromePackage;
ChromePackageFromPackageEntry(aKey, package, &chromePackage,
args->selectedLocale, args->selectedSkin);
args->packages.AppendElement(chromePackage);
return PL_DHASH_NEXT;
}

static bool
CanLoadResource(nsIURI* aResourceURI)
{

@@ -59,9 +59,6 @@ class nsChromeRegistryChrome : public nsChromeRegistry
ChromePackage* aChromePackage,
const nsCString& aSelectedLocale,
const nsCString& aSelectedSkin);
static PLDHashOperator CollectPackages(const nsACString &aKey,
PackageEntry *package,
void *arg);

nsresult OverrideLocalePackage(const nsACString& aPackage,
nsACString& aOverride);

@@ -275,6 +275,8 @@ WebappsActor.prototype = {
aApp.localId = (aId in reg.webapps) ? reg.webapps[aId].localId
: reg._nextLocalId();
aApp.sideloaded = true;
aApp.enabled = true;
aApp.blockedStatus = Ci.nsIBlocklistService.STATE_NOT_BLOCKED;

reg.webapps[aId] = aApp;
reg.updatePermissionsForApp(aId);

@@ -910,6 +910,7 @@ GK_ATOM(onstatechange, "onstatechange")
GK_ATOM(onstatuschanged, "onstatuschanged")
GK_ATOM(onstkcommand, "onstkcommand")
GK_ATOM(onstksessionend, "onstksessionend")
GK_ATOM(onstorage, "onstorage")
GK_ATOM(onstorageareachanged, "onstorageareachanged")
GK_ATOM(onsubmit, "onsubmit")
GK_ATOM(onsuccess, "onsuccess")

@ -91,7 +91,7 @@ public:
|
|||
// Measurement of the following members may be added later if DMD finds it
|
||||
// is worthwhile:
|
||||
// - mGlobalName
|
||||
return mKey.SizeOfExcludingThisMustBeUnshared(aMallocSizeOf);
|
||||
return mKey.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -527,8 +527,10 @@ WINDOW_EVENT(popstate,
|
|||
eBasicEventClass)
|
||||
// Not supported yet
|
||||
// WINDOW_EVENT(redo)
|
||||
// Not supported yet
|
||||
// WINDOW_EVENT(storage)
|
||||
WINDOW_EVENT(storage,
|
||||
eStorage,
|
||||
EventNameType_HTMLBodyOrFramesetOnly,
|
||||
eBasicEventClass)
|
||||
// Not supported yet
|
||||
// WINDOW_EVENT(undo)
|
||||
WINDOW_EVENT(unload,
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
"Window attribute: onclose": true,
|
||||
"Window attribute: oncuechange": true,
|
||||
"Window attribute: onmousewheel": true,
|
||||
"Window attribute: onstorage": true,
|
||||
"Window unforgeable attribute: window": true,
|
||||
"Window unforgeable attribute: document": true,
|
||||
"Window unforgeable attribute: top": true
|
||||
|
|
|
@ -67,7 +67,7 @@ nsIDOMWindowInternalWarning=Use of nsIDOMWindowInternal is deprecated. Use nsIDO
|
|||
FullScreenDeniedDisabled=Request for full-screen was denied because full-screen API is disabled by user preference.
|
||||
FullScreenDeniedFocusedPlugin=Request for full-screen was denied because a windowed plugin is focused.
|
||||
FullScreenDeniedHidden=Request for full-screen was denied because the document is no longer visible.
|
||||
FullScreenDeniedContainerNotAllowed=Request for full-screen was denied because at least one of the document's containing element is not iframe or does not have an "allowfullscreen" attribute.
|
||||
FullScreenDeniedContainerNotAllowed=Request for full-screen was denied because at least one of the document's containing elements is not an iframe or does not have an "allowfullscreen" attribute.
|
||||
FullScreenDeniedNotInputDriven=Request for full-screen was denied because Element.mozRequestFullScreen() was not called from inside a short running user-generated event handler.
|
||||
FullScreenDeniedNotInDocument=Request for full-screen was denied because requesting element is no longer in its document.
|
||||
FullScreenDeniedMovedDocument=Request for full-screen was denied because requesting element has moved document.
|
||||
|
|
|
@@ -10,6 +10,7 @@
#include "mozilla/Attributes.h"
#include "mozilla/StateMirroring.h"

#include "MediaEventSource.h"
#include "MediaInfo.h"
#include "nsISupports.h"
#include "nsDataHashtable.h"

@@ -68,6 +69,15 @@ public:

virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() { return nullptr; };

// Return an event that will be notified when data arrives in MediaResource.
// MediaDecoderReader will register with this event to receive notifications
// in order to update buffer ranges.
// Return null if this decoder doesn't support the event.
virtual MediaEventSource<void>* DataArrivedEvent()
{
return nullptr;
}

protected:
virtual void UpdateEstimatedMediaDuration(int64_t aDuration) {};
public:

@@ -1447,10 +1447,13 @@ void
MediaDecoder::NotifyDataArrived() {
MOZ_ASSERT(NS_IsMainThread());

if (mDecoderStateMachine) {
mDecoderStateMachine->DispatchNotifyDataArrived();
// Don't publish events since task queues might be shutting down.
if (mShuttingDown) {
return;
}

mDataArrivedEvent.Notify();

// ReadyState computation depends on MediaDecoder::CanPlayThrough, which
// depends on the download rate.
UpdateReadyState();

@@ -820,6 +820,11 @@ protected:
RefPtr<MediaResource> mResource;

private:
MediaEventSource<void>*
DataArrivedEvent() override { return &mDataArrivedEvent; }

MediaEventProducer<void> mDataArrivedEvent;

// The state machine object for handling the decoding. It is safe to
// call methods of this object from other threads. Its internal data
// is synchronised on a monitor. The lifetime of this object is
@@ -79,6 +79,11 @@ MediaDecoderReader::MediaDecoderReader(AbstractMediaDecoder* aDecoder)
MOZ_COUNT_CTOR(MediaDecoderReader);
MOZ_ASSERT(NS_IsMainThread());

if (mDecoder && mDecoder->DataArrivedEvent()) {
mDataArrivedListener = mDecoder->DataArrivedEvent()->Connect(
mTaskQueue, this, &MediaDecoderReader::NotifyDataArrived);
}

// Dispatch initialization that needs to happen on that task queue.
nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethod(this, &MediaDecoderReader::InitializationTask);
mTaskQueue->Dispatch(r.forget());

@@ -360,6 +365,8 @@ MediaDecoderReader::Shutdown()
mBaseAudioPromise.RejectIfExists(END_OF_STREAM, __func__);
mBaseVideoPromise.RejectIfExists(END_OF_STREAM, __func__);

mDataArrivedListener.DisconnectIfExists();

ReleaseMediaResources();
mDuration.DisconnectIfConnected();
mBuffered.DisconnectAll();

@@ -223,15 +223,6 @@ public:
virtual size_t SizeOfVideoQueueInFrames();
virtual size_t SizeOfAudioQueueInFrames();

void DispatchNotifyDataArrived()
{
RefPtr<nsRunnable> r = NS_NewRunnableMethod(
this, &MediaDecoderReader::NotifyDataArrived);

OwnerThread()->Dispatch(
r.forget(), AbstractThread::DontAssertDispatchSuccess);
}

void NotifyDataArrived()
{
MOZ_ASSERT(OnTaskQueue());

@@ -428,6 +419,8 @@ private:
// "discontinuity" in the stream. For example after a seek.
bool mAudioDiscontinuity;
bool mVideoDiscontinuity;

MediaEventListener mDataArrivedListener;
};

} // namespace mozilla

@@ -168,11 +168,6 @@ public:
OwnerThread()->Dispatch(runnable.forget());
}

void DispatchNotifyDataArrived()
{
mReader->DispatchNotifyDataArrived();
}

// Notifies the state machine that should minimize the number of samples
// decoded we preroll, until playback starts. The first time playback starts
// the state machine is free to return to prerolling normally. Note
@@ -1229,11 +1229,11 @@ MediaStreamGraphImpl::UpdateMainThreadState()
bool
MediaStreamGraphImpl::OneIteration(GraphTime aStateEnd)
{
MaybeProduceMemoryReport();

// Process graph message from the main thread for this iteration.
RunMessagesInQueue();

MaybeProduceMemoryReport();

GraphTime stateEnd = std::min(aStateEnd, mEndTime);
UpdateGraph(stateEnd);

@@ -60,7 +60,7 @@ moz_gfx_memory_reset(MozGfxMemory *mem)
mem->image->Release();

ImageContainer* container = ((MozGfxMemoryAllocator*) mem->memory.allocator)->reader->GetImageContainer();
mem->image = reinterpret_cast<PlanarYCbCrImage*>(container->CreateImage(ImageFormat::PLANAR_YCBCR).take());
mem->image = container->CreatePlanarYCbCrImage().forget().take();
mem->data = mem->image->AllocateAndGetNewBuffer(mem->memory.size);
}
@@ -227,7 +227,7 @@ PDMFactory::CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
}

bool
PDMFactory::SupportsMimeType(const nsACString& aMimeType)
PDMFactory::SupportsMimeType(const nsACString& aMimeType) const
{
if (mEMEPDM) {
return mEMEPDM->SupportsMimeType(aMimeType);

@@ -305,7 +305,7 @@ PDMFactory::StartupPDM(PlatformDecoderModule* aPDM)
}

already_AddRefed<PlatformDecoderModule>
PDMFactory::GetDecoder(const nsACString& aMimeType)
PDMFactory::GetDecoder(const nsACString& aMimeType) const
{
RefPtr<PlatformDecoderModule> pdm;
for (auto& current : mCurrentPDMs) {

@@ -35,7 +35,7 @@ public:
layers::LayersBackend aLayersBackend = layers::LayersBackend::LAYERS_NONE,
layers::ImageContainer* aImageContainer = nullptr);

bool SupportsMimeType(const nsACString& aMimeType);
bool SupportsMimeType(const nsACString& aMimeType) const;

#ifdef MOZ_EME
// Creates a PlatformDecoderModule that uses a CDMProxy to decrypt or

@@ -52,7 +52,9 @@ private:
// Startup the provided PDM and add it to our list if successful.
bool StartupPDM(PlatformDecoderModule* aPDM);
// Returns the first PDM in our list supporting the mimetype.
already_AddRefed<PlatformDecoderModule> GetDecoder(const nsACString& aMimeType);
already_AddRefed<PlatformDecoderModule>
GetDecoder(const nsACString& aMimeType) const;

already_AddRefed<MediaDataDecoder>
CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
const TrackInfo& aConfig,

@@ -53,7 +53,7 @@ public:
virtual nsresult Startup() { return NS_OK; };

// Indicates if the PlatformDecoderModule supports decoding of aMimeType.
virtual bool SupportsMimeType(const nsACString& aMimeType) = 0;
virtual bool SupportsMimeType(const nsACString& aMimeType) const = 0;

enum ConversionRequired {
kNeedNone,
@@ -12,7 +12,7 @@
namespace mozilla {

bool
AgnosticDecoderModule::SupportsMimeType(const nsACString& aMimeType)
AgnosticDecoderModule::SupportsMimeType(const nsACString& aMimeType) const
{
return VPXDecoder::IsVPX(aMimeType) ||
OpusDataDecoder::IsOpus(aMimeType) ||

@@ -10,7 +10,7 @@ public:
AgnosticDecoderModule() = default;
virtual ~AgnosticDecoderModule() = default;

bool SupportsMimeType(const nsACString& aMimeType) override;
bool SupportsMimeType(const nsACString& aMimeType) const override;

ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override

@@ -246,7 +246,7 @@ public:
}

bool
SupportsMimeType(const nsACString& aMimeType) override
SupportsMimeType(const nsACString& aMimeType) const override
{
return true;
}
@@ -303,7 +303,7 @@ EMEDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
}

bool
EMEDecoderModule::SupportsMimeType(const nsACString& aMimeType)
EMEDecoderModule::SupportsMimeType(const nsACString& aMimeType) const
{
Maybe<nsCString> gmp;
gmp.emplace(NS_ConvertUTF16toUTF8(mProxy->KeySystem()));

@@ -45,7 +45,7 @@ protected:
DecoderNeedsConversion(const TrackInfo& aConfig) const override;

bool
SupportsMimeType(const nsACString& aMimeType) override;
SupportsMimeType(const nsACString& aMimeType) const override;

private:
RefPtr<CDMProxy> mProxy;

@@ -228,7 +228,7 @@ GMPDecoderModule::SupportsMimeType(const nsACString& aMimeType,
}

bool
GMPDecoderModule::SupportsMimeType(const nsACString& aMimeType)
GMPDecoderModule::SupportsMimeType(const nsACString& aMimeType) const
{
return SupportsMimeType(aMimeType, PreferredGMP(aMimeType));
}

@@ -36,7 +36,7 @@ public:
DecoderNeedsConversion(const TrackInfo& aConfig) const override;

bool
SupportsMimeType(const nsACString& aMimeType) override;
SupportsMimeType(const nsACString& aMimeType) const override;

// Main thread only.
static void Init();
@ -18,11 +18,28 @@
|
|||
#include "nsAutoPtr.h"
|
||||
#include "nsPromiseFlatString.h"
|
||||
|
||||
#include "prlog.h"
|
||||
|
||||
#include <jni.h>
|
||||
|
||||
static PRLogModuleInfo* AndroidDecoderModuleLog()
|
||||
{
|
||||
static PRLogModuleInfo* sLogModule = nullptr;
|
||||
if (!sLogModule) {
|
||||
sLogModule = PR_NewLogModule("AndroidDecoderModule");
|
||||
}
|
||||
return sLogModule;
|
||||
}
|
||||
|
||||
#undef LOG
|
||||
#define LOG(arg, ...) MOZ_LOG(AndroidDecoderModuleLog(), \
|
||||
mozilla::LogLevel::Debug, ("AndroidDecoderModule(%p)::%s: " arg, \
|
||||
this, __func__, ##__VA_ARGS__))
|
||||
|
||||
using namespace mozilla;
|
||||
using namespace mozilla::gl;
|
||||
using namespace mozilla::widget::sdk;
|
||||
using media::TimeUnit;
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
|
@ -30,10 +47,11 @@ namespace mozilla {
|
|||
if (mCallback) { \
|
||||
mCallback->Func(__VA_ARGS__); \
|
||||
} else { \
|
||||
NS_WARNING("callback not set"); \
|
||||
NS_WARNING("Callback not set"); \
|
||||
}
|
||||
|
||||
static const char* TranslateMimeType(const nsACString& aMimeType)
|
||||
static const char*
|
||||
TranslateMimeType(const nsACString& aMimeType)
|
||||
{
|
||||
if (aMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
|
||||
return "video/x-vnd.on2.vp8";
|
||||
|
@ -43,7 +61,8 @@ static const char* TranslateMimeType(const nsACString& aMimeType)
|
|||
return PromiseFlatCString(aMimeType).get();
|
||||
}
|
||||
|
||||
static MediaCodec::LocalRef CreateDecoder(const nsACString& aMimeType)
|
||||
static MediaCodec::LocalRef
|
||||
CreateDecoder(const nsACString& aMimeType)
|
||||
{
|
||||
MediaCodec::LocalRef codec;
|
||||
NS_ENSURE_SUCCESS(MediaCodec::CreateDecoderByType(TranslateMimeType(aMimeType),
|
||||
|
@ -51,19 +70,23 @@ static MediaCodec::LocalRef CreateDecoder(const nsACString& aMimeType)
|
|||
return codec;
|
||||
}
|
||||
|
||||
class VideoDataDecoder : public MediaCodecDataDecoder {
|
||||
class VideoDataDecoder : public MediaCodecDataDecoder
|
||||
{
|
||||
public:
|
||||
VideoDataDecoder(const VideoInfo& aConfig,
|
||||
MediaFormat::Param aFormat, MediaDataDecoderCallback* aCallback,
|
||||
MediaFormat::Param aFormat,
|
||||
MediaDataDecoderCallback* aCallback,
|
||||
layers::ImageContainer* aImageContainer)
|
||||
: MediaCodecDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType, aFormat, aCallback)
|
||||
: MediaCodecDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
|
||||
aFormat, aCallback)
|
||||
, mImageContainer(aImageContainer)
|
||||
, mConfig(aConfig)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
RefPtr<InitPromise> Init() override {
|
||||
RefPtr<InitPromise> Init() override
|
||||
{
|
||||
mSurfaceTexture = AndroidSurfaceTexture::Create();
|
||||
if (!mSurfaceTexture) {
|
||||
NS_WARNING("Failed to create SurfaceTexture for video decode\n");
|
||||
|
@ -77,22 +100,26 @@ public:
|
|||
return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
|
||||
}
|
||||
|
||||
void Cleanup() override {
|
||||
void Cleanup() override
|
||||
{
|
||||
mGLContext = nullptr;
|
||||
}
|
||||
|
||||
nsresult Input(MediaRawData* aSample) override {
|
||||
nsresult Input(MediaRawData* aSample) override
|
||||
{
|
||||
return MediaCodecDataDecoder::Input(aSample);
|
||||
}
|
||||
|
||||
bool WantCopy() {
|
||||
bool WantCopy() const
|
||||
{
|
||||
// Allocating a texture is incredibly slow on PowerVR and may fail on
|
||||
// emulators, see bug 1190379.
|
||||
return mGLContext->Vendor() != GLVendor::Imagination &&
|
||||
mGLContext->Renderer() != GLRenderer::AndroidEmulator;
|
||||
}
|
||||
|
||||
EGLImage CopySurface(layers::Image* img) {
|
||||
EGLImage CopySurface(layers::Image* img)
|
||||
{
|
||||
mGLContext->MakeCurrent();
|
||||
|
||||
GLuint tex = CreateTextureForOffscreen(mGLContext, mGLContext->GetGLFormats(),
|
||||
|
@ -100,8 +127,8 @@ public:
|
|||
|
||||
auto helper = mGLContext->BlitHelper();
|
||||
const gl::OriginPos destOrigin = gl::OriginPos::TopLeft;
|
||||
if (!helper->BlitImageToTexture(img, img->GetSize(), tex, LOCAL_GL_TEXTURE_2D,
|
||||
destOrigin))
|
||||
if (!helper->BlitImageToTexture(img, img->GetSize(), tex,
|
||||
LOCAL_GL_TEXTURE_2D, destOrigin))
|
||||
{
|
||||
mGLContext->fDeleteTextures(1, &tex);
|
||||
return nullptr;
|
||||
|
@ -113,22 +140,24 @@ public:
|
|||
};
|
||||
|
||||
EGLContext eglContext = static_cast<GLContextEGL*>(mGLContext.get())->mContext;
|
||||
EGLImage eglImage = sEGLLibrary.fCreateImage(EGL_DISPLAY(), eglContext,
|
||||
LOCAL_EGL_GL_TEXTURE_2D_KHR,
|
||||
(EGLClientBuffer)tex, attribs);
|
||||
EGLImage eglImage = sEGLLibrary.fCreateImage(
|
||||
EGL_DISPLAY(), eglContext, LOCAL_EGL_GL_TEXTURE_2D_KHR,
|
||||
reinterpret_cast<EGLClientBuffer>(tex), attribs);
|
||||
mGLContext->fDeleteTextures(1, &tex);
|
||||
|
||||
return eglImage;
|
||||
}
|
||||
|
||||
nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
|
||||
const media::TimeUnit& aDuration) override {
|
||||
const TimeUnit& aDuration) override
|
||||
{
|
||||
if (!EnsureGLContext()) {
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
|
||||
RefPtr<layers::Image> img =
|
||||
new SurfaceTextureImage(mSurfaceTexture.get(), mConfig.mDisplay, gl::OriginPos::BottomLeft);
|
||||
new SurfaceTextureImage(mSurfaceTexture.get(), mConfig.mDisplay,
|
||||
gl::OriginPos::BottomLeft);
|
||||
|
||||
if (WantCopy()) {
|
||||
EGLImage eglImage = CopySurface(img);
|
||||
|
@ -150,10 +179,8 @@ public:
|
|||
NS_WARNING("No EGL fence support detected, rendering artifacts may occur!");
|
||||
}
|
||||
|
||||
img = new layers::EGLImageImage(
|
||||
eglImage, eglSync,
|
||||
mConfig.mDisplay, gl::OriginPos::TopLeft,
|
||||
true /* owns */);
|
||||
img = new layers::EGLImageImage(eglImage, eglSync, mConfig.mDisplay,
|
||||
gl::OriginPos::TopLeft, true /* owns */);
|
||||
}
|
||||
|
||||
nsresult rv;
|
||||
|
@ -185,7 +212,8 @@ public:
|
|||
}
|
||||
|
||||
protected:
|
||||
bool EnsureGLContext() {
|
||||
bool EnsureGLContext()
|
||||
{
|
||||
if (mGLContext) {
|
||||
return true;
|
||||
}
|
||||
|
@ -200,29 +228,33 @@ protected:
|
|||
RefPtr<GLContext> mGLContext;
|
||||
};
|
||||
|
||||
class AudioDataDecoder : public MediaCodecDataDecoder {
|
||||
|
||||
class AudioDataDecoder : public MediaCodecDataDecoder
|
||||
{
|
||||
public:
|
||||
AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat, MediaDataDecoderCallback* aCallback)
|
||||
: MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType, aFormat, aCallback)
|
||||
AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
|
||||
MediaDataDecoderCallback* aCallback)
|
||||
: MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
|
||||
aFormat, aCallback)
|
||||
{
|
||||
JNIEnv* const env = jni::GetEnvForThread();
|
||||
|
||||
jni::Object::LocalRef buffer(env);
|
||||
NS_ENSURE_SUCCESS_VOID(aFormat->GetByteBuffer(NS_LITERAL_STRING("csd-0"), &buffer));
|
||||
NS_ENSURE_SUCCESS_VOID(aFormat->GetByteBuffer(NS_LITERAL_STRING("csd-0"),
|
||||
&buffer));
|
||||
|
||||
if (!buffer && aConfig.mCodecSpecificConfig->Length() >= 2) {
|
||||
buffer = jni::Object::LocalRef::Adopt(env, env->NewDirectByteBuffer(aConfig.mCodecSpecificConfig->Elements(),
|
||||
aConfig.mCodecSpecificConfig->Length()));
|
||||
NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"), buffer));
|
||||
buffer = jni::Object::LocalRef::Adopt(
|
||||
env, env->NewDirectByteBuffer(aConfig.mCodecSpecificConfig->Elements(),
|
||||
aConfig.mCodecSpecificConfig->Length()));
|
||||
NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"),
|
||||
buffer));
|
||||
}
|
||||
}
|
||||
|
||||
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
|
||||
MediaFormat::Param aFormat,
|
||||
const media::TimeUnit& aDuration) {
|
||||
MediaFormat::Param aFormat, const TimeUnit& aDuration)
|
||||
{
|
||||
// The output on Android is always 16-bit signed
|
||||
|
||||
nsresult rv;
|
||||
int32_t numChannels;
|
||||
NS_ENSURE_SUCCESS(rv =
|
||||
|
@ -239,7 +271,7 @@ public:
|
|||
NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
|
||||
|
||||
#ifdef MOZ_SAMPLE_TYPE_S16
|
||||
int32_t numSamples = size / 2;
|
||||
const int32_t numSamples = size / 2;
|
||||
#else
|
||||
#error We only support 16-bit integer PCM
|
||||
#endif
|
||||
|
@ -247,8 +279,9 @@ public:
|
|||
const int32_t numFrames = numSamples / numChannels;
|
||||
auto audio = MakeUnique<AudioDataValue[]>(numSamples);
|
||||
|
||||
uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
|
||||
PodCopy(audio.get(), reinterpret_cast<AudioDataValue*>(bufferStart), numSamples);
|
||||
const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
|
||||
PodCopy(audio.get(), reinterpret_cast<const AudioDataValue*>(bufferStart),
|
||||
numSamples);
|
||||
|
||||
int64_t presentationTimeUs;
|
||||
NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
|
||||
|
@ -264,10 +297,11 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
|
||||
bool AndroidDecoderModule::SupportsMimeType(const nsACString& aMimeType)
|
||||
bool
|
||||
AndroidDecoderModule::SupportsMimeType(const nsACString& aMimeType) const
|
||||
{
|
||||
if (!AndroidBridge::Bridge() || (AndroidBridge::Bridge()->GetAPIVersion() < 16)) {
|
||||
if (!AndroidBridge::Bridge() ||
|
||||
(AndroidBridge::Bridge()->GetAPIVersion() < 16)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -286,11 +320,9 @@ bool AndroidDecoderModule::SupportsMimeType(const nsACString& aMimeType)
|
|||
|
||||
already_AddRefed<MediaDataDecoder>
|
||||
AndroidDecoderModule::CreateVideoDecoder(
|
||||
const VideoInfo& aConfig,
|
||||
layers::LayersBackend aLayersBackend,
|
||||
layers::ImageContainer* aImageContainer,
|
||||
FlushableTaskQueue* aVideoTaskQueue,
|
||||
MediaDataDecoderCallback* aCallback)
|
||||
const VideoInfo& aConfig, layers::LayersBackend aLayersBackend,
|
||||
layers::ImageContainer* aImageContainer, FlushableTaskQueue* aVideoTaskQueue,
|
||||
MediaDataDecoderCallback* aCallback)
|
||||
{
|
||||
MediaFormat::LocalRef format;
|
||||
|
||||
|
@ -307,9 +339,9 @@ AndroidDecoderModule::CreateVideoDecoder(
|
|||
}
|
||||
|
||||
already_AddRefed<MediaDataDecoder>
|
||||
AndroidDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
|
||||
FlushableTaskQueue* aAudioTaskQueue,
|
||||
MediaDataDecoderCallback* aCallback)
|
||||
AndroidDecoderModule::CreateAudioDecoder(
|
||||
const AudioInfo& aConfig, FlushableTaskQueue* aAudioTaskQueue,
|
||||
MediaDataDecoderCallback* aCallback)
|
||||
{
|
||||
MOZ_ASSERT(aConfig.mBitDepth == 16, "We only handle 16-bit audio!");
|
||||
|
||||
|
@ -325,7 +357,6 @@ AndroidDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
|
|||
new AudioDataDecoder(aConfig, format, aCallback);
|
||||
|
||||
return decoder.forget();
|
||||
|
||||
}
|
||||
|
||||
PlatformDecoderModule::ConversionRequired
|
||||
|
@ -333,9 +364,8 @@ AndroidDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
|
|||
{
|
||||
if (aConfig.IsVideo()) {
|
||||
return kNeedAnnexB;
|
||||
} else {
|
||||
return kNeedNone;
|
||||
}
|
||||
return kNeedNone;
|
||||
}
|
||||
|
||||
MediaCodecDataDecoder::MediaCodecDataDecoder(MediaData::Type aType,
|
||||
|
@ -349,9 +379,7 @@ MediaCodecDataDecoder::MediaCodecDataDecoder(MediaData::Type aType,
|
|||
, mInputBuffers(nullptr)
|
||||
, mOutputBuffers(nullptr)
|
||||
, mMonitor("MediaCodecDataDecoder::mMonitor")
|
||||
, mFlushing(false)
|
||||
, mDraining(false)
|
||||
, mStopping(false)
|
||||
, mState(kDecoding)
|
||||
{
|
||||
|
||||
}
|
||||
|
@ -361,7 +389,8 @@ MediaCodecDataDecoder::~MediaCodecDataDecoder()
|
|||
Shutdown();
|
||||
}
|
||||
|
||||
RefPtr<MediaDataDecoder::InitPromise> MediaCodecDataDecoder::Init()
|
||||
RefPtr<MediaDataDecoder::InitPromise>
|
||||
MediaCodecDataDecoder::Init()
|
||||
{
|
||||
nsresult rv = InitDecoder(nullptr);
|
||||
|
||||
|
@ -371,10 +400,12 @@ RefPtr<MediaDataDecoder::InitPromise> MediaCodecDataDecoder::Init()
|
|||
|
||||
return NS_SUCCEEDED(rv) ?
|
||||
InitPromise::CreateAndResolve(type, __func__) :
|
||||
InitPromise::CreateAndReject(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__);
|
||||
InitPromise::CreateAndReject(
|
||||
MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__);
|
||||
}
|
||||
|
||||
nsresult MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
|
||||
nsresult
|
||||
MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
|
||||
{
|
||||
mDecoder = CreateDecoder(mMimeType);
|
||||
if (!mDecoder) {
|
||||
|
@ -395,224 +426,312 @@ nsresult MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
|
|||
return NS_OK;
|
||||
}
|
||||
|
||||
// This is in usec, so that's 10ms
|
||||
#define DECODER_TIMEOUT 10000
|
||||
// This is in usec, so that's 10ms.
|
||||
static const int64_t kDecoderTimeout = 10000;
|
||||
|
||||
#define HANDLE_DECODER_ERROR() \
|
||||
#define BREAK_ON_DECODER_ERROR() \
|
||||
if (NS_FAILED(res)) { \
|
||||
NS_WARNING("exiting decoder loop due to exception"); \
|
||||
if (mDraining) { \
|
||||
NS_WARNING("Exiting decoder loop due to exception"); \
|
||||
if (State() == kDrainDecoder) { \
|
||||
INVOKE_CALLBACK(DrainComplete); \
|
||||
mDraining = false; \
|
||||
State(kDecoding); \
|
||||
} \
|
||||
INVOKE_CALLBACK(Error); \
|
||||
break; \
|
||||
}
|
||||
|
||||
nsresult MediaCodecDataDecoder::GetInputBuffer(JNIEnv* env, int index, jni::Object::LocalRef* buffer)
|
||||
nsresult
|
||||
MediaCodecDataDecoder::GetInputBuffer(
|
||||
JNIEnv* aEnv, int aIndex, jni::Object::LocalRef* aBuffer)
|
||||
{
|
||||
bool retried = false;
|
||||
while (!*buffer) {
|
||||
*buffer = jni::Object::LocalRef::Adopt(env->GetObjectArrayElement(mInputBuffers.Get(), index));
|
||||
if (!*buffer) {
|
||||
if (!retried) {
|
||||
// Reset the input buffers and then try again
|
||||
nsresult res = ResetInputBuffers();
|
||||
if (NS_FAILED(res)) {
|
||||
return res;
|
||||
}
|
||||
retried = true;
|
||||
} else {
|
||||
// We already tried resetting the input buffers, return an error
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
MOZ_ASSERT(aEnv);
|
||||
MOZ_ASSERT(!*aBuffer);
|
||||
|
||||
int numTries = 2;
|
||||
|
||||
while (numTries--) {
|
||||
*aBuffer = jni::Object::LocalRef::Adopt(
|
||||
aEnv->GetObjectArrayElement(mInputBuffers.Get(), aIndex));
|
||||
if (*aBuffer) {
|
||||
return NS_OK;
|
||||
}
|
||||
nsresult res = ResetInputBuffers();
|
||||
if (NS_FAILED(res)) {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
|
||||
bool
|
||||
MediaCodecDataDecoder::WaitForInput()
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
|
||||
while (State() == kDecoding && mQueue.empty()) {
|
||||
// Signal that we require more input.
|
||||
INVOKE_CALLBACK(InputExhausted);
|
||||
lock.Wait();
|
||||
}
|
||||
|
||||
return State() != kStopping;
|
||||
}
|
||||
|
||||
|
||||
MediaRawData*
|
||||
MediaCodecDataDecoder::PeekNextSample()
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
|
||||
if (State() == kFlushing) {
|
||||
mDecoder->Flush();
|
||||
ClearQueue();
|
||||
State(kDecoding);
|
||||
lock.Notify();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (mQueue.empty()) {
|
||||
if (State() == kDrainQueue) {
|
||||
State(kDrainDecoder);
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// We're not stopping or flushing, so try to get a sample.
|
||||
return mQueue.front();
|
||||
}
|
||||
|
||||
nsresult
|
||||
MediaCodecDataDecoder::QueueSample(const MediaRawData* aSample)
|
||||
{
|
||||
MOZ_ASSERT(aSample);
|
||||
AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
|
||||
|
||||
// We have a sample, try to feed it to the decoder.
|
||||
int32_t inputIndex = -1;
|
||||
nsresult res = mDecoder->DequeueInputBuffer(kDecoderTimeout, &inputIndex);
|
||||
if (NS_FAILED(res)) {
|
||||
return res;
|
||||
}
|
||||
|
||||
if (inputIndex < 0) {
|
||||
// There is no valid input buffer available.
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
|
||||
jni::Object::LocalRef buffer(frame.GetEnv());
|
||||
res = GetInputBuffer(frame.GetEnv(), inputIndex, &buffer);
|
||||
if (NS_FAILED(res)) {
|
||||
return res;
|
||||
}
|
||||
|
||||
void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
|
||||
|
||||
MOZ_ASSERT(frame.GetEnv()->GetDirectBufferCapacity(buffer.Get()) >=
|
||||
aSample->Size(),
|
||||
"Decoder buffer is not large enough for sample");
|
||||
|
||||
PodCopy(static_cast<uint8_t*>(directBuffer), aSample->Data(), aSample->Size());
|
||||
|
||||
res = mDecoder->QueueInputBuffer(inputIndex, 0, aSample->Size(),
|
||||
aSample->mTime, 0);
|
||||
if (NS_FAILED(res)) {
|
||||
return res;
|
||||
}
|
||||
|
||||
mDurations.push(TimeUnit::FromMicroseconds(aSample->mDuration));
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
nsresult
|
||||
MediaCodecDataDecoder::QueueEOS()
|
||||
{
|
||||
mMonitor.AssertCurrentThreadOwns();
|
||||
|
||||
nsresult res = NS_OK;
|
||||
int32_t inputIndex = -1;
|
||||
res = mDecoder->DequeueInputBuffer(kDecoderTimeout, &inputIndex);
|
||||
if (NS_FAILED(res) || inputIndex < 0) {
|
||||
return res;
|
||||
}
|
||||
|
||||
res = mDecoder->QueueInputBuffer(inputIndex, 0, 0, 0,
|
||||
MediaCodec::BUFFER_FLAG_END_OF_STREAM);
|
||||
if (NS_SUCCEEDED(res)) {
|
||||
State(kDrainWaitEOS);
|
||||
mMonitor.Notify();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void
|
||||
MediaCodecDataDecoder::HandleEOS(int32_t aOutputStatus)
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
|
||||
if (State() == kDrainWaitEOS) {
|
||||
State(kDecoding);
|
||||
mMonitor.Notify();
|
||||
|
||||
INVOKE_CALLBACK(DrainComplete);
|
||||
}
|
||||
|
||||
mDecoder->ReleaseOutputBuffer(aOutputStatus, false);
|
||||
}
|
||||
|
||||
TimeUnit
|
||||
MediaCodecDataDecoder::GetOutputDuration()
|
||||
{
|
||||
MOZ_ASSERT(!mDurations.empty(), "Should have had a duration queued");
|
||||
const TimeUnit duration = mDurations.front();
|
||||
mDurations.pop();
|
||||
return duration;
|
||||
}
|
||||
|
||||
nsresult
|
||||
MediaCodecDataDecoder::ProcessOutput(
|
||||
BufferInfo::Param aInfo, MediaFormat::Param aFormat, int32_t aStatus)
|
||||
{
|
||||
AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
|
||||
|
||||
const TimeUnit duration = GetOutputDuration();
|
||||
const auto buffer = jni::Object::LocalRef::Adopt(
|
||||
frame.GetEnv()->GetObjectArrayElement(mOutputBuffers.Get(), aStatus));
|
||||
|
||||
if (buffer) {
|
||||
// The buffer will be null on Android L if we are decoding to a Surface.
|
||||
void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
|
||||
Output(aInfo, directBuffer, aFormat, duration);
|
||||
}
|
||||
|
||||
// The Surface will be updated at this point (for video).
|
||||
mDecoder->ReleaseOutputBuffer(aStatus, true);
|
||||
PostOutput(aInfo, aFormat, duration);
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
void MediaCodecDataDecoder::DecoderLoop()
|
||||
void
|
||||
MediaCodecDataDecoder::DecoderLoop()
|
||||
{
|
||||
bool outputDone = false;
|
||||
|
||||
bool draining = false;
|
||||
bool waitingEOF = false;
|
||||
|
||||
bool isOutputDone = false;
|
||||
AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
|
||||
RefPtr<MediaRawData> sample;
|
||||
|
||||
MediaFormat::LocalRef outputFormat(frame.GetEnv());
|
||||
nsresult res;
|
||||
nsresult res = NS_OK;
|
||||
|
||||
while (WaitForInput()) {
|
||||
sample = PeekNextSample();
|
||||
|
||||
for (;;) {
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
while (!mStopping && !mDraining && !mFlushing && mQueue.empty()) {
|
||||
if (mQueue.empty()) {
|
||||
// We could be waiting here forever if we don't signal that we need more input
|
||||
INVOKE_CALLBACK(InputExhausted);
|
||||
}
|
||||
lock.Wait();
|
||||
}
|
||||
|
||||
if (mStopping) {
|
||||
// Get out of the loop. This is the only exit point.
|
||||
break;
|
||||
}
|
||||
|
||||
if (mFlushing) {
|
||||
mDecoder->Flush();
|
||||
ClearQueue();
|
||||
mFlushing = false;
|
||||
lock.Notify();
|
||||
continue;
|
||||
}
|
||||
|
||||
// We're not stopping or draining, so try to get a sample
|
||||
if (!mQueue.empty()) {
|
||||
sample = mQueue.front();
|
||||
}
|
||||
|
||||
if (mDraining && !sample && !waitingEOF) {
|
||||
draining = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (draining && !waitingEOF) {
|
||||
MOZ_ASSERT(!sample, "Shouldn't have a sample when pushing EOF frame");
|
||||
|
||||
int32_t inputIndex;
|
||||
res = mDecoder->DequeueInputBuffer(DECODER_TIMEOUT, &inputIndex);
|
||||
HANDLE_DECODER_ERROR();
|
||||
|
||||
if (inputIndex >= 0) {
|
||||
res = mDecoder->QueueInputBuffer(inputIndex, 0, 0, 0, MediaCodec::BUFFER_FLAG_END_OF_STREAM);
|
||||
HANDLE_DECODER_ERROR();
|
||||
|
||||
waitingEOF = true;
|
||||
if (State() == kDrainDecoder) {
|
||||
MOZ_ASSERT(!sample, "Shouldn't have a sample when pushing EOF frame");
|
||||
res = QueueEOS();
|
||||
BREAK_ON_DECODER_ERROR();
|
||||
}
|
||||
}
|
||||
|
||||
if (sample) {
|
||||
// We have a sample, try to feed it to the decoder
|
||||
int inputIndex;
|
||||
res = mDecoder->DequeueInputBuffer(DECODER_TIMEOUT, &inputIndex);
|
||||
HANDLE_DECODER_ERROR();
|
||||
|
||||
if (inputIndex >= 0) {
|
||||
jni::Object::LocalRef buffer(frame.GetEnv());
|
||||
res = GetInputBuffer(frame.GetEnv(), inputIndex, &buffer);
|
||||
HANDLE_DECODER_ERROR();
|
||||
|
||||
void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
|
||||
|
||||
MOZ_ASSERT(frame.GetEnv()->GetDirectBufferCapacity(buffer.Get()) >= sample->Size(),
|
||||
"Decoder buffer is not large enough for sample");
|
||||
|
||||
{
|
||||
// We're feeding this to the decoder, so remove it from the queue
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
mQueue.pop();
|
||||
}
|
||||
|
||||
PodCopy((uint8_t*)directBuffer, sample->Data(), sample->Size());
|
||||
|
||||
res = mDecoder->QueueInputBuffer(inputIndex, 0, sample->Size(),
|
||||
sample->mTime, 0);
|
||||
HANDLE_DECODER_ERROR();
|
||||
|
||||
mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));
|
||||
sample = nullptr;
|
||||
outputDone = false;
|
||||
res = QueueSample(sample);
|
||||
if (NS_SUCCEEDED(res)) {
|
||||
// We've fed this into the decoder, so remove it from the queue.
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
mQueue.pop();
|
||||
isOutputDone = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!outputDone) {
|
||||
BufferInfo::LocalRef bufferInfo;
|
||||
res = BufferInfo::New(&bufferInfo);
|
||||
HANDLE_DECODER_ERROR();
|
||||
if (isOutputDone) {
|
||||
continue;
|
||||
}
|
||||
|
||||
int32_t outputStatus;
|
||||
res = mDecoder->DequeueOutputBuffer(bufferInfo, DECODER_TIMEOUT, &outputStatus);
|
||||
HANDLE_DECODER_ERROR();
|
||||
BufferInfo::LocalRef bufferInfo;
|
||||
nsresult res = BufferInfo::New(&bufferInfo);
|
||||
BREAK_ON_DECODER_ERROR();
|
||||
|
||||
if (outputStatus == MediaCodec::INFO_TRY_AGAIN_LATER) {
|
||||
// We might want to call mCallback->InputExhausted() here, but there seems to be
|
||||
// some possible bad interactions here with the threading
|
||||
} else if (outputStatus == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
|
||||
res = ResetOutputBuffers();
|
||||
HANDLE_DECODER_ERROR();
|
||||
} else if (outputStatus == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
|
||||
res = mDecoder->GetOutputFormat(ReturnTo(&outputFormat));
|
||||
HANDLE_DECODER_ERROR();
|
||||
} else if (outputStatus < 0) {
|
||||
NS_WARNING("unknown error from decoder!");
|
||||
INVOKE_CALLBACK(Error);
|
||||
int32_t outputStatus = -1;
|
||||
res = mDecoder->DequeueOutputBuffer(bufferInfo, kDecoderTimeout,
|
||||
&outputStatus);
|
||||
BREAK_ON_DECODER_ERROR();
|
||||
|
||||
// Don't break here just in case it's recoverable. If it's not, others stuff will fail later and
|
||||
// we'll bail out.
|
||||
} else {
|
||||
int32_t flags;
|
||||
res = bufferInfo->Flags(&flags);
|
||||
HANDLE_DECODER_ERROR();
|
||||
if (outputStatus == MediaCodec::INFO_TRY_AGAIN_LATER) {
|
||||
// We might want to call mCallback->InputExhausted() here, but there seems
|
||||
// to be some possible bad interactions here with the threading.
|
||||
} else if (outputStatus == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
|
||||
res = ResetOutputBuffers();
|
||||
BREAK_ON_DECODER_ERROR();
|
||||
} else if (outputStatus == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
|
||||
res = mDecoder->GetOutputFormat(ReturnTo(&outputFormat));
|
||||
BREAK_ON_DECODER_ERROR();
|
||||
} else if (outputStatus < 0) {
|
||||
NS_WARNING("Unknown error from decoder!");
|
||||
INVOKE_CALLBACK(Error);
|
||||
// Don't break here just in case it's recoverable. If it's not, other
|
||||
// stuff will fail later and we'll bail out.
|
||||
} else {
|
||||
// We have a valid buffer index >= 0 here.
|
||||
int32_t flags;
|
||||
nsresult res = bufferInfo->Flags(&flags);
|
||||
BREAK_ON_DECODER_ERROR();
|
||||
|
||||
// We have a valid buffer index >= 0 here
|
||||
if (flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) {
|
||||
if (draining) {
|
||||
draining = false;
|
||||
waitingEOF = false;
|
||||
|
||||
mMonitor.Lock();
|
||||
mDraining = false;
|
||||
mMonitor.Notify();
|
||||
mMonitor.Unlock();
|
||||
|
||||
INVOKE_CALLBACK(DrainComplete);
|
||||
}
|
||||
|
||||
mDecoder->ReleaseOutputBuffer(outputStatus, false);
|
||||
outputDone = true;
|
||||
|
||||
// We only queue empty EOF frames, so we're done for now
|
||||
continue;
|
||||
}
|
||||
|
||||
MOZ_ASSERT(!mDurations.empty(), "Should have had a duration queued");
|
||||
|
||||
media::TimeUnit duration;
|
||||
if (!mDurations.empty()) {
|
||||
duration = mDurations.front();
|
||||
mDurations.pop();
|
||||
}
|
||||
|
||||
auto buffer = jni::Object::LocalRef::Adopt(
|
||||
frame.GetEnv()->GetObjectArrayElement(mOutputBuffers.Get(), outputStatus));
|
||||
if (buffer) {
|
||||
// The buffer will be null on Android L if we are decoding to a Surface
|
||||
void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
|
||||
Output(bufferInfo, directBuffer, outputFormat, duration);
|
||||
}
|
||||
|
||||
// The Surface will be updated at this point (for video)
|
||||
mDecoder->ReleaseOutputBuffer(outputStatus, true);
|
||||
|
||||
PostOutput(bufferInfo, outputFormat, duration);
|
||||
if (flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) {
|
||||
HandleEOS(outputStatus);
|
||||
isOutputDone = true;
|
||||
// We only queue empty EOF frames, so we're done for now.
|
||||
continue;
|
||||
}
|
||||
|
||||
res = ProcessOutput(bufferInfo, outputFormat, outputStatus);
|
||||
BREAK_ON_DECODER_ERROR();
|
||||
}
|
||||
}
|
||||
|
||||
Cleanup();
|
||||
|
||||
// We're done
|
||||
// We're done.
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
mStopping = false;
|
||||
State(kShutdown);
|
||||
mMonitor.Notify();
|
||||
}
|
||||
|
||||
void MediaCodecDataDecoder::ClearQueue()
|
||||
const char*
|
||||
MediaCodecDataDecoder::ModuleStateStr(ModuleState aState) {
|
||||
static const char* kStr[] = {
|
||||
"Decoding", "Flushing", "DrainQueue", "DrainDecoder", "DrainWaitEOS",
|
||||
"Stopping", "Shutdown"
|
||||
};
|
||||
|
||||
MOZ_ASSERT(aState < sizeof(kStr) / sizeof(kStr[0]));
|
||||
return kStr[aState];
|
||||
}
|
||||
|
||||
MediaCodecDataDecoder::ModuleState
|
||||
MediaCodecDataDecoder::State() const
|
||||
{
|
||||
return mState;
|
||||
}
|
||||
|
||||
void
|
||||
MediaCodecDataDecoder::State(ModuleState aState)
|
||||
{
|
||||
LOG("%s -> %s", ModuleStateStr(mState), ModuleStateStr(aState));
|
||||
|
||||
if (aState == kDrainDecoder) {
|
||||
MOZ_ASSERT(mState == kDrainQueue);
|
||||
} else if (aState == kDrainWaitEOS) {
|
||||
MOZ_ASSERT(mState == kDrainDecoder);
|
||||
}
|
||||
|
||||
mState = aState;
|
||||
}
|
||||
|
||||
void
|
||||
MediaCodecDataDecoder::ClearQueue()
|
||||
{
|
||||
mMonitor.AssertCurrentThreadOwns();
|
||||
|
||||
while (!mQueue.empty()) {
|
||||
mQueue.pop();
|
||||
}
|
||||
|
@ -621,7 +740,9 @@ void MediaCodecDataDecoder::ClearQueue()
|
|||
}
|
||||
}
|
||||
|
||||
nsresult MediaCodecDataDecoder::Input(MediaRawData* aSample) {
|
||||
nsresult
|
||||
MediaCodecDataDecoder::Input(MediaRawData* aSample)
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
mQueue.push(aSample);
|
||||
lock.NotifyAll();
|
||||
|
@ -629,53 +750,61 @@ nsresult MediaCodecDataDecoder::Input(MediaRawData* aSample) {
|
|||
return NS_OK;
|
||||
}
|
||||
|
||||
nsresult MediaCodecDataDecoder::ResetInputBuffers()
|
||||
nsresult
|
||||
MediaCodecDataDecoder::ResetInputBuffers()
|
||||
{
|
||||
return mDecoder->GetInputBuffers(ReturnTo(&mInputBuffers));
|
||||
}
|
||||
|
||||
nsresult MediaCodecDataDecoder::ResetOutputBuffers()
|
||||
nsresult
|
||||
MediaCodecDataDecoder::ResetOutputBuffers()
|
||||
{
|
||||
return mDecoder->GetOutputBuffers(ReturnTo(&mOutputBuffers));
|
||||
}
|
||||
|
||||
nsresult MediaCodecDataDecoder::Flush() {
|
||||
nsresult
|
||||
MediaCodecDataDecoder::Flush()
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
mFlushing = true;
|
||||
State(kFlushing);
|
||||
lock.Notify();
|
||||
|
||||
while (mFlushing) {
|
||||
while (State() == kFlushing) {
|
||||
lock.Wait();
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
nsresult MediaCodecDataDecoder::Drain() {
|
||||
nsresult
|
||||
MediaCodecDataDecoder::Drain()
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
if (mDraining) {
|
||||
if (State() == kDrainDecoder || State() == kDrainQueue) {
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
mDraining = true;
|
||||
State(kDrainQueue);
|
||||
lock.Notify();
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
|
||||
nsresult MediaCodecDataDecoder::Shutdown() {
|
||||
nsresult
|
||||
MediaCodecDataDecoder::Shutdown()
|
||||
{
|
||||
MonitorAutoLock lock(mMonitor);
|
||||
|
||||
if (!mThread || mStopping) {
|
||||
if (!mThread || State() == kStopping) {
|
||||
// Already shutdown or in the process of doing so
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
mStopping = true;
|
||||
State(kStopping);
|
||||
lock.Notify();
|
||||
|
||||
while (mStopping) {
|
||||
while (State() == kStopping) {
|
||||
lock.Wait();
|
||||
}
|
||||
|
||||
|
|
|
@ -36,7 +36,7 @@ public:
|
|||
AndroidDecoderModule() {}
|
||||
virtual ~AndroidDecoderModule() {}
|
||||
|
||||
bool SupportsMimeType(const nsACString& aMimeType) override;
|
||||
bool SupportsMimeType(const nsACString& aMimeType) const override;
|
||||
|
||||
ConversionRequired
|
||||
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
|
||||
|
@ -59,8 +59,55 @@ public:
|
|||
nsresult Input(MediaRawData* aSample) override;
|
||||
|
||||
protected:
|
||||
enum ModuleState {
|
||||
kDecoding = 0,
|
||||
kFlushing,
|
||||
kDrainQueue,
|
||||
kDrainDecoder,
|
||||
kDrainWaitEOS,
|
||||
kStopping,
|
||||
kShutdown
|
||||
};
|
||||
|
||||
friend class AndroidDecoderModule;
|
||||
|
||||
static const char* ModuleStateStr(ModuleState aState);
|
||||
|
||||
virtual nsresult InitDecoder(widget::sdk::Surface::Param aSurface);
|
||||
|
||||
virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer,
|
||||
widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration)
|
||||
{
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo,
|
||||
widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration)
|
||||
{
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
virtual void Cleanup() {};
|
||||
|
||||
nsresult ResetInputBuffers();
|
||||
nsresult ResetOutputBuffers();
|
||||
|
||||
nsresult GetInputBuffer(JNIEnv* env, int index, jni::Object::LocalRef* buffer);
|
||||
bool WaitForInput();
|
||||
MediaRawData* PeekNextSample();
|
||||
nsresult QueueSample(const MediaRawData* aSample);
|
||||
nsresult QueueEOS();
|
||||
void HandleEOS(int32_t aOutputStatus);
|
||||
media::TimeUnit GetOutputDuration();
|
||||
nsresult ProcessOutput(widget::sdk::BufferInfo::Param aInfo,
|
||||
widget::sdk::MediaFormat::Param aFormat,
|
||||
int32_t aStatus);
|
||||
ModuleState State() const;
|
||||
void State(ModuleState aState);
|
||||
void DecoderLoop();
|
||||
|
||||
virtual void ClearQueue();
|
||||
|
||||
MediaData::Type mType;
|
||||
|
||||
nsAutoCString mMimeType;
|
||||
|
@ -77,28 +124,14 @@ protected:
|
|||
|
||||
// Only these members are protected by mMonitor.
|
||||
Monitor mMonitor;
|
||||
bool mFlushing;
|
||||
bool mDraining;
|
||||
bool mStopping;
|
||||
|
||||
ModuleState mState;
|
||||
|
||||
SampleQueue mQueue;
|
||||
// Durations are stored in microseconds.
|
||||
std::queue<media::TimeUnit> mDurations;
|
||||
|
||||
virtual nsresult InitDecoder(widget::sdk::Surface::Param aSurface);
|
||||
|
||||
virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
|
||||
virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
|
||||
virtual void Cleanup() {};
|
||||
|
||||
nsresult ResetInputBuffers();
|
||||
nsresult ResetOutputBuffers();
|
||||
|
||||
void DecoderLoop();
|
||||
nsresult GetInputBuffer(JNIEnv* env, int index, jni::Object::LocalRef* buffer);
|
||||
virtual void ClearQueue();
|
||||
};
|
||||
|
||||
} // namwspace mozilla
|
||||
} // namespace mozilla
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -117,7 +117,7 @@ AppleDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
}

bool
AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType)
AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType) const
{
return (sIsCoreMediaAvailable &&
(aMimeType.EqualsLiteral("audio/mpeg") ||

@@ -32,7 +32,7 @@ public:
FlushableTaskQueue* aAudioTaskQueue,
MediaDataDecoderCallback* aCallback) override;

bool SupportsMimeType(const nsACString& aMimeType) override;
bool SupportsMimeType(const nsACString& aMimeType) const override;

ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;

@@ -61,7 +61,7 @@ public:
return decoder.forget();
}

bool SupportsMimeType(const nsACString& aMimeType) override
bool SupportsMimeType(const nsACString& aMimeType) const override
{
AVCodecID audioCodec = FFmpegAudioDecoder<V>::GetCodecId(aMimeType);
AVCodecID videoCodec = FFmpegH264Decoder<V>::GetCodecId(aMimeType);

@@ -61,7 +61,7 @@ GonkDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
}

bool
GonkDecoderModule::SupportsMimeType(const nsACString& aMimeType)
GonkDecoderModule::SupportsMimeType(const nsACString& aMimeType) const
{
return aMimeType.EqualsLiteral("audio/mp4a-latm") ||
aMimeType.EqualsLiteral("audio/3gpp") ||

@@ -35,7 +35,7 @@ public:
ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;

bool SupportsMimeType(const nsACString& aMimeType) override;
bool SupportsMimeType(const nsACString& aMimeType) const override;

};

@@ -164,7 +164,7 @@ CanCreateWMFDecoder()
}

bool
WMFDecoderModule::SupportsMimeType(const nsACString& aMimeType)
WMFDecoderModule::SupportsMimeType(const nsACString& aMimeType) const
{
if ((aMimeType.EqualsLiteral("audio/mp4a-latm") ||
aMimeType.EqualsLiteral("audio/mp4")) &&

@@ -31,7 +31,7 @@ public:
FlushableTaskQueue* aAudioTaskQueue,
MediaDataDecoderCallback* aCallback) override;

bool SupportsMimeType(const nsACString& aMimeType) override;
bool SupportsMimeType(const nsACString& aMimeType) const override;

ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
@@ -74,7 +74,9 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioContext)
NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)

static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate)

@@ -178,7 +178,9 @@ public:
virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
{
size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
if (mBuffer) {
amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
}
return amount;
}

@@ -600,7 +600,9 @@ WebAudioDecodeJob::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
if (mOutput) {
amount += mOutput->SizeOfIncludingThis(aMallocSizeOf);
}
amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
if (mBuffer) {
amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
}
return amount;
}

@@ -183,4 +183,3 @@ skip-if = (toolkit == 'gonk' && !debug) || android_version == '10' || android_ve
[test_waveShaperPassThrough.html]
[test_waveShaperInvalidLengthCurve.html]
[test_WebAudioMemoryReporting.html]
skip-if = debug # assertion failures: bug 1222202

@@ -13,10 +13,14 @@ var ac = new AudioContext();
var sp = ac.createScriptProcessor(4096, 1, 1);
sp.connect(ac.destination);

// Not started so as to test
// https://bugzilla.mozilla.org/show_bug.cgi?id=1225003#c2
var oac = new OfflineAudioContext(1, 1, 48000);

var nodeTypes = ["ScriptProcessorNode", "AudioDestinationNode"];
var objectTypes = ["dom-nodes", "engine-objects", "stream-objects"];

var usages = {};
var usages = { "explicit/webaudio/audiocontext": 0 };

for (var i = 0; i < nodeTypes.length; ++i) {
for (var j = 0; j < objectTypes.length; ++j) {

@@ -43,5 +47,8 @@ SpecialPowers.Cc["@mozilla.org/memory-reporter-manager;1"].
getService(SpecialPowers.Ci.nsIMemoryReporterManager).
getReports(handleReport, null, finished, null, /* anonymized = */ false);

// To test bug 1225003, run a failing decodeAudioData() job over a time when
// the tasks from getReports() are expected to run.
ac.decodeAudioData(new ArrayBuffer(4), function(){}, function(){});
</script>
</html>
@ -149,6 +149,10 @@ public:
|
|||
|
||||
bool dummy;
|
||||
mPort->DispatchEvent(static_cast<dom::Event*>(event.get()), &dummy);
|
||||
|
||||
// We must check if we were waiting for this message in order to shutdown
|
||||
// the port.
|
||||
mPort->UpdateMustKeepAlive();
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
|
@ -216,8 +220,10 @@ public:
|
|||
|
||||
virtual bool Notify(JSContext* aCx, workers::Status aStatus) override
|
||||
{
|
||||
if (mPort && aStatus > Running) {
|
||||
mPort->Close();
|
||||
if (aStatus > Running) {
|
||||
// We cannot process messages anymore because we cannot dispatch new
|
||||
// runnables. Let's force a Close().
|
||||
mPort->CloseForced();
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -289,7 +295,7 @@ MessagePort::MessagePort(nsPIDOMWindow* aWindow)
|
|||
|
||||
MessagePort::~MessagePort()
|
||||
{
|
||||
Close();
|
||||
CloseForced();
|
||||
MOZ_ASSERT(!mWorkerFeature);
|
||||
}
|
||||
|
||||
|
@ -336,7 +342,6 @@ MessagePort::Initialize(const nsID& aUUID,
|
|||
mIdentifier->sequenceId() = aSequenceID;
|
||||
|
||||
mState = aState;
|
||||
mNextStep = eNextStepNone;
|
||||
|
||||
if (mNeutered) {
|
||||
// If this port is neutered we don't want to keep it alive artificially nor
|
||||
|
@ -452,8 +457,9 @@ MessagePort::PostMessage(JSContext* aCx, JS::Handle<JS::Value> aMessage,
|
|||
return;
|
||||
}
|
||||
|
||||
// Not entangled yet, but already closed.
|
||||
if (mNextStep != eNextStepNone) {
|
||||
// Not entangled yet, but already closed/disentangled.
|
||||
if (mState == eStateEntanglingForDisentangle ||
|
||||
mState == eStateEntanglingForClose) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -490,11 +496,55 @@ MessagePort::Start()
|
|||
void
|
||||
MessagePort::Dispatch()
|
||||
{
|
||||
if (!mMessageQueueEnabled || mMessages.IsEmpty() || mDispatchRunnable ||
|
||||
mState > eStateEntangled || mNextStep != eNextStepNone) {
|
||||
if (!mMessageQueueEnabled || mMessages.IsEmpty() || mDispatchRunnable) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (mState) {
|
||||
case eStateUnshippedEntangled:
|
||||
// Everything is fine here. We have messages because the other
|
||||
// port populates our queue directly.
|
||||
break;
|
||||
|
||||
case eStateEntangling:
|
||||
// Everything is fine here as well. We have messages because the other
|
||||
// port populated our queue directly when we were in the
|
||||
// eStateUnshippedEntangled state.
|
||||
break;
|
||||
|
||||
case eStateEntanglingForDisentangle:
|
||||
// Here we don't want to ship messages because these messages must be
|
||||
// delivered by the cloned version of this one. They will be sent in the
|
||||
// SendDisentangle().
|
||||
return;
|
||||
|
||||
case eStateEntanglingForClose:
|
||||
// We still want to deliver messages if we are closing. These messages
|
||||
// are here from the previous eStateUnshippedEntangled state.
|
||||
break;
|
||||
|
||||
case eStateEntangled:
|
||||
// This port is up and running.
|
||||
break;
|
||||
|
||||
case eStateDisentangling:
|
||||
// If we are in the process to disentangle the port, we cannot dispatch
|
||||
// messages. They will be sent to the cloned version of this port via
|
||||
// SendDisentangle();
|
||||
return;
|
||||
|
||||
case eStateDisentangled:
|
||||
MOZ_CRASH("This cannot happen.");
|
||||
// It cannot happen because Disentangle should take off all the pending
|
||||
// messages.
|
||||
break;
|
||||
|
||||
case eStateDisentangledForClose:
|
||||
// If we are here is because the port has been closed. We can still
|
||||
// process the pending messages.
|
||||
break;
|
||||
}
|
||||
|
||||
RefPtr<SharedMessagePortMessage> data = mMessages.ElementAt(0);
|
||||
mMessages.RemoveElementAt(0);
|
||||
|
||||
|
@ -510,6 +560,24 @@ MessagePort::Dispatch()
|
|||
void
|
||||
MessagePort::Close()
|
||||
{
|
||||
CloseInternal(true /* aSoftly */);
|
||||
}
|
||||
|
||||
void
|
||||
MessagePort::CloseForced()
|
||||
{
|
||||
CloseInternal(false /* aSoftly */);
|
||||
}
|
||||
|
||||
void
|
||||
MessagePort::CloseInternal(bool aSoftly)
|
||||
{
|
||||
// If we have some messages to send but we don't want a 'soft' close, we have
|
||||
// to flush them now.
|
||||
if (!aSoftly) {
|
||||
mMessages.Clear();
|
||||
}
|
||||
|
||||
if (mState == eStateUnshippedEntangled) {
|
||||
MOZ_ASSERT(mUnshippedEntangledPort);
|
||||
|
||||
|
@ -517,8 +585,8 @@ MessagePort::Close()
|
|||
RefPtr<MessagePort> port = Move(mUnshippedEntangledPort);
|
||||
MOZ_ASSERT(mUnshippedEntangledPort == nullptr);
|
||||
|
||||
mState = eStateDisentangled;
|
||||
port->Close();
|
||||
mState = eStateDisentangledForClose;
|
||||
port->CloseInternal(aSoftly);
|
||||
|
||||
UpdateMustKeepAlive();
|
||||
return;
|
||||
|
@ -526,7 +594,20 @@ MessagePort::Close()
|
|||
|
||||
// Not entangled yet, we have to wait.
|
||||
if (mState == eStateEntangling) {
|
||||
mNextStep = eNextStepClose;
|
||||
mState = eStateEntanglingForClose;
|
||||
return;
|
||||
}
|
||||
|
||||
// Not entangled but already cloned or closed
|
||||
if (mState == eStateEntanglingForDisentangle ||
|
||||
mState == eStateEntanglingForClose) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Maybe we were already closing the port but softly. In this case we call
|
||||
// UpdateMustKeepAlive() to consider the empty pending message queue.
|
||||
if (mState == eStateDisentangledForClose && !aSoftly) {
|
||||
UpdateMustKeepAlive();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -536,7 +617,7 @@ MessagePort::Close()
|
|||
|
||||
// We don't care about stopping the sending of messages because from now all
|
||||
// the incoming messages will be ignored.
|
||||
mState = eStateDisentangled;
|
||||
mState = eStateDisentangledForClose;
|
||||
|
||||
MOZ_ASSERT(mActor);
|
||||
|
||||
|
@@ -576,8 +657,11 @@ MessagePort::SetOnmessage(EventHandlerNonNull* aCallback)
void
MessagePort::Entangled(nsTArray<MessagePortMessage>& aMessages)
{
MOZ_ASSERT(mState == eStateEntangling);
MOZ_ASSERT(mState == eStateEntangling ||
mState == eStateEntanglingForDisentangle ||
mState == eStateEntanglingForClose);

State oldState = mState;
mState = eStateEntangled;

// If we have pending messages, these have to be sent.

@@ -598,8 +682,10 @@ MessagePort::Entangled(nsTArray<MessagePortMessage>& aMessages)
return;
}

if (mNextStep == eNextStepClose) {
Close();
// If the next step is to close the port, we do it ignoring the received
// messages.
if (oldState == eStateEntanglingForClose) {
CloseForced();
return;
}

@@ -607,12 +693,11 @@ MessagePort::Entangled(nsTArray<MessagePortMessage>& aMessages)

// We were waiting for the entangling callback in order to disentangle this
// port immediately after.
if (mNextStep == eNextStepDisentangle) {
if (oldState == eStateEntanglingForDisentangle) {
StartDisentangling();
return;
}

MOZ_ASSERT(mNextStep == eNextStepNone);
Dispatch();
}
@@ -623,7 +708,6 @@ MessagePort::StartDisentangling()
MOZ_ASSERT(mState == eStateEntangled);

mState = eStateDisentangling;
mNextStep = eNextStepNone;

// Sending this message we communicate to the parent actor that we don't want
// to receive any new messages. It is possible that a message has been

@@ -635,8 +719,12 @@ MessagePort::StartDisentangling()
void
MessagePort::MessagesReceived(nsTArray<MessagePortMessage>& aMessages)
{
MOZ_ASSERT(mState == eStateEntangled || mState == eStateDisentangling);
MOZ_ASSERT(mNextStep == eNextStepNone);
MOZ_ASSERT(mState == eStateEntangled ||
mState == eStateDisentangling ||
// This last step can happen only if Close() has been called
// manually. At this point SendClose() is sent but we can still
// receive something until the Closing request is processed.
mState == eStateDisentangledForClose);
MOZ_ASSERT(mMessagesForTheOtherPort.IsEmpty());

RemoveDocFromBFCache();
@@ -700,7 +788,8 @@ MessagePort::CloneAndDisentangle(MessagePortIdentifier& aIdentifier)

// We already have a 'next step'. We have to consider this port as already
// cloned/closed/disentangled.
if (mNextStep != eNextStepNone) {
if (mState == eStateEntanglingForDisentangle ||
mState == eStateEntanglingForClose) {
return;
}

@@ -730,27 +819,28 @@ MessagePort::CloneAndDisentangle(MessagePortIdentifier& aIdentifier)
// Register this component to PBackground.
ConnectToPBackground();

mNextStep = eNextStepDisentangle;
mState = eStateEntanglingForDisentangle;
return;
}

// Not entangled yet, we have to wait.
if (mState < eStateEntangled) {
mNextStep = eNextStepDisentangle;
if (mState == eStateEntangling) {
mState = eStateEntanglingForDisentangle;
return;
}

MOZ_ASSERT(mState == eStateEntangled);
StartDisentangling();
}

void
MessagePort::Closed()
{
if (mState == eStateDisentangled) {
if (mState >= eStateDisentangled) {
return;
}

mState = eStateDisentangled;
mState = eStateDisentangledForClose;

if (mActor) {
mActor->SetPort(nullptr);

@@ -789,7 +879,9 @@ MessagePort::ActorCreated(mozilla::ipc::PBackgroundChild* aActor)
MOZ_ASSERT(aActor);
MOZ_ASSERT(!mActor);
MOZ_ASSERT(mIdentifier);
MOZ_ASSERT(mState == eStateEntangling);
MOZ_ASSERT(mState == eStateEntangling ||
mState == eStateEntanglingForDisentangle ||
mState == eStateEntanglingForClose);

PMessagePortChild* actor =
aActor->SendPMessagePortConstructor(mIdentifier->uuid(),

@@ -805,7 +897,9 @@ MessagePort::ActorCreated(mozilla::ipc::PBackgroundChild* aActor)
void
MessagePort::UpdateMustKeepAlive()
{
if (mState == eStateDisentangled && mIsKeptAlive) {
if (mState >= eStateDisentangled &&
mMessages.IsEmpty() &&
mIsKeptAlive) {
mIsKeptAlive = false;

if (mWorkerFeature) {

@@ -859,7 +953,7 @@ MessagePort::Observe(nsISupports* aSubject, const char* aTopic,
NS_ENSURE_SUCCESS(rv, rv);

if (innerID == mInnerID) {
Close();
CloseForced();
}

return NS_OK;
@@ -25,6 +25,7 @@ class DispatchEventRunnable;
class MessagePortChild;
class MessagePortIdentifier;
class MessagePortMessage;
class PostMessageRunnable;
class SharedMessagePortMessage;

namespace workers {

@@ -36,6 +37,7 @@ class MessagePort final : public DOMEventTargetHelper
, public nsIObserver
{
friend class DispatchEventRunnable;
friend class PostMessageRunnable;

public:
NS_DECL_NSIIPCBACKGROUNDCHILDCREATECALLBACK

@@ -52,6 +54,7 @@ public:
Create(nsPIDOMWindow* aWindow, const MessagePortIdentifier& aIdentifier,
ErrorResult& aRv);

// For IPC.
static void
ForceClose(const MessagePortIdentifier& aIdentifier);

@@ -77,6 +80,8 @@ public:

void CloneAndDisentangle(MessagePortIdentifier& aIdentifier);

void CloseForced();

// These methods are useful for MessagePortChild

void Entangled(nsTArray<MessagePortMessage>& aMessages);

@@ -96,11 +101,16 @@ private:
// StateEntangling.
eStateUnshippedEntangled,

// If the port is closed or cloned when we are in this state, we set the
// mNextStep. This 'next' operation will be done when entangled() message
// is received.
// If the port is closed or cloned while we are in this state, we move to
// one of the following two states: EntanglingForClose or
// EntanglingForDisentangle.
eStateEntangling,

// We are not fully entangled yet but are already disentangled.
eStateEntanglingForDisentangle,

// We are not fully entangled yet but are already closed.
eStateEntanglingForClose,

// When entangled() is received we send all the messages in the
// mMessagesForTheOtherPort to the actor and we change the state to
// StateEntangled. At this point the port is entangled with the other. We

@@ -121,7 +131,11 @@ private:
// don't receive any other message, so nothing will be lost.
// When disentangling the port we send all the messages from mMessages
// through the actor.
eStateDisentangled
eStateDisentangled,

// We are here if Close() has been called. We are disentangled but we can
// still send pending messages.
eStateDisentangledForClose
};

void Initialize(const nsID& aUUID, const nsID& aDestinationUUID,

@@ -138,6 +152,8 @@ private:

void RemoveDocFromBFCache();

void CloseInternal(bool aSoftly);

// This method is meant to keep alive the MessagePort when this object is
// creating the actor and until the actor is entangled.
// We release the object when the port is closed or disentangled.

@@ -165,14 +181,6 @@ private:

State mState;

// This 'nextStep' is used when we are waiting to be entangled but the
// content has called Clone() or Close().
enum {
eNextStepNone,
eNextStepDisentangle,
eNextStepClose
} mNextStep;

bool mMessageQueueEnabled;

bool mIsKeptAlive;
@@ -36,6 +36,9 @@ public:
: mDestinationUUID(aDestinationUUID)
, mSequenceID(1)
, mParent(nullptr)
// By default we don't know the next parent.
, mWaitingForNewParent(true)
, mNextStepCloseAll(false)
{
MOZ_COUNT_CTOR(MessagePortServiceData);
}

@@ -62,6 +65,9 @@ public:

FallibleTArray<NextParent> mNextParents;
FallibleTArray<RefPtr<SharedMessagePortMessage>> mMessages;

bool mWaitingForNewParent;
bool mNextStepCloseAll;
};

/* static */ MessagePortService*

@@ -113,31 +119,49 @@ MessagePortService::RequestEntangling(MessagePortParent* aParent,
// This is a security check.
if (!data->mDestinationUUID.Equals(aDestinationUUID)) {
MOZ_ASSERT(false, "DestinationUUIDs do not match!");
CloseAll(aParent->ID());
return false;
}

if (aSequenceID < data->mSequenceID) {
MOZ_ASSERT(false, "Invalid sequence ID!");
CloseAll(aParent->ID());
return false;
}

if (aSequenceID == data->mSequenceID) {
if (data->mParent) {
MOZ_ASSERT(false, "Two ports cannot have the same sequenceID.");
CloseAll(aParent->ID());
return false;
}

// We activate this port, sending all the messages.
data->mParent = aParent;
data->mWaitingForNewParent = false;
FallibleTArray<MessagePortMessage> array;
if (!SharedMessagePortMessage::FromSharedToMessagesParent(aParent,
data->mMessages,
array)) {
CloseAll(aParent->ID());
return false;
}

data->mMessages.Clear();
return aParent->Entangled(array);

// We can entangle the port.
if (!aParent->Entangled(array)) {
CloseAll(aParent->ID());
return false;
}

// If we were waiting for this parent in order to close this channel, this
// is the time to do it.
if (data->mNextStepCloseAll) {
CloseAll(aParent->ID());
}

return true;
}

// This new parent will be the next one when a Disentangle request is
@@ -145,6 +169,7 @@ MessagePortService::RequestEntangling(MessagePortParent* aParent,
MessagePortServiceData::NextParent* nextParent =
data->mNextParents.AppendElement(mozilla::fallible);
if (!nextParent) {
CloseAll(aParent->ID());
return false;
}

@@ -193,6 +218,7 @@ MessagePortService::DisentanglePort(
// We didn't find the parent.
if (!nextParent) {
data->mMessages.SwapElements(aMessages);
data->mWaitingForNewParent = true;
data->mParent = nullptr;
return true;
}

@@ -250,7 +276,7 @@ MessagePortService::CloseAllDebugCheck(const nsID& aID,
#endif

void
MessagePortService::CloseAll(const nsID& aUUID)
MessagePortService::CloseAll(const nsID& aUUID, bool aForced)
{
MessagePortServiceData* data;
if (!mPorts.Get(aUUID, &data)) {

@@ -267,9 +293,24 @@ MessagePortService::CloseAll(const nsID& aUUID)
}

nsID destinationUUID = data->mDestinationUUID;

// If we have information about the other port, and that port has pending
// messages to deliver that its parent has not processed yet (because the
// entangling request hasn't arrived yet), we cannot close this channel.
MessagePortServiceData* destinationData;
if (!aForced &&
mPorts.Get(destinationUUID, &destinationData) &&
!destinationData->mMessages.IsEmpty() &&
destinationData->mWaitingForNewParent) {
MOZ_ASSERT(!destinationData->mNextStepCloseAll);
destinationData->mNextStepCloseAll = true;
return;
}

mPorts.Remove(aUUID);

CloseAll(destinationUUID);
CloseAll(destinationUUID, aForced);

// CloseAll calls itself recursively and it can happen that it deletes
// itself. Before continuing we must check if we are still alive.

@@ -370,7 +411,7 @@ MessagePortService::ForceClose(const nsID& aUUID,
return false;
}

CloseAll(aUUID);
CloseAll(aUUID, true);
return true;
}
@@ -46,7 +46,7 @@ public:
private:
~MessagePortService() {}

void CloseAll(const nsID& aUUID);
void CloseAll(const nsID& aUUID, bool aForced = false);
void MaybeShutdown();

class MessagePortServiceData;
@@ -24,3 +24,5 @@ support-files =
[test_messageChannel_any.html]
[test_messageChannel_forceClose.html]
[test_messageChannel_bug1178076.html]
[test_messageChannel_bug1224825.html]
[test_messageChannel_worker_forceClose.html]
@@ -0,0 +1,94 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1224825
-->
<head>
<meta charset="utf-8">
<title>Test for Bug 1224825</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1224825">Mozilla Bug 1224825</a>
<div id="content"></div>
<pre id="test">
</pre>
<script type="application/javascript">

var MAX = 100;

function test_fullDeliveredMessages() {
var worker = new Worker('data:javascript,onmessage = function(e) { e.ports[0].onmessage = function(evt) { postMessage(evt.data);}}');

var count = 0;
worker.onmessage = function(e) {
is(e.data, count, "Correct value expected!");
ok(count < MAX, "No count > MAX messages!");
if (++count == MAX) {

SimpleTest.requestFlakyTimeout("Testing an event not happening");
setTimeout(function() {
runTests();
}, 200);

info("All the messages correctly received");
}
}

var mc = new MessageChannel();
worker.postMessage(42, [mc.port2]);

for (var i = 0; i < MAX; ++i) {
mc.port1.postMessage(i);
}

mc.port1.close();

for (var i = 0; i < MAX * 2; ++i) {
mc.port1.postMessage(i);
}
}

function test_closeInBetween() {
var mc = new MessageChannel();

for (var i = 0; i < MAX; ++i) {
mc.port1.postMessage(i);
}

mc.port1.onmessage = function(e) {
ok(e.data < MAX/2, "Correct message received from port1:" + e.data);
}

mc.port2.onmessage = function(e) {
ok(e.data < MAX, "Correct message received from port2:" + e.data);
if (e.data == MAX/2) {
mc.port2.close();
}

mc.port2.postMessage(e.data);

if (e.data == MAX - 1) {
runTests();
}
}
}

var tests = [ test_fullDeliveredMessages, test_closeInBetween ];

function runTests() {
if (!tests.length) {
SimpleTest.finish();
return;
}

var test = tests.shift();
test();
}

SimpleTest.waitForExplicitFinish();
runTests();
</script>
</body>
</html>
@@ -0,0 +1,27 @@
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>Test for forcing the closing of the port in workers</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
</head>
<body>
<div id="content"></div>
<pre id="test">
</pre>
<script type="application/javascript">

var worker = new Worker('data:javascript,onmessage = function(e) { "doing nothing with this port"; }');

var mc = new MessageChannel();
worker.postMessage(42, [mc.port2]);

for (var i = 0; i < 10; ++i) {
mc.port1.postMessage(i);
}

ok(true, "All the messages are sent! We should shutdown correctly.");
</script>
</body>
</html>
@@ -209,6 +209,11 @@ PushRecord.prototype = {
return this.quota === 0;
},

matchesOriginAttributes(pattern) {
return ChromeUtils.originAttributesMatchPattern(
this.principal.originAttributes, pattern);
},

toSubscription() {
return {
pushEndpoint: this.pushEndpoint,
@@ -277,37 +277,31 @@ this.PushService = {
})
break;

case "webapps-clear-data":
console.debug("webapps-clear-data");

let data = aSubject
.QueryInterface(Ci.mozIApplicationClearPrivateDataParams);
if (!data) {
console.error("webapps-clear-data: Failed to get information " +
"about application");
return;
}

var originAttributes =
ChromeUtils.originAttributesToSuffix({ appId: data.appId,
inBrowser: data.browserOnly });
this._db.getAllByOriginAttributes(originAttributes)
.then(records => Promise.all(records.map(record =>
this._db.delete(record.keyID)
.catch(err => {
console.error("webapps-clear-data: Error removing record",
record, err);
// This is the record we were unable to delete.
return record;
})
.then(maybeDeleted => this._backgroundUnregister(maybeDeleted))
)
));

case "clear-origin-data":
this._clearOriginData(data).catch(error => {
console.error("clearOriginData: Error clearing origin data:", error);
});
break;
}
},

_clearOriginData: function(data) {
console.log("clearOriginData()");

if (!data) {
return Promise.resolve();
}

let pattern = JSON.parse(data);
return this._db.clearIf(record => {
if (!record.matchesOriginAttributes(pattern)) {
return false;
}
this._backgroundUnregister(record);
return true;
});
},

/**
* Sends an unregister request to the server in the background. If the
* service is not connected, this function is a no-op.

@@ -487,7 +481,7 @@ this.PushService = {
return;
}

Services.obs.addObserver(this, "webapps-clear-data", false);
Services.obs.addObserver(this, "clear-origin-data", false);

// On B2G the NetworkManager interface fires a network-active-changed
// event.

@@ -614,7 +608,7 @@ this.PushService = {
prefs.ignore("connection.enabled", this);

Services.obs.removeObserver(this, this._networkStateChangeEventName);
Services.obs.removeObserver(this, "webapps-clear-data");
Services.obs.removeObserver(this, "clear-origin-data");
Services.obs.removeObserver(this, "idle-daily");
Services.obs.removeObserver(this, "perm-changed");
},
@@ -12,6 +12,7 @@ Cu.import('resource://gre/modules/Timer.jsm');
Cu.import('resource://gre/modules/Promise.jsm');
Cu.import('resource://gre/modules/Preferences.jsm');
Cu.import('resource://gre/modules/PlacesUtils.jsm');
Cu.import('resource://gre/modules/ObjectUtils.jsm');

const serviceExports = Cu.import('resource://gre/modules/PushService.jsm', {});
const servicePrefs = new Preferences('dom.push.');
@ -0,0 +1,144 @@
|
|||
/* Any copyright is dedicated to the Public Domain.
|
||||
http://creativecommons.org/publicdomain/zero/1.0/ */
|
||||
|
||||
'use strict';
|
||||
|
||||
const {PushDB, PushService, PushServiceWebSocket} = serviceExports;
|
||||
|
||||
const userAgentID = 'bd744428-f125-436a-b6d0-dd0c9845837f';
|
||||
|
||||
let clearForPattern = Task.async(function* (testRecords, pattern) {
|
||||
let patternString = JSON.stringify(pattern);
|
||||
yield PushService._clearOriginData(patternString);
|
||||
|
||||
for (let length = testRecords.length; length--;) {
|
||||
let test = testRecords[length];
|
||||
let originSuffix = ChromeUtils.originAttributesToSuffix(
|
||||
test.originAttributes);
|
||||
|
||||
let registration = yield PushNotificationService.registration(
|
||||
test.scope,
|
||||
originSuffix
|
||||
);
|
||||
|
||||
let url = test.scope + originSuffix;
|
||||
|
||||
if (ObjectUtils.deepEqual(test.clearIf, pattern)) {
|
||||
ok(!registration, 'Should clear registration ' + url +
|
||||
' for pattern ' + patternString);
|
||||
testRecords.splice(length, 1);
|
||||
} else {
|
||||
ok(registration, 'Should not clear registration ' + url +
|
||||
' for pattern ' + patternString);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
function run_test() {
|
||||
do_get_profile();
|
||||
setPrefs({
|
||||
userAgentID,
|
||||
requestTimeout: 1000,
|
||||
retryBaseInterval: 150
|
||||
});
|
||||
disableServiceWorkerEvents(
|
||||
'https://example.org/1'
|
||||
);
|
||||
run_next_test();
|
||||
}
|
||||
|
||||
add_task(function* test_webapps_cleardata() {
|
||||
let db = PushServiceWebSocket.newPushDB();
|
||||
do_register_cleanup(() => {return db.drop().then(_ => db.close());});
|
||||
|
||||
let testRecords = [{
|
||||
scope: 'https://example.org/1',
|
||||
originAttributes: { appId: 1 },
|
||||
clearIf: { appId: 1, inBrowser: false },
|
||||
}, {
|
||||
scope: 'https://example.org/1',
|
||||
originAttributes: { appId: 1, inBrowser: true },
|
||||
clearIf: { appId: 1 },
|
||||
}, {
|
||||
scope: 'https://example.org/1',
|
||||
originAttributes: { appId: 2, inBrowser: true },
|
||||
clearIf: { appId: 2, inBrowser: true },
|
||||
}, {
|
||||
scope: 'https://example.org/2',
|
||||
originAttributes: { appId: 1 },
|
||||
clearIf: { appId: 1, inBrowser: false },
|
||||
}, {
|
||||
scope: 'https://example.org/2',
|
||||
originAttributes: { appId: 2, inBrowser: true },
|
||||
clearIf: { appId: 2, inBrowser: true },
|
||||
}, {
|
||||
scope: 'https://example.org/3',
|
||||
originAttributes: { appId: 3, inBrowser: true },
|
||||
clearIf: { inBrowser: true },
|
||||
}, {
|
||||
scope: 'https://example.org/3',
|
||||
originAttributes: { appId: 4, inBrowser: true },
|
||||
clearIf: { inBrowser: true },
|
||||
}];
|
||||
|
||||
let unregisterDone;
|
||||
let unregisterPromise = new Promise(resolve =>
|
||||
unregisterDone = after(testRecords.length, resolve));
|
||||
|
||||
PushService.init({
|
||||
serverURI: "wss://push.example.org",
|
||||
networkInfo: new MockDesktopNetworkInfo(),
|
||||
db,
|
||||
makeWebSocket(uri) {
|
||||
return new MockWebSocket(uri, {
|
||||
onHello(data) {
|
||||
equal(data.messageType, 'hello', 'Handshake: wrong message type');
|
||||
equal(data.uaid, userAgentID, 'Handshake: wrong device ID');
|
||||
this.serverSendMsg(JSON.stringify({
|
||||
messageType: 'hello',
|
||||
status: 200,
|
||||
uaid: userAgentID
|
||||
}));
|
||||
},
|
||||
onRegister(data) {
|
||||
equal(data.messageType, 'register', 'Register: wrong message type');
|
||||
this.serverSendMsg(JSON.stringify({
|
||||
messageType: 'register',
|
||||
status: 200,
|
||||
channelID: data.channelID,
|
||||
uaid: userAgentID,
|
||||
pushEndpoint: 'https://example.com/update/' + Math.random(),
|
||||
}));
|
||||
},
|
||||
onUnregister(data) {
|
||||
unregisterDone();
|
||||
},
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
yield Promise.all(testRecords.map(test =>
|
||||
PushNotificationService.register(
|
||||
test.scope,
|
||||
ChromeUtils.originAttributesToSuffix(test.originAttributes)
|
||||
)
|
||||
));
|
||||
|
||||
// Removes records for all scopes with the same app ID. Excludes records
|
||||
// where `inBrowser` is true.
|
||||
yield clearForPattern(testRecords, { appId: 1, inBrowser: false });
|
||||
|
||||
// Removes the remaining record for app ID 1, where `inBrowser` is true.
|
||||
yield clearForPattern(testRecords, { appId: 1 });
|
||||
|
||||
// Removes all records for all scopes with the same app ID, where
|
||||
// `inBrowser` is true.
|
||||
yield clearForPattern(testRecords, { appId: 2, inBrowser: true });
|
||||
|
||||
// Removes all records where `inBrowser` is true.
|
||||
yield clearForPattern(testRecords, { inBrowser: true });
|
||||
|
||||
equal(testRecords.length, 0, 'Should remove all test records');
|
||||
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
|
||||
'Timed out waiting for unregister');
|
||||
});
|
|
@ -1,95 +0,0 @@
|
|||
/* Any copyright is dedicated to the Public Domain.
|
||||
http://creativecommons.org/publicdomain/zero/1.0/ */
|
||||
|
||||
'use strict';
|
||||
|
||||
const {PushDB, PushService, PushServiceWebSocket} = serviceExports;
|
||||
|
||||
const userAgentID = 'bd744428-f125-436a-b6d0-dd0c9845837f';
|
||||
|
||||
function run_test() {
|
||||
do_get_profile();
|
||||
setPrefs({
|
||||
userAgentID,
|
||||
requestTimeout: 1000,
|
||||
retryBaseInterval: 150
|
||||
});
|
||||
disableServiceWorkerEvents(
|
||||
'https://example.org/1'
|
||||
);
|
||||
run_next_test();
|
||||
}
|
||||
|
||||
add_task(function* test_webapps_cleardata() {
|
||||
let db = PushServiceWebSocket.newPushDB();
|
||||
do_register_cleanup(() => {return db.drop().then(_ => db.close());});
|
||||
|
||||
let unregisterDone;
|
||||
let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
|
||||
|
||||
PushService.init({
|
||||
serverURI: "wss://push.example.org",
|
||||
networkInfo: new MockDesktopNetworkInfo(),
|
||||
db,
|
||||
makeWebSocket(uri) {
|
||||
return new MockWebSocket(uri, {
|
||||
onHello(data) {
|
||||
equal(data.messageType, 'hello', 'Handshake: wrong message type');
|
||||
equal(data.uaid, userAgentID, 'Handshake: wrong device ID');
|
||||
this.serverSendMsg(JSON.stringify({
|
||||
messageType: 'hello',
|
||||
status: 200,
|
||||
uaid: userAgentID
|
||||
}));
|
||||
},
|
||||
onRegister(data) {
|
||||
equal(data.messageType, 'register', 'Register: wrong message type');
|
||||
this.serverSendMsg(JSON.stringify({
|
||||
messageType: 'register',
|
||||
status: 200,
|
||||
channelID: data.channelID,
|
||||
uaid: userAgentID,
|
||||
pushEndpoint: 'https://example.com/update/' + Math.random(),
|
||||
}));
|
||||
},
|
||||
onUnregister(data) {
|
||||
unregisterDone();
|
||||
},
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
let registers = yield Promise.all([
|
||||
PushNotificationService.register(
|
||||
'https://example.org/1',
|
||||
ChromeUtils.originAttributesToSuffix({ appId: 1, inBrowser: false })),
|
||||
PushNotificationService.register(
|
||||
'https://example.org/1',
|
||||
ChromeUtils.originAttributesToSuffix({ appId: 1, inBrowser: true })),
|
||||
]);
|
||||
|
||||
Services.obs.notifyObservers(
|
||||
{ appId: 1, browserOnly: false,
|
||||
QueryInterface: XPCOMUtils.generateQI([Ci.mozIApplicationClearPrivateDataParams])},
|
||||
"webapps-clear-data", "");
|
||||
|
||||
let waitAWhile = new Promise(function(res) {
|
||||
setTimeout(res, 2000);
|
||||
});
|
||||
yield waitAWhile;
|
||||
|
||||
let registration;
|
||||
registration = yield PushNotificationService.registration(
|
||||
'https://example.org/1',
|
||||
ChromeUtils.originAttributesToSuffix({ appId: 1, inBrowser: false }));
|
||||
ok(!registration, 'Registration for { 1, false } should not exist.');
|
||||
|
||||
registration = yield PushNotificationService.registration(
|
||||
'https://example.org/1',
|
||||
ChromeUtils.originAttributesToSuffix({ appId: 1, inBrowser: true }));
|
||||
ok(registration, 'Registration for { 1, true } should still exist.');
|
||||
|
||||
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
|
||||
'Timed out waiting for unregister');
|
||||
});
|
||||
|
|
@@ -4,6 +4,7 @@ tail =
# Push notifications and alarms are currently disabled on Android.
skip-if = toolkit == 'android'

[test_clear_origin_data.js]
[test_drop_expired.js]
[test_notification_ack.js]
[test_notification_data.js]

@@ -39,7 +40,6 @@ run-sequentially = This will delete all existing push subscriptions.
[test_unregister_invalid_json.js]
[test_unregister_not_found.js]
[test_unregister_success.js]
[test_webapps_cleardata.js]
[test_updateRecordNoEncryptionKeys_ws.js]
[test_reconnect_retry.js]
[test_retry_ws.js]
@@ -134,7 +134,7 @@ interface WindowEventHandlers {
attribute EventHandler onpagehide;
attribute EventHandler onpageshow;
attribute EventHandler onpopstate;
//(Not implemented)attribute EventHandler onstorage;
attribute EventHandler onstorage;
attribute EventHandler onunload;
};
@@ -54,8 +54,10 @@ interface NavigatorID {
[NoInterfaceObject, Exposed=(Window,Worker)]
interface NavigatorLanguage {

// These 2 values are cached. They are updated when pref
// intl.accept_languages is changed.
// These two attributes are cached because this interface is also implemented
// by WorkerNavigator, and this way we don't have to go back to the main
// thread from the worker thread any time we need to retrieve them. They
// are updated when the pref intl.accept_languages is changed.

[Pure, Cached]
readonly attribute DOMString? language;
@ -79,18 +79,15 @@ class DataStoreGetStringRunnable final : public DataStoreRunnable
|
|||
|
||||
FuncType mFunc;
|
||||
nsAString& mString;
|
||||
ErrorResult& mRv;
|
||||
|
||||
public:
|
||||
DataStoreGetStringRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
FuncType aFunc,
|
||||
nsAString& aString,
|
||||
ErrorResult& aRv)
|
||||
nsAString& aString)
|
||||
: DataStoreRunnable(aWorkerPrivate, aBackingStore)
|
||||
, mFunc(aFunc)
|
||||
, mString(aString)
|
||||
, mRv(aRv)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
|
@ -102,9 +99,15 @@ protected:
|
|||
{
|
||||
AssertIsOnMainThread();
|
||||
|
||||
ErrorResult rv;
|
||||
nsString string;
|
||||
(mBackingStore.get()->*mFunc)(string, mRv);
|
||||
(mBackingStore.get()->*mFunc)(string, rv);
|
||||
mString.Assign(string);
|
||||
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
@ -113,17 +116,12 @@ protected:
|
|||
// thread.
|
||||
class DataStoreGetReadOnlyRunnable final : public DataStoreRunnable
|
||||
{
|
||||
ErrorResult& mRv;
|
||||
|
||||
public:
|
||||
bool mReadOnly;
|
||||
|
||||
public:
|
||||
DataStoreGetReadOnlyRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
ErrorResult& aRv)
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore)
|
||||
: DataStoreRunnable(aWorkerPrivate, aBackingStore)
|
||||
, mRv(aRv)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
|
@ -135,7 +133,12 @@ protected:
|
|||
{
|
||||
AssertIsOnMainThread();
|
||||
|
||||
mReadOnly = mBackingStore->GetReadOnly(mRv);
|
||||
ErrorResult rv;
|
||||
mReadOnly = mBackingStore->GetReadOnly(rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
@ -147,6 +150,7 @@ public:
|
|||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
Promise* aWorkerPromise)
|
||||
: DataStoreRunnable(aWorkerPrivate, aBackingStore)
|
||||
, mFailed(false)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
|
@ -167,31 +171,34 @@ public:
|
|||
return true;
|
||||
}
|
||||
|
||||
bool Failed() const
|
||||
{
|
||||
return mFailed;
|
||||
}
|
||||
|
||||
protected:
|
||||
RefPtr<PromiseWorkerProxy> mPromiseWorkerProxy;
|
||||
bool mFailed;
|
||||
};
|
||||
|
||||
// A DataStoreRunnable to run DataStore::Get(...) on the main thread.
|
||||
class DataStoreGetRunnable final : public DataStoreProxyRunnable
|
||||
{
|
||||
Sequence<OwningStringOrUnsignedLong> mId;
|
||||
ErrorResult& mRv;
|
||||
|
||||
public:
|
||||
DataStoreGetRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
Promise* aWorkerPromise,
|
||||
const Sequence<OwningStringOrUnsignedLong>& aId,
|
||||
ErrorResult& aRv)
|
||||
Promise* aWorkerPromise)
|
||||
: DataStoreProxyRunnable(aWorkerPrivate, aBackingStore, aWorkerPromise)
|
||||
, mRv(aRv)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
}
|
||||
|
||||
if (!mId.AppendElements(aId, fallible)) {
|
||||
mRv.Throw(NS_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
Sequence<OwningStringOrUnsignedLong>& Id()
|
||||
{
|
||||
return mId;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
@ -200,8 +207,14 @@ protected:
|
|||
{
|
||||
AssertIsOnMainThread();
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->Get(mId, mRv);
|
||||
ErrorResult rv;
|
||||
RefPtr<Promise> promise = mBackingStore->Get(mId, rv);
|
||||
promise->AppendNativeHandler(mPromiseWorkerProxy);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mFailed = true;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
@ -212,30 +225,28 @@ class DataStorePutRunnable final : public DataStoreProxyRunnable
|
|||
{
|
||||
const StringOrUnsignedLong& mId;
|
||||
const nsString mRevisionId;
|
||||
ErrorResult& mRv;
|
||||
nsresult mError;
|
||||
|
||||
public:
|
||||
DataStorePutRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
Promise* aWorkerPromise,
|
||||
JSContext* aCx,
|
||||
JS::Handle<JS::Value> aObj,
|
||||
const StringOrUnsignedLong& aId,
|
||||
const nsAString& aRevisionId,
|
||||
ErrorResult& aRv)
|
||||
const nsAString& aRevisionId)
|
||||
: DataStoreProxyRunnable(aWorkerPrivate, aBackingStore, aWorkerPromise)
|
||||
, StructuredCloneHolder(CloningNotSupported, TransferringNotSupported,
|
||||
SameProcessDifferentThread)
|
||||
, mId(aId)
|
||||
, mRevisionId(aRevisionId)
|
||||
, mRv(aRv)
|
||||
, mError(NS_OK)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
}
|
||||
|
||||
// This needs to be structured cloned while it's still on the worker thread.
|
||||
Write(aCx, aObj, mRv);
|
||||
NS_WARN_IF(mRv.Failed());
|
||||
nsresult ErrorCode() const
|
||||
{
|
||||
return mError;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
@ -247,22 +258,28 @@ protected:
|
|||
// Initialise an AutoJSAPI with the target window.
|
||||
AutoJSAPI jsapi;
|
||||
if (NS_WARN_IF(!jsapi.Init(mBackingStore->GetParentObject()))) {
|
||||
mRv.Throw(NS_ERROR_UNEXPECTED);
|
||||
mError = NS_ERROR_UNEXPECTED;
|
||||
return true;
|
||||
}
|
||||
JSContext* cx = jsapi.cx();
|
||||
|
||||
ErrorResult rv;
|
||||
JS::Rooted<JS::Value> value(cx);
|
||||
Read(mBackingStore->GetParentObject(), cx, &value, mRv);
|
||||
if (NS_WARN_IF(mRv.Failed())) {
|
||||
Read(mBackingStore->GetParentObject(), cx, &value, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mError = NS_ERROR_DOM_DATA_CLONE_ERR;
|
||||
return true;
|
||||
}
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->Put(cx, value, mId,
|
||||
mRevisionId, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mError = NS_ERROR_FAILURE;
|
||||
return true;
|
||||
}
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->Put(cx,
|
||||
value,
|
||||
mId,
|
||||
mRevisionId,
|
||||
mRv);
|
||||
promise->AppendNativeHandler(mPromiseWorkerProxy);
|
||||
return true;
|
||||
}
|
||||
|
@ -274,30 +291,28 @@ class DataStoreAddRunnable final : public DataStoreProxyRunnable
|
|||
{
|
||||
const Optional<StringOrUnsignedLong>& mId;
|
||||
const nsString mRevisionId;
|
||||
ErrorResult& mRv;
|
||||
nsresult mResult;
|
||||
|
||||
public:
|
||||
DataStoreAddRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
Promise* aWorkerPromise,
|
||||
JSContext* aCx,
|
||||
JS::Handle<JS::Value> aObj,
|
||||
const Optional<StringOrUnsignedLong>& aId,
|
||||
const nsAString& aRevisionId,
|
||||
ErrorResult& aRv)
|
||||
const nsAString& aRevisionId)
|
||||
: DataStoreProxyRunnable(aWorkerPrivate, aBackingStore, aWorkerPromise)
|
||||
, StructuredCloneHolder(CloningNotSupported, TransferringNotSupported,
|
||||
SameProcessDifferentThread)
|
||||
, mId(aId)
|
||||
, mRevisionId(aRevisionId)
|
||||
, mRv(aRv)
|
||||
, mResult(NS_OK)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
}
|
||||
|
||||
// This needs to be structured cloned while it's still on the worker thread.
|
||||
Write(aCx, aObj, mRv);
|
||||
NS_WARN_IF(mRv.Failed());
|
||||
nsresult ErrorCode() const
|
||||
{
|
||||
return mResult;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
@ -309,22 +324,28 @@ protected:
|
|||
// Initialise an AutoJSAPI with the target window.
|
||||
AutoJSAPI jsapi;
|
||||
if (NS_WARN_IF(!jsapi.Init(mBackingStore->GetParentObject()))) {
|
||||
mRv.Throw(NS_ERROR_UNEXPECTED);
|
||||
mResult = NS_ERROR_UNEXPECTED;
|
||||
return true;
|
||||
}
|
||||
JSContext* cx = jsapi.cx();
|
||||
|
||||
ErrorResult rv;
|
||||
JS::Rooted<JS::Value> value(cx);
|
||||
Read(mBackingStore->GetParentObject(), cx, &value, mRv);
|
||||
if (NS_WARN_IF(mRv.Failed())) {
|
||||
Read(mBackingStore->GetParentObject(), cx, &value, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mResult = NS_ERROR_DOM_DATA_CLONE_ERR;
|
||||
return true;
|
||||
}
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->Add(cx, value, mId,
|
||||
mRevisionId, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mResult = NS_ERROR_FAILURE;
|
||||
return true;
|
||||
}
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->Add(cx,
|
||||
value,
|
||||
mId,
|
||||
mRevisionId,
|
||||
mRv);
|
||||
promise->AppendNativeHandler(mPromiseWorkerProxy);
|
||||
return true;
|
||||
}
|
||||
|
@ -336,19 +357,16 @@ class DataStoreRemoveRunnable final : public DataStoreProxyRunnable
|
|||
{
|
||||
const StringOrUnsignedLong& mId;
|
||||
const nsString mRevisionId;
|
||||
ErrorResult& mRv;
|
||||
|
||||
public:
|
||||
DataStoreRemoveRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
Promise* aWorkerPromise,
|
||||
const StringOrUnsignedLong& aId,
|
||||
const nsAString& aRevisionId,
|
||||
ErrorResult& aRv)
|
||||
const nsAString& aRevisionId)
|
||||
: DataStoreProxyRunnable(aWorkerPrivate, aBackingStore, aWorkerPromise)
|
||||
, mId(aId)
|
||||
, mRevisionId(aRevisionId)
|
||||
, mRv(aRv)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
|
@ -360,7 +378,14 @@ protected:
|
|||
{
|
||||
AssertIsOnMainThread();
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->Remove(mId, mRevisionId, mRv);
|
||||
ErrorResult rv;
|
||||
RefPtr<Promise> promise = mBackingStore->Remove(mId, mRevisionId, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mFailed = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
promise->AppendNativeHandler(mPromiseWorkerProxy);
|
||||
return true;
|
||||
}
|
||||
|
@ -370,17 +395,14 @@ protected:
|
|||
class DataStoreClearRunnable final : public DataStoreProxyRunnable
|
||||
{
|
||||
const nsString mRevisionId;
|
||||
ErrorResult& mRv;
|
||||
|
||||
public:
|
||||
DataStoreClearRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
Promise* aWorkerPromise,
|
||||
const nsAString& aRevisionId,
|
||||
ErrorResult& aRv)
|
||||
const nsAString& aRevisionId)
|
||||
: DataStoreProxyRunnable(aWorkerPrivate, aBackingStore, aWorkerPromise)
|
||||
, mRevisionId(aRevisionId)
|
||||
, mRv(aRv)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
|
@ -392,7 +414,14 @@ protected:
|
|||
{
|
||||
AssertIsOnMainThread();
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->Clear(mRevisionId, mRv);
|
||||
ErrorResult rv;
|
||||
RefPtr<Promise> promise = mBackingStore->Clear(mRevisionId, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mFailed = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
promise->AppendNativeHandler(mPromiseWorkerProxy);
|
||||
return true;
|
||||
}
|
||||
|
@ -403,23 +432,27 @@ class DataStoreSyncStoreRunnable final : public DataStoreRunnable
|
|||
{
|
||||
WorkerDataStoreCursor* mWorkerCursor;
|
||||
const nsString mRevisionId;
|
||||
ErrorResult& mRv;
|
||||
bool mFailed;
|
||||
|
||||
public:
|
||||
DataStoreSyncStoreRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
WorkerDataStoreCursor* aWorkerCursor,
|
||||
const nsAString& aRevisionId,
|
||||
ErrorResult& aRv)
|
||||
const nsAString& aRevisionId)
|
||||
: DataStoreRunnable(aWorkerPrivate, aBackingStore)
|
||||
, mWorkerCursor(aWorkerCursor)
|
||||
, mRevisionId(aRevisionId)
|
||||
, mRv(aRv)
|
||||
, mFailed(false)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
}
|
||||
|
||||
bool Failed() const
|
||||
{
|
||||
return mFailed;
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual bool
|
||||
MainThreadRun() override
|
||||
|
@ -427,7 +460,14 @@ protected:
|
|||
AssertIsOnMainThread();
|
||||
|
||||
// Point WorkerDataStoreCursor to DataStoreCursor.
|
||||
RefPtr<DataStoreCursor> cursor = mBackingStore->Sync(mRevisionId, mRv);
|
||||
ErrorResult rv;
|
||||
RefPtr<DataStoreCursor> cursor = mBackingStore->Sync(mRevisionId, rv);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mFailed = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
nsMainThreadPtrHandle<DataStoreCursor> backingCursor(
|
||||
new nsMainThreadPtrHolder<DataStoreCursor>(cursor));
|
||||
mWorkerCursor->SetBackingDataStoreCursor(backingCursor);
|
||||
|
@ -447,8 +487,7 @@ WorkerDataStore::GetName(JSContext* aCx, nsAString& aName, ErrorResult& aRv)
|
|||
new DataStoreGetStringRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
&DataStore::GetName,
|
||||
aName,
|
||||
aRv);
|
||||
aName);
|
||||
runnable->Dispatch(aCx);
|
||||
}
|
||||
|
||||
|
@ -463,8 +502,7 @@ WorkerDataStore::GetOwner(JSContext* aCx, nsAString& aOwner, ErrorResult& aRv)
|
|||
new DataStoreGetStringRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
&DataStore::GetOwner,
|
||||
aOwner,
|
||||
aRv);
|
||||
aOwner);
|
||||
runnable->Dispatch(aCx);
|
||||
}
|
||||
|
||||
|
@ -476,7 +514,7 @@ WorkerDataStore::GetReadOnly(JSContext* aCx, ErrorResult& aRv)
|
|||
workerPrivate->AssertIsOnWorkerThread();
|
||||
|
||||
RefPtr<DataStoreGetReadOnlyRunnable> runnable =
|
||||
new DataStoreGetReadOnlyRunnable(workerPrivate, mBackingStore, aRv);
|
||||
new DataStoreGetReadOnlyRunnable(workerPrivate, mBackingStore);
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
return runnable->mReadOnly;
|
||||
|
@ -499,11 +537,20 @@ WorkerDataStore::Get(JSContext* aCx,
|
|||
RefPtr<DataStoreGetRunnable> runnable =
|
||||
new DataStoreGetRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
promise,
|
||||
aId,
|
||||
aRv);
|
||||
promise);
|
||||
|
||||
if (!runnable->Id().AppendElements(aId, fallible)) {
|
||||
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
if (runnable->Failed()) {
|
||||
aRv.Throw(NS_ERROR_FAILURE);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return promise.forget();
|
||||
}
|
||||
|
||||
|
@ -527,13 +574,20 @@ WorkerDataStore::Put(JSContext* aCx,
|
|||
new DataStorePutRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
promise,
|
||||
aCx,
|
||||
aObj,
|
||||
aId,
|
||||
aRevisionId,
|
||||
aRv);
|
||||
aRevisionId);
|
||||
runnable->Write(aCx, aObj, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
if (NS_FAILED(runnable->ErrorCode())) {
|
||||
aRv.Throw(runnable->ErrorCode());
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return promise.forget();
|
||||
}
|
||||
|
||||
|
@ -557,13 +611,20 @@ WorkerDataStore::Add(JSContext* aCx,
|
|||
new DataStoreAddRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
promise,
|
||||
aCx,
|
||||
aObj,
|
||||
aId,
|
||||
aRevisionId,
|
||||
aRv);
|
||||
aRevisionId);
|
||||
runnable->Write(aCx, aObj, aRv);
|
||||
if (NS_WARN_IF(aRv.Failed())) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
if (NS_FAILED(runnable->ErrorCode())) {
|
||||
aRv.Throw(runnable->ErrorCode());
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return promise.forget();
|
||||
}
|
||||
|
||||
|
@ -587,10 +648,14 @@ WorkerDataStore::Remove(JSContext* aCx,
|
|||
mBackingStore,
|
||||
promise,
|
||||
aId,
|
||||
aRevisionId,
|
||||
aRv);
|
||||
aRevisionId);
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
if (runnable->Failed()) {
|
||||
aRv.Throw(NS_ERROR_FAILURE);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return promise.forget();
|
||||
}
|
||||
|
||||
|
@ -612,10 +677,14 @@ WorkerDataStore::Clear(JSContext* aCx,
|
|||
new DataStoreClearRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
promise,
|
||||
aRevisionId,
|
||||
aRv);
|
||||
aRevisionId);
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
if (runnable->Failed()) {
|
||||
aRv.Throw(NS_ERROR_FAILURE);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return promise.forget();
|
||||
}
|
||||
|
||||
|
@ -632,23 +701,18 @@ WorkerDataStore::GetRevisionId(JSContext* aCx,
|
|||
new DataStoreGetStringRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
&DataStore::GetRevisionId,
|
||||
aRevisionId,
|
||||
aRv);
|
||||
aRevisionId);
|
||||
runnable->Dispatch(aCx);
|
||||
}
|
||||
|
||||
// A DataStoreRunnable to run DataStore::GetLength(...) on the main thread.
|
||||
class DataStoreGetLengthRunnable final : public DataStoreProxyRunnable
|
||||
{
|
||||
ErrorResult& mRv;
|
||||
|
||||
public:
|
||||
DataStoreGetLengthRunnable(WorkerPrivate* aWorkerPrivate,
|
||||
const nsMainThreadPtrHandle<DataStore>& aBackingStore,
|
||||
Promise* aWorkerPromise,
|
||||
ErrorResult& aRv)
|
||||
Promise* aWorkerPromise)
|
||||
: DataStoreProxyRunnable(aWorkerPrivate, aBackingStore, aWorkerPromise)
|
||||
, mRv(aRv)
|
||||
{
|
||||
MOZ_ASSERT(aWorkerPrivate);
|
||||
aWorkerPrivate->AssertIsOnWorkerThread();
|
||||
|
@ -660,8 +724,14 @@ protected:
|
|||
{
|
||||
AssertIsOnMainThread();
|
||||
|
||||
RefPtr<Promise> promise = mBackingStore->GetLength(mRv);
|
||||
ErrorResult rv;
|
||||
RefPtr<Promise> promise = mBackingStore->GetLength(rv);
|
||||
promise->AppendNativeHandler(mPromiseWorkerProxy);
|
||||
if (NS_WARN_IF(rv.Failed())) {
|
||||
rv.SuppressException();
|
||||
mFailed = true;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
@ -681,10 +751,14 @@ WorkerDataStore::GetLength(JSContext* aCx, ErrorResult& aRv)
|
|||
RefPtr<DataStoreGetLengthRunnable> runnable =
|
||||
new DataStoreGetLengthRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
promise,
|
||||
aRv);
|
||||
promise);
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
if (runnable->Failed()) {
|
||||
aRv.Throw(NS_ERROR_FAILURE);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return promise.forget();
|
||||
}
|
||||
|
||||
|
@ -709,10 +783,14 @@ WorkerDataStore::Sync(JSContext* aCx,
|
|||
new DataStoreSyncStoreRunnable(workerPrivate,
|
||||
mBackingStore,
|
||||
workerCursor,
|
||||
aRevisionId,
|
||||
aRv);
|
||||
aRevisionId);
|
||||
runnable->Dispatch(aCx);
|
||||
|
||||
if (runnable->Failed()) {
|
||||
aRv.Throw(NS_ERROR_FAILURE);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return workerCursor.forget();
|
||||
}
|
||||
|
||||
|
|
|
@ -136,7 +136,6 @@ SharedWorker::Close()
|
|||
|
||||
if (mMessagePort) {
|
||||
mMessagePort->Close();
|
||||
mMessagePort = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ onconnect = function(evt) {
|
|||
test: (evtFromPort2.data.type == "connected"),
|
||||
msg: "The original message received" });
|
||||
port.postMessage({type: "finish"});
|
||||
close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,6 +28,9 @@ sw1.port.onmessage = function(event) {
|
|||
}
|
||||
|
||||
if (event.data.type == "finish") {
|
||||
info("Finished!");
|
||||
ok(sw1.port, "The port still exists");
|
||||
sw1.port.foo = sw1; // Just a test to see if we leak.
|
||||
SimpleTest.finish();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<body>
|
||||
|
||||
<div style="mix-blend-mode: saturation; margin-right: 1187px; transform: translateX(2px);">
|
||||
<div style="border-style: outset; mix-blend-mode: color-dodge; display: inherit; padding: 1760px; float: right;"></div>
|
||||
<div style="height: 2000px; overflow: auto;"></div>
|
||||
</div>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -126,3 +126,4 @@ load 944579.html
|
|||
pref(security.fileuri.strict_origin_policy,false) load 950000.html
|
||||
load 1034403-1.html
|
||||
load balinese-letter-spacing.html
|
||||
load 1225125-1.html
|
||||
|
|
|
@ -25,6 +25,7 @@ namespace JS {
|
|||
_(GetProp_InlineAccess) \
|
||||
_(GetProp_Innerize) \
|
||||
_(GetProp_InlineCache) \
|
||||
_(GetProp_SharedCache) \
|
||||
\
|
||||
_(SetProp_CommonSetter) \
|
||||
_(SetProp_TypedObject) \
|
||||
|
|
|
@ -0,0 +1,354 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
||||
* vim: set ts=8 sts=4 et sw=4 tw=99:
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef js_UbiNodeDominatorTree_h
|
||||
#define js_UbiNodeDominatorTree_h
|
||||
|
||||
#include "mozilla/DebugOnly.h"
|
||||
#include "mozilla/Maybe.h"
|
||||
#include "mozilla/Move.h"
|
||||
#include "mozilla/UniquePtr.h"
|
||||
|
||||
#include "jsalloc.h"
|
||||
|
||||
#include "js/UbiNode.h"
|
||||
#include "js/UbiNodePostOrder.h"
|
||||
#include "js/Utility.h"
|
||||
#include "js/Vector.h"
|
||||
|
||||
namespace JS {
|
||||
namespace ubi {
|
||||
|
||||
/**
|
||||
* In a directed graph with a root node `R`, a node `A` is said to "dominate" a
|
||||
* node `B` iff every path from `R` to `B` contains `A`. A node `A` is said to
|
||||
* be the "immediate dominator" of a node `B` iff it dominates `B`, is not `B`
|
||||
* itself, and does not dominate any other nodes which also dominate `B` in
|
||||
* turn.
|
||||
*
|
||||
* If we take every node from a graph `G` and create a new graph `T` with edges
|
||||
* to each node from its immediate dominator, then `T` is a tree (each node has
|
||||
* only one immediate dominator, or none if it is the root). This tree is called
|
||||
* a "dominator tree".
|
||||
*
|
||||
* This class represents a dominator tree constructed from a `JS::ubi::Node`
|
||||
* heap graph. The domination relationship and dominator trees are useful tools
|
||||
* for analyzing heap graphs because they tell you:
|
||||
*
|
||||
* - Exactly what could be reclaimed by the GC if some node `A` became
|
||||
* unreachable: those nodes which are dominated by `A`,
|
||||
*
|
||||
* - The "retained size" of a node in the heap graph, in contrast to its
|
||||
* "shallow size". The "shallow size" is the space taken by a node itself,
|
||||
* not counting anything it references. The "retained size" of a node is its
|
||||
* shallow size plus the size of all the things that would be collected if
|
||||
* the original node wasn't (directly or indirectly) referencing them. In
|
||||
* other words, the retained size is the shallow size of a node plus the
|
||||
* shallow sizes of every other node it dominates. For example, the root
|
||||
* node in a binary tree might have a small shallow size that does not take
|
||||
* up much space itself, but it dominates the rest of the binary tree and
|
||||
* its retained size is therefore significant (assuming no external
|
||||
* references into the tree).
|
||||
*
|
||||
* The simple, engineered algorithm presented in "A Simple, Fast Dominance
|
||||
* Algorithm" by Cooper el al[0] is used to find dominators and construct the
|
||||
* dominator tree. This algorithm runs in O(n^2) time, but is faster in practice
|
||||
* than alternative algorithms with better theoretical running times, such as
|
||||
* Lengauer-Tarjan which runs in O(e * log(n)). The big caveat to that statement
|
||||
* is that Cooper et al found it is faster in practice *on control flow graphs*
|
||||
* and I'm not convinced that this property also holds on *heap* graphs. That
|
||||
* said, the implementation of this algorithm is *much* simpler than
|
||||
* Lengauer-Tarjan and has been found to be fast enough at least for the time
|
||||
* being.
|
||||
*
|
||||
* [0]: http://www.cs.rice.edu/~keith/EMBED/dom.pdf
|
||||
*/
|
||||
class JS_PUBLIC_API(DominatorTree)
|
||||
{
|
||||
private:
|
||||
// Type aliases.
|
||||
using NodeSet = js::HashSet<Node, js::DefaultHasher<Node>, js::SystemAllocPolicy>;
|
||||
using NodeSetPtr = mozilla::UniquePtr<NodeSet, JS::DeletePolicy<NodeSet>>;
|
||||
using PredecessorSets = js::HashMap<Node, NodeSetPtr, js::DefaultHasher<Node>,
|
||||
js::SystemAllocPolicy>;
|
||||
using NodeToIndexMap = js::HashMap<Node, uint32_t, js::DefaultHasher<Node>,
|
||||
js::SystemAllocPolicy>;
|
||||
|
||||
private:
|
||||
// Data members.
|
||||
mozilla::Vector<Node> postOrder;
|
||||
NodeToIndexMap nodeToPostOrderIndex;
|
||||
mozilla::Vector<uint32_t> doms;
|
||||
|
||||
private:
|
||||
// We use `UNDEFINED` as a sentinel value in the `doms` vector to signal
|
||||
// that we haven't found any dominators for the node at the corresponding
|
||||
// index in `postOrder` yet.
|
||||
static const uint32_t UNDEFINED = UINT32_MAX;
|
||||
|
||||
DominatorTree(mozilla::Vector<Node>&& postOrder, NodeToIndexMap&& nodeToPostOrderIndex,
|
||||
mozilla::Vector<uint32_t>&& doms)
|
||||
: postOrder(mozilla::Move(postOrder))
|
||||
, nodeToPostOrderIndex(mozilla::Move(nodeToPostOrderIndex))
|
||||
, doms(mozilla::Move(doms))
|
||||
{ }
|
||||
|
||||
static uint32_t intersect(mozilla::Vector<uint32_t>& doms, uint32_t finger1, uint32_t finger2) {
|
||||
while (finger1 != finger2) {
|
||||
if (finger1 < finger2)
|
||||
finger1 = doms[finger1];
|
||||
else if (finger2 < finger1)
|
||||
finger2 = doms[finger2];
|
||||
}
|
||||
return finger1;
|
||||
}
|
||||
|
||||
// Do the post order traversal of the heap graph and populate our
|
||||
// predecessor sets.
|
||||
static bool doTraversal(JSRuntime* rt, AutoCheckCannotGC& noGC, const Node& root,
|
||||
mozilla::Vector<Node>& postOrder, PredecessorSets& predecessorSets) {
|
||||
uint32_t nodeCount = 0;
|
||||
auto onNode = [&](const Node& node) {
|
||||
nodeCount++;
|
||||
if (MOZ_UNLIKELY(nodeCount == UINT32_MAX))
|
||||
return false;
|
||||
return postOrder.append(node);
|
||||
};
|
||||
|
||||
auto onEdge = [&](const Node& origin, const Edge& edge) {
|
||||
auto p = predecessorSets.lookupForAdd(edge.referent);
|
||||
if (!p) {
|
||||
auto set = rt->make_unique<NodeSet>();
|
||||
if (!set ||
|
||||
!set->init() ||
|
||||
!predecessorSets.add(p, edge.referent, mozilla::Move(set)))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
MOZ_ASSERT(p && p->value());
|
||||
return p->value()->put(origin);
|
||||
};
|
||||
|
||||
PostOrder traversal(rt, noGC);
|
||||
return traversal.init() &&
|
||||
traversal.addStart(root) &&
|
||||
traversal.traverse(onNode, onEdge);
|
||||
}
|
||||
|
||||
// Populates the given `map` with an entry for each node to its index in
|
||||
// `postOrder`.
|
||||
static bool mapNodesToTheirIndices(mozilla::Vector<Node>& postOrder, NodeToIndexMap& map) {
|
||||
MOZ_ASSERT(!map.initialized());
|
||||
MOZ_ASSERT(postOrder.length() < UINT32_MAX);
|
||||
uint32_t length = postOrder.length();
|
||||
if (!map.init(length))
|
||||
return false;
|
||||
for (uint32_t i = 0; i < length; i++)
|
||||
map.putNewInfallible(postOrder[i], i);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Convert the Node -> NodeSet predecessorSets to a index -> Vector<index>
|
||||
// form.
|
||||
    static bool convertPredecessorSetsToVectors(
        const Node& root,
        mozilla::Vector<Node>& postOrder,
        PredecessorSets& predecessorSets,
        NodeToIndexMap& nodeToPostOrderIndex,
        mozilla::Vector<mozilla::Vector<uint32_t>>& predecessorVectors)
    {
        MOZ_ASSERT(postOrder.length() < UINT32_MAX);
        uint32_t length = postOrder.length();

        MOZ_ASSERT(predecessorVectors.length() == 0);
        if (!predecessorVectors.growBy(length))
            return false;

        for (uint32_t i = 0; i < length - 1; i++) {
            auto& node = postOrder[i];
            MOZ_ASSERT(node != root,
                       "Only the last node should be root, since this was a post order traversal.");

            auto ptr = predecessorSets.lookup(node);
            MOZ_ASSERT(ptr,
                       "Because this isn't the root, it had better have predecessors, or else how "
                       "did we even find it.");

            auto& predecessors = ptr->value();
            if (!predecessorVectors[i].reserve(predecessors->count()))
                return false;
            for (auto range = predecessors->all(); !range.empty(); range.popFront()) {
                auto ptr = nodeToPostOrderIndex.lookup(range.front());
                MOZ_ASSERT(ptr);
                predecessorVectors[i].infallibleAppend(ptr->value());
            }
        }
        predecessorSets.finish();
        return true;
    }

    // Initialize `doms` such that the immediate dominator of the `root` is the
    // `root` itself and all others are `UNDEFINED`.
    static bool initializeDominators(mozilla::Vector<uint32_t>& doms, uint32_t length) {
        MOZ_ASSERT(doms.length() == 0);
        if (!doms.growByUninitialized(length))
            return false;
        doms[length - 1] = length - 1;
        for (uint32_t i = 0; i < length - 1; i++)
            doms[i] = UNDEFINED;
        return true;
    }

    void assertSanity() const {
        MOZ_ASSERT(postOrder.length() == doms.length());
        MOZ_ASSERT(postOrder.length() == nodeToPostOrderIndex.count());
    }

  public:
    // DominatorTree is not copy-able.
    DominatorTree(const DominatorTree&) = delete;
    DominatorTree& operator=(const DominatorTree&) = delete;

    // DominatorTree is move-able.
    DominatorTree(DominatorTree&& rhs)
        : postOrder(mozilla::Move(rhs.postOrder))
        , nodeToPostOrderIndex(mozilla::Move(rhs.nodeToPostOrderIndex))
        , doms(mozilla::Move(rhs.doms))
    {
        MOZ_ASSERT(this != &rhs, "self-move is not allowed");
    }
    DominatorTree& operator=(DominatorTree&& rhs) {
        this->~DominatorTree();
        new (this) DominatorTree(mozilla::Move(rhs));
        return *this;
    }

    /**
     * Construct a `DominatorTree` of the heap graph visible from `root`. The
     * `root` is also used as the root of the resulting dominator tree.
     *
     * The resulting `DominatorTree` instance must not outlive the
     * `JS::ubi::Node` graph it was constructed from.
     *
     *   - For `JS::ubi::Node` graphs backed by the live heap graph, this means
     *     that the `DominatorTree`'s lifetime _must_ be contained within the
     *     scope of the provided `AutoCheckCannotGC` reference because a GC will
     *     invalidate the nodes.
     *
     *   - For `JS::ubi::Node` graphs backed by some other offline structure
     *     provided by the embedder, the resulting `DominatorTree`'s lifetime is
     *     bounded by that offline structure's lifetime.
     *
     * In practice, this means that within SpiderMonkey we must treat
     * `DominatorTree` as if it were backed by the live heap graph and trust
     * that embedders with knowledge of the graph's implementation will do the
     * Right Thing.
     *
     * Returns `mozilla::Nothing()` on OOM failure. It is the caller's
     * responsibility to handle and report the OOM.
     */
    static mozilla::Maybe<DominatorTree>
    Create(JSRuntime* rt, AutoCheckCannotGC& noGC, const Node& root) {
        mozilla::Vector<Node> postOrder;
        PredecessorSets predecessorSets;
        if (!predecessorSets.init() || !doTraversal(rt, noGC, root, postOrder, predecessorSets))
            return mozilla::Nothing();

        MOZ_ASSERT(postOrder.length() < UINT32_MAX);
        uint32_t length = postOrder.length();
        MOZ_ASSERT(postOrder[length - 1] == root);

        // From here on out we wish to avoid hash table lookups, and we use
        // indices into `postOrder` instead of actual nodes wherever
        // possible. This greatly improves the performance of this
        // implementation, but we have to pay a little bit of upfront cost to
        // convert our data structures to play along first.

        NodeToIndexMap nodeToPostOrderIndex;
        if (!mapNodesToTheirIndices(postOrder, nodeToPostOrderIndex))
            return mozilla::Nothing();

        mozilla::Vector<mozilla::Vector<uint32_t>> predecessorVectors;
        if (!convertPredecessorSetsToVectors(root, postOrder, predecessorSets, nodeToPostOrderIndex,
                                             predecessorVectors))
            return mozilla::Nothing();

        mozilla::Vector<uint32_t> doms;
        if (!initializeDominators(doms, length))
            return mozilla::Nothing();

        bool changed = true;
        while (changed) {
            changed = false;

            // Iterate over the non-root nodes in reverse post order.
            for (uint32_t indexPlusOne = length - 1; indexPlusOne > 0; indexPlusOne--) {
                MOZ_ASSERT(postOrder[indexPlusOne - 1] != root);

                // Take the intersection of every predecessor's dominator set;
                // that is the current best guess at the immediate dominator for
                // this node.

                uint32_t newIDomIdx = UNDEFINED;

                auto& predecessors = predecessorVectors[indexPlusOne - 1];
                auto range = predecessors.all();
                for ( ; !range.empty(); range.popFront()) {
                    auto idx = range.front();
                    if (doms[idx] != UNDEFINED) {
                        newIDomIdx = idx;
                        break;
                    }
                }

                MOZ_ASSERT(newIDomIdx != UNDEFINED,
                           "Because the root is initialized to dominate itself and is the first "
                           "node in every path, there must exist a predecessor to this node that "
                           "also has a dominator.");

                for ( ; !range.empty(); range.popFront()) {
                    auto idx = range.front();
                    if (doms[idx] != UNDEFINED)
                        newIDomIdx = intersect(doms, newIDomIdx, idx);
                }

                // If the immediate dominator changed, we will have to do
                // another pass of the outer while loop to continue the forward
                // dataflow.
                if (newIDomIdx != doms[indexPlusOne - 1]) {
                    doms[indexPlusOne - 1] = newIDomIdx;
                    changed = true;
                }
            }
        }

        return mozilla::Some(DominatorTree(mozilla::Move(postOrder),
                                           mozilla::Move(nodeToPostOrderIndex),
                                           mozilla::Move(doms)));
    }

    /**
     * Return the immediate dominator of the given `node`. If `node` was not
     * reachable from the `root` that this dominator tree was constructed from,
     * then return the null `JS::ubi::Node`.
     */
    Node getImmediateDominator(const Node& node) const {
        assertSanity();
        auto ptr = nodeToPostOrderIndex.lookup(node);
        if (!ptr)
            return Node();

        auto idx = ptr->value();
        MOZ_ASSERT(idx < postOrder.length());
        return postOrder[doms[idx]];
    }
};

} // namespace ubi
} // namespace JS

#endif // js_UbiNodeDominatorTree_h
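
The `while (changed)` loop in `Create` is the standard iterative dominator dataflow of Cooper, Harvey, and Kennedy, expressed over post-order indices so that `intersect` only ever compares plain integers. For review purposes, here is a minimal self-contained sketch of the same computation; it is not part of the patch, and the graph, names, and output are invented for the example (a four-node diamond R -> {A, B} -> C):

// Editor's illustration (not part of this patch): the same fixed-point
// dataflow as `Create` above, hard-coded to a small diamond graph and
// written against plain standard C++ containers.
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t UNDEFINED = UINT32_MAX;

static uint32_t intersect(const std::vector<uint32_t>& doms, uint32_t finger1, uint32_t finger2) {
    // Walk both fingers up their dominator chains until they meet.
    while (finger1 != finger2) {
        if (finger1 < finger2)
            finger1 = doms[finger1];
        else
            finger2 = doms[finger2];
    }
    return finger1;
}

int main() {
    // Post-order indices: C = 0, A = 1, B = 2, R = 3 (the root).
    // preds[i] lists the predecessors of node i, as post-order indices.
    std::vector<std::vector<uint32_t>> preds { {1, 2}, {3}, {3}, {} };
    uint32_t length = uint32_t(preds.size());

    std::vector<uint32_t> doms(length, UNDEFINED);
    doms[length - 1] = length - 1;                  // The root dominates itself.

    bool changed = true;
    while (changed) {
        changed = false;
        // Visit the non-root nodes in reverse post order.
        for (uint32_t i = length - 1; i-- > 0; ) {
            uint32_t newIdom = UNDEFINED;
            for (uint32_t predIdx : preds[i]) {
                if (doms[predIdx] == UNDEFINED)     // Predecessor not processed yet.
                    continue;
                newIdom = (newIdom == UNDEFINED) ? predIdx : intersect(doms, newIdom, predIdx);
            }
            if (newIdom != doms[i]) {
                doms[i] = newIdom;
                changed = true;                     // Keep iterating until a fixed point.
            }
        }
    }

    // Prints idom(i) = 3 for every node: R immediately dominates A, B, and C,
    // and (by convention) itself.
    for (uint32_t i = 0; i < length; i++)
        std::printf("idom(%u) = %u\n", unsigned(i), unsigned(doms[i]));
    return 0;
}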

@@ -8,6 +8,7 @@
#define js_UbiNodePostOrder_h

#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"
#include "mozilla/Move.h"

#include "jsalloc.h"

@@ -19,32 +20,43 @@
namespace JS {
namespace ubi {

// A post-order depth-first traversal of `ubi::Node` graphs.
//
// NB: This traversal visits each node reachable from the start set exactly
// once, and does not visit edges at all. Therefore, this traversal would be a
// very poor choice for recording multiple paths to the same node, for example.
// If your analysis needs to consider edges, use `JS::ubi::BreadthFirst`
// instead.
//
// No GC may occur while an instance of `PostOrder` is live.
//
// The `Visitor` type provided to `PostOrder::traverse` must have the following
// member:
//
//   bool operator()(Node& node)
//
//     The visitor method. This method is called once for each `node` reachable
//     from the start set in post-order.
//
//     The visitor function should return true on success, or false if an error
//     occurs. A false return value terminates the traversal immediately, and
//     causes `PostOrder::traverse` to return false.
/**
 * A post-order depth-first traversal of `ubi::Node` graphs.
 *
 * No GC may occur while an instance of `PostOrder` is live.
 *
 * The `NodeVisitor` type provided to `PostOrder::traverse` must have the
 * following member:
 *
 *   bool operator()(Node& node)
 *
 *     The node visitor method. This method is called once for each `node`
 *     reachable from the start set in post-order.
 *
 *     This visitor function should return true on success, or false if an error
 *     occurs. A false return value terminates the traversal immediately, and
 *     causes `PostOrder::traverse` to return false.
 *
 * The `EdgeVisitor` type provided to `PostOrder::traverse` must have the
 * following member:
 *
 *   bool operator()(Node& origin, Edge& edge)
 *
 *     The edge visitor method. This method is called once for each outgoing
 *     `edge` from `origin` that is reachable from the start set.
 *
 *     NB: UNLIKE NODES, THERE IS NO GUARANTEED ORDER IN WHICH EDGES AND THEIR
 *     ORIGINS ARE VISITED!
 *
 *     This visitor function should return true on success, or false if an error
 *     occurs. A false return value terminates the traversal immediately, and
 *     causes `PostOrder::traverse` to return false.
 */
struct PostOrder {
  private:
    struct OriginAndEdges {
        Node origin;
        EdgeVector edges;
        Node origin;
        EdgeVector edges;

        OriginAndEdges(const Node& node, EdgeVector&& edges)
            : origin(node)

@@ -120,14 +132,16 @@ struct PostOrder {
    }

    // Traverse the graph in post-order, starting with the set of nodes passed
    // to `addStart` and applying `visitor::operator()` for each node in
    // the graph, as described above.
    // to `addStart` and applying `onNode::operator()` for each node in the
    // graph and `onEdge::operator()` for each edge in the graph, as described
    // above.
    //
    // This should be called only once per instance of this class.
    //
    // Return false on OOM or error return from `visitor::operator()`.
    template<typename Visitor>
    bool traverse(Visitor visitor) {
    // Return false on OOM or error return from `onNode::operator()` or
    // `onEdge::operator()`.
    template<typename NodeVisitor, typename EdgeVisitor>
    bool traverse(NodeVisitor onNode, EdgeVisitor onEdge) {
        MOZ_ASSERT(!traversed, "Can only traverse() once!");
        traversed = true;

@@ -136,7 +150,7 @@ struct PostOrder {
            auto& edges = stack.back().edges;

            if (edges.empty()) {
                if (!visitor(origin))
                if (!onNode(origin))
                    return false;
                stack.popBack();
                continue;

@@ -145,14 +159,20 @@ struct PostOrder {
            Edge edge = mozilla::Move(edges.back());
            edges.popBack();

            if (!onEdge(origin, edge))
                return false;

            auto ptr = seen.lookupForAdd(edge.referent);
            // We've already seen this node, don't follow its edges.
            if (ptr)
                continue;

            // Mark the referent as seen and follow its edges.
            if (!seen.add(ptr, edge.referent) || !pushForTraversing(edge.referent))
            if (!seen.add(ptr, edge.referent) ||
                !pushForTraversing(edge.referent))
            {
                return false;
            }
        }

        return true;
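
With this change `traverse` takes two callables instead of one. As a reviewer's sketch (not part of the patch), a caller that only wants to count reachable nodes and edges could pass two lambdas, mirroring `doTraversal` in UbiNodeDominatorTree.h above. The helper name `countReachable` is invented, and an in-scope `JSRuntime*`, start `Node`, and `AutoCheckCannotGC` are assumed to be provided by the caller:

// Editor's sketch, not part of this patch: exercising the new
// traverse(onNode, onEdge) contract with two counting visitors.
static bool
countReachable(JSRuntime* rt, JS::AutoCheckCannotGC& noGC, const JS::ubi::Node& start,
               size_t* nodes, size_t* edges)
{
    auto onNode = [&](const JS::ubi::Node& node) {
        (*nodes)++;      // Called exactly once per reachable node, in post order.
        return true;     // Returning false would abort the traversal.
    };
    auto onEdge = [&](const JS::ubi::Node& origin, const JS::ubi::Edge& edge) {
        (*edges)++;      // Called once per reachable edge; order is not guaranteed.
        return true;
    };
    JS::ubi::PostOrder traversal(rt, noGC);
    return traversal.init() &&
           traversal.addStart(start) &&
           traversal.traverse(onNode, onEdge);
}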

@@ -33,8 +33,6 @@ class JS_PUBLIC_API(WeakMapPtr)
    V lookup(const K& key);
    bool put(JSContext* cx, const K& key, const V& value);

    static void keyMarkCallback(JSTracer* trc, K key, void* data);

  private:
    void* ptr;

@@ -408,9 +408,16 @@ struct Zone : public JS::shadow::Zone,
        uniqueIds_.remove(cell);
    }

    // Off-thread parsing should not result in any UIDs being created.
    void assertNoUniqueIdsInZone() const {
        MOZ_ASSERT(uniqueIds_.count() == 0);
    // When finished parsing off-thread, transfer any UIDs we created in the
    // off-thread zone into the target zone.
    void adoptUniqueIds(JS::Zone* source) {
        js::AutoEnterOOMUnsafeRegion oomUnsafe;
        for (js::gc::UniqueIdMap::Enum e(source->uniqueIds_); !e.empty(); e.popFront()) {
            MOZ_ASSERT(!uniqueIds_.has(e.front().key()));
            if (!uniqueIds_.put(e.front().key(), e.front().value()))
                oomUnsafe.crash("failed to transfer unique ids from off-main-thread");
        }
        source->uniqueIds_.clear();
    }

#ifdef JSGC_HASH_TABLE_CHECKS
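
The new `adoptUniqueIds` replaces the old assertion with an explicit hand-off: every unique ID minted in the off-thread zone is moved into the target zone, asserting that the keys cannot collide, and the source map is then emptied. A minimal standalone sketch of the same adopt-then-clear pattern, using std::unordered_map in place of the GC hash map (the types and names below are invented for illustration, not part of the patch):

// Editor's illustration only: adopt-then-clear semantics with standard containers.
#include <cassert>
#include <cstdint>
#include <unordered_map>

using UniqueIdMap = std::unordered_map<const void*, uint64_t>;

// Move every entry of `source` into `target`, then leave `source` empty.
void adoptUniqueIds(UniqueIdMap& target, UniqueIdMap& source) {
    for (const auto& entry : source) {
        assert(target.count(entry.first) == 0);   // A cell must not already have an ID here.
        target.emplace(entry.first, entry.second);
    }
    source.clear();
}

int main() {
    int a, b;                                     // Stand-ins for GC cells.
    UniqueIdMap offThreadZone { {&a, 1}, {&b, 2} };
    UniqueIdMap targetZone;
    adoptUniqueIds(targetZone, offThreadZone);
    assert(targetZone.size() == 2 && offThreadZone.empty());
    return 0;
}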

@@ -766,7 +766,8 @@ BaselineCompiler::emitArgumentTypeChecks()
    frame.pushThis();
    frame.popRegsAndSync(1);

    ICTypeMonitor_Fallback::Compiler compiler(cx, (uint32_t) 0);
    ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline,
                                              (uint32_t) 0);
    if (!emitNonOpIC(compiler.getStub(&stubSpace_)))
        return false;

@@ -774,7 +775,8 @@ BaselineCompiler::emitArgumentTypeChecks()
        frame.pushArg(i);
        frame.popRegsAndSync(1);

        ICTypeMonitor_Fallback::Compiler compiler(cx, i + 1);
        ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline,
                                                  i + 1);
        if (!emitNonOpIC(compiler.getStub(&stubSpace_)))
            return false;
    }

@@ -2258,7 +2260,7 @@ BaselineCompiler::emit_JSOP_GETPROP()
    frame.popRegsAndSync(1);

    // Call IC.
    ICGetProp_Fallback::Compiler compiler(cx);
    ICGetProp_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline);
    if (!emitOpIC(compiler.getStub(&stubSpace_)))
        return false;

@@ -2361,7 +2363,8 @@ BaselineCompiler::emit_JSOP_GETALIASEDVAR()

    if (ionCompileable_) {
        // No need to monitor types if we know Ion can't compile this script.
        ICTypeMonitor_Fallback::Compiler compiler(cx, (ICMonitoredFallbackStub*) nullptr);
        ICTypeMonitor_Fallback::Compiler compiler(cx, ICStubCompiler::Engine::Baseline,
                                                  (ICMonitoredFallbackStub*) nullptr);
        if (!emitOpIC(compiler.getStub(&stubSpace_)))
            return false;
    }
(File diff not shown because it is too large.)
(File diff not shown because it is too large.)

@@ -93,26 +93,6 @@ namespace jit {
    _(GetIntrinsic_Fallback) \
    _(GetIntrinsic_Constant) \
                             \
    _(GetProp_Fallback) \
    _(GetProp_ArrayLength) \
    _(GetProp_UnboxedArrayLength) \
    _(GetProp_Primitive) \
    _(GetProp_StringLength) \
    _(GetProp_Native) \
    _(GetProp_NativeDoesNotExist) \
    _(GetProp_NativePrototype) \
    _(GetProp_Unboxed) \
    _(GetProp_TypedObject) \
    _(GetProp_CallScripted) \
    _(GetProp_CallNative) \
    _(GetProp_CallNativeGlobal) \
    _(GetProp_CallDOMProxyNative) \
    _(GetProp_CallDOMProxyWithGenerationNative) \
    _(GetProp_DOMProxyShadowed) \
    _(GetProp_ArgumentsLength) \
    _(GetProp_ArgumentsCallee) \
    _(GetProp_Generic) \
                             \
    _(SetProp_Fallback) \
    _(SetProp_Native) \
    _(SetProp_NativeAdd) \

@@ -1758,6 +1758,11 @@ CodeGenerator::visitUnarySharedStub(LUnarySharedStub* lir)
      case JSOP_NEG:
        emitSharedStub(ICStub::Kind::UnaryArith_Fallback, lir);
        break;
      case JSOP_CALLPROP:
      case JSOP_GETPROP:
      case JSOP_LENGTH:
        emitSharedStub(ICStub::Kind::GetProp_Fallback, lir);
        break;
      default:
        MOZ_CRASH("Unsupported jsop in shared stubs.");
    }

@@ -7969,6 +7974,11 @@ CodeGenerator::linkSharedStubs(JSContext* cx)
        stub = stubCompiler.getStub(&stubSpace_);
        break;
      }
      case ICStub::Kind::GetProp_Fallback: {
        ICGetProp_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonMonkey);
        stub = stubCompiler.getStub(&stubSpace_);
        break;
      }
      default:
        MOZ_CRASH("Unsupported shared stub.");
    }

@@ -10888,6 +10888,11 @@ IonBuilder::jsop_getprop(PropertyName* name)
    if (!getPropTryCache(&emitted, obj, name, barrier, types) || emitted)
        return emitted;

    // Try to emit a shared stub.
    trackOptimizationAttempt(TrackedStrategy::GetProp_SharedCache);
    if (!getPropTrySharedStub(&emitted, obj) || emitted)
        return emitted;

    // Emit a call.
    MCallGetProperty* call = MCallGetProperty::New(alloc(), obj, name);
    current->add(call);

@@ -11838,6 +11843,27 @@ IonBuilder::getPropTryCache(bool* emitted, MDefinition* obj, PropertyName* name,
    return true;
}

bool
IonBuilder::getPropTrySharedStub(bool* emitted, MDefinition* obj)
{
    MOZ_ASSERT(*emitted == false);

    // Try to emit a shared stub cache.

    if (js_JitOptions.disableSharedStubs)
        return true;

    MInstruction* stub = MUnarySharedStub::New(alloc(), obj);
    current->add(stub);
    current->push(stub);

    if (!resumeAfter(stub))
        return false;

    *emitted = true;
    return true;
}

MDefinition*
IonBuilder::tryInnerizeWindow(MDefinition* obj)
{

@@ -455,6 +455,7 @@ class IonBuilder
                         TemporaryTypeSet* types);
    bool getPropTryCache(bool* emitted, MDefinition* obj, PropertyName* name,
                         BarrierKind barrier, TemporaryTypeSet* types);
    bool getPropTrySharedStub(bool* emitted, MDefinition* obj);

    // jsop_setprop() helpers.
    bool setPropTryCommonSetter(bool* emitted, MDefinition* obj,

@@ -171,6 +171,9 @@ class JitFrameIterator
    bool isIonStub() const {
        return type_ == JitFrame_IonStub;
    }
    bool isIonStubMaybeUnwound() const {
        return type_ == JitFrame_IonStub || type_ == JitFrame_Unwound_IonStub;
    }
    bool isBailoutJS() const {
        return type_ == JitFrame_Bailout;
    }

@@ -1624,10 +1624,13 @@ GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes)
        MOZ_ASSERT(it.isBaselineStub() || it.isBaselineJS() || it.isIonJS());
    }

    // Skip Baseline stub frames.
    // Skip Baseline or Ion stub frames.
    if (it.isBaselineStubMaybeUnwound()) {
        ++it;
        MOZ_ASSERT(it.isBaselineJS());
    } else if (it.isIonStubMaybeUnwound()) {
        ++it;
        MOZ_ASSERT(it.isIonJS());
    }

    MOZ_ASSERT(it.isBaselineJS() || it.isIonJS());

@@ -5547,10 +5547,13 @@ jit::PropertyWriteNeedsTypeBarrier(TempAllocator& alloc, CompilerConstraintList*
                                           implicitType);
            break;
        }
    }

    // Perform additional filtering to make sure that any unboxed property
    // being written can accommodate the value.
    if (key->isGroup() && key->group()->maybeUnboxedLayout()) {
    // Perform additional filtering to make sure that any unboxed property
    // being written can accommodate the value.
    for (size_t i = 0; i < types->getObjectCount(); i++) {
        TypeSet::ObjectKey* key = types->getObject(i);
        if (key && key->isGroup() && key->group()->maybeUnboxedLayout()) {
            const UnboxedLayout& layout = key->group()->unboxedLayout();
            if (name) {
                const UnboxedLayout::Property* property = layout.lookup(name);

@@ -439,6 +439,7 @@ class MacroAssembler : public MacroAssemblerSpecific

    void Push(const Operand op) DEFINED_ON(x86_shared);
    void Push(Register reg) PER_SHARED_ARCH;
    void Push(Register reg1, Register reg2, Register reg3, Register reg4) DEFINED_ON(arm64);
    void Push(const Imm32 imm) PER_SHARED_ARCH;
    void Push(const ImmWord imm) PER_SHARED_ARCH;
    void Push(const ImmPtr imm) PER_SHARED_ARCH;
(File diff not shown because it is too large.)
(Some files were not shown because too many files changed in this diff.)