Mirror of https://github.com/mozilla/gecko-dev.git
Backed out changeset f4c8b3de527e (bug 1286895) for crashing in test_bug1241485.html. r=backout
This commit is contained in:
Parent: 70fd734aff
Commit: 3be5f0ac43
@@ -90,15 +90,6 @@ using mozilla::Preferences;
// Half the size of the actual C stack, to be safe.
#define WORKER_CONTEXT_NATIVE_STACK_LIMIT 128 * sizeof(size_t) * 1024

// The maximum number of hardware concurrency, overridable via pref.
#define MAX_HARDWARE_CONCURRENCY 8

// The maximum number of threads to use for workers, overridable via pref.
#define MAX_WORKERS_PER_DOMAIN 512

static_assert(MAX_WORKERS_PER_DOMAIN >= 1,
              "We should allow at least one worker per domain.");

// The default number of seconds that close handlers will be allowed to run for
// content workers.
#define MAX_SCRIPT_RUN_TIME_SEC 10
@@ -110,8 +101,6 @@ static_assert(MAX_WORKERS_PER_DOMAIN >= 1,
#define MAX_IDLE_THREADS 20

#define PREF_WORKERS_PREFIX "dom.workers."
#define PREF_WORKERS_MAX_PER_DOMAIN PREF_WORKERS_PREFIX "maxPerDomain"
#define PREF_WORKERS_MAX_HARDWARE_CONCURRENCY "dom.maxHardwareConcurrency"

#define PREF_MAX_SCRIPT_RUN_TIME_CONTENT "dom.max_script_run_time"
#define PREF_MAX_SCRIPT_RUN_TIME_CHROME "dom.max_chrome_script_run_time"
@@ -148,9 +137,6 @@ namespace {

const uint32_t kNoIndex = uint32_t(-1);

uint32_t gMaxWorkersPerDomain = MAX_WORKERS_PER_DOMAIN;
uint32_t gMaxHardwareConcurrency = MAX_HARDWARE_CONCURRENCY;

// Does not hold an owning reference.
RuntimeService* gRuntimeService = nullptr;
@@ -1371,7 +1357,6 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)

  const bool isServiceWorker = aWorkerPrivate->IsServiceWorker();
  const bool isSharedWorker = aWorkerPrivate->IsSharedWorker();
  const bool isDedicatedWorker = aWorkerPrivate->IsDedicatedWorker();
  if (isServiceWorker) {
    AssertIsOnMainThread();
    Telemetry::Accumulate(Telemetry::SERVICE_WORKER_SPAWN_ATTEMPTS, 1);
@@ -1393,13 +1378,6 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)
    NS_ASSERTION(!sharedWorkerScriptSpec.IsEmpty(), "Empty spec!");
  }

  bool exemptFromPerDomainMax = false;
  if (isServiceWorker) {
    AssertIsOnMainThread();
    exemptFromPerDomainMax = Preferences::GetBool("dom.serviceWorkers.exemptFromPerDomainMax",
                                                  false);
  }

  const nsCString& domain = aWorkerPrivate->Domain();

  WorkerDomainInfo* domainInfo;
@@ -1415,34 +1393,14 @@ RuntimeService::RegisterWorker(WorkerPrivate* aWorkerPrivate)
      mDomainMap.Put(domain, domainInfo);
    }

    queued = gMaxWorkersPerDomain &&
             domainInfo->ActiveWorkerCount() >= gMaxWorkersPerDomain &&
             !domain.IsEmpty() &&
             !exemptFromPerDomainMax;

    if (queued) {
      domainInfo->mQueuedWorkers.AppendElement(aWorkerPrivate);

      // Worker spawn gets queued due to hitting max workers per domain
      // limit so let's log a warning.
      WorkerPrivate::ReportErrorToConsole("HittingMaxWorkersPerDomain2");

      if (isServiceWorker) {
        Telemetry::Accumulate(Telemetry::SERVICE_WORKER_SPAWN_GETS_QUEUED, 1);
      } else if (isSharedWorker) {
        Telemetry::Accumulate(Telemetry::SHARED_WORKER_SPAWN_GETS_QUEUED, 1);
      } else if (isDedicatedWorker) {
        Telemetry::Accumulate(Telemetry::DEDICATED_WORKER_SPAWN_GETS_QUEUED, 1);
      }
    }
    else if (parent) {
    if (parent) {
      domainInfo->mChildWorkerCount++;
    }
    else if (isServiceWorker) {
      domainInfo->mActiveServiceWorkers.AppendElement(aWorkerPrivate);
      domainInfo->mServiceWorkers.AppendElement(aWorkerPrivate);
    }
    else {
      domainInfo->mActiveWorkers.AppendElement(aWorkerPrivate);
      domainInfo->mWorkers.AppendElement(aWorkerPrivate);
    }

    if (isSharedWorker) {
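The hunk above is the admission check that test_bug1241485.html exercises: a spawn is queued once the domain's active worker count reaches gMaxWorkersPerDomain, unless the domain is empty or service workers are exempted by pref. A minimal standalone sketch of that decision, using STL containers and invented names (Worker, DomainInfo, RegisterWorker) rather than Gecko's WorkerPrivate/nsTArray/nsClassHashtable types:

#include <cstdint>
#include <deque>
#include <string>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins for WorkerPrivate and WorkerDomainInfo.
struct Worker { std::string domain; bool hasParent = false; };

struct DomainInfo {
  std::vector<Worker*> active;    // running top-level workers
  std::deque<Worker*> queued;     // spawns deferred by the per-domain cap
  uint32_t childWorkerCount = 0;  // nested workers are counted, not listed
  uint32_t ActiveWorkerCount() const {
    return static_cast<uint32_t>(active.size()) + childWorkerCount;
  }
};

constexpr uint32_t kMaxWorkersPerDomain = 512;

// Returns true if the spawn was deferred, mirroring the "queued" flag
// computed in RegisterWorker above.
bool RegisterWorker(std::unordered_map<std::string, DomainInfo>& aMap,
                    Worker* aWorker, bool aExemptFromPerDomainMax) {
  DomainInfo& info = aMap[aWorker->domain];  // creates the entry on first use
  const bool queued = kMaxWorkersPerDomain != 0 &&
                      info.ActiveWorkerCount() >= kMaxWorkersPerDomain &&
                      !aWorker->domain.empty() && !aExemptFromPerDomainMax;
  if (queued) {
    info.queued.push_back(aWorker);
  } else if (aWorker->hasParent) {
    info.childWorkerCount++;
  } else {
    info.active.push_back(aWorker);
  }
  return queued;
}

Keeping child workers as a bare count rather than a list mirrors the ActiveWorkerCount() arithmetic in the real code.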
@@ -1556,50 +1514,26 @@ RuntimeService::UnregisterWorker(WorkerPrivate* aWorkerPrivate)
      NS_ERROR("Don't have an entry for this domain!");
    }

    // Remove old worker from everywhere.
    uint32_t index = domainInfo->mQueuedWorkers.IndexOf(aWorkerPrivate);
    if (index != kNoIndex) {
      // Was queued, remove from the list.
      domainInfo->mQueuedWorkers.RemoveElementAt(index);
    }
    else if (parent) {
    if (parent) {
      MOZ_ASSERT(domainInfo->mChildWorkerCount, "Must be non-zero!");
      domainInfo->mChildWorkerCount--;
    }
    else if (aWorkerPrivate->IsServiceWorker()) {
      MOZ_ASSERT(domainInfo->mActiveServiceWorkers.Contains(aWorkerPrivate),
      MOZ_ASSERT(domainInfo->mServiceWorkers.Contains(aWorkerPrivate),
                 "Don't know about this worker!");
      domainInfo->mActiveServiceWorkers.RemoveElement(aWorkerPrivate);
      domainInfo->mServiceWorkers.RemoveElement(aWorkerPrivate);
    }
    else {
      MOZ_ASSERT(domainInfo->mActiveWorkers.Contains(aWorkerPrivate),
      MOZ_ASSERT(domainInfo->mWorkers.Contains(aWorkerPrivate),
                 "Don't know about this worker!");
      domainInfo->mActiveWorkers.RemoveElement(aWorkerPrivate);
      domainInfo->mWorkers.RemoveElement(aWorkerPrivate);
    }

    if (aWorkerPrivate->IsSharedWorker()) {
      RemoveSharedWorker(domainInfo, aWorkerPrivate);
    }

    // See if there's a queued worker we can schedule.
    if (domainInfo->ActiveWorkerCount() < gMaxWorkersPerDomain &&
        !domainInfo->mQueuedWorkers.IsEmpty()) {
      queuedWorker = domainInfo->mQueuedWorkers[0];
      domainInfo->mQueuedWorkers.RemoveElementAt(0);

      if (queuedWorker->GetParent()) {
        domainInfo->mChildWorkerCount++;
      }
      else if (queuedWorker->IsServiceWorker()) {
        domainInfo->mActiveServiceWorkers.AppendElement(queuedWorker);
      }
      else {
        domainInfo->mActiveWorkers.AppendElement(queuedWorker);
      }
    }

    if (domainInfo->HasNoWorkers()) {
      MOZ_ASSERT(domainInfo->mQueuedWorkers.IsEmpty());
      mDomainMap.Remove(domain);
    }
  }
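UnregisterWorker is the other half of the bookkeeping: the departing worker is removed from whichever list it landed in, and if the domain is now under the cap the oldest queued spawn is promoted. A sketch continuing the illustrative types from the previous block (the promotion step is what re-activates workers deferred by the limit):

#include <algorithm>  // std::find

// Returns the promoted worker, if any, so the caller can actually start it.
Worker* UnregisterWorker(std::unordered_map<std::string, DomainInfo>& aMap,
                         Worker* aWorker) {
  auto it = aMap.find(aWorker->domain);
  if (it == aMap.end()) {
    return nullptr;  // "Don't have an entry for this domain!"
  }
  DomainInfo& info = it->second;

  // Remove the old worker from everywhere.
  auto q = std::find(info.queued.begin(), info.queued.end(), aWorker);
  if (q != info.queued.end()) {
    info.queued.erase(q);
  } else if (aWorker->hasParent) {
    info.childWorkerCount--;
  } else {
    auto a = std::find(info.active.begin(), info.active.end(), aWorker);
    if (a != info.active.end()) {  // "Don't know about this worker!"
      info.active.erase(a);
    }
  }

  // See if there's a queued worker we can schedule.
  Worker* promoted = nullptr;
  if (info.ActiveWorkerCount() < kMaxWorkersPerDomain && !info.queued.empty()) {
    promoted = info.queued.front();
    info.queued.pop_front();
    if (promoted->hasParent) {
      info.childWorkerCount++;
    } else {
      info.active.push_back(promoted);
    }
  }

  // Drop the per-domain entry once it is completely empty.
  if (info.ActiveWorkerCount() == 0 && info.queued.empty()) {
    aMap.erase(it);
  }
  return promoted;
}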
@@ -1880,15 +1814,6 @@ RuntimeService::Init()
    NS_WARNING("Failed to register timeout cache!");
  }

  int32_t maxPerDomain = Preferences::GetInt(PREF_WORKERS_MAX_PER_DOMAIN,
                                             MAX_WORKERS_PER_DOMAIN);
  gMaxWorkersPerDomain = std::max(0, maxPerDomain);

  int32_t maxHardwareConcurrency =
    Preferences::GetInt(PREF_WORKERS_MAX_HARDWARE_CONCURRENCY,
                        MAX_HARDWARE_CONCURRENCY);
  gMaxHardwareConcurrency = std::max(0, maxHardwareConcurrency);

  rv = InitOSFileConstants();
  if (NS_FAILED(rv)) {
    return rv;
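The Init() hunk clamps both pref reads with std::max(0, ...) because the prefs are signed int32_t while the globals are unsigned; a negative pref value would otherwise wrap to an enormous limit. A small illustration, with a hypothetical GetIntPref standing in for Preferences::GetInt:

#include <algorithm>
#include <cstdint>

// Hypothetical stand-in for Preferences::GetInt(aName, aDefault).
int32_t GetIntPref(const char* /*aName*/, int32_t aDefault) { return aDefault; }

uint32_t gMaxWorkersPerDomainSketch = 512;

void InitWorkerPrefs() {
  int32_t maxPerDomain = GetIntPref("dom.workers.maxPerDomain", 512);
  // Without the clamp, a user-set value of -1 would become 4294967295.
  gMaxWorkersPerDomainSketch = std::max(0, maxPerDomain);
}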
@@ -2080,26 +2005,18 @@ RuntimeService::AddAllTopLevelWorkersToArray(nsTArray<WorkerPrivate*>& aWorkers)
    WorkerDomainInfo* aData = iter.UserData();

#ifdef DEBUG
    for (uint32_t index = 0; index < aData->mActiveWorkers.Length(); index++) {
      MOZ_ASSERT(!aData->mActiveWorkers[index]->GetParent(),
    for (uint32_t index = 0; index < aData->mWorkers.Length(); index++) {
      MOZ_ASSERT(!aData->mWorkers[index]->GetParent(),
                 "Shouldn't have a parent in this list!");
    }
    for (uint32_t index = 0; index < aData->mActiveServiceWorkers.Length(); index++) {
      MOZ_ASSERT(!aData->mActiveServiceWorkers[index]->GetParent(),
    for (uint32_t index = 0; index < aData->mServiceWorkers.Length(); index++) {
      MOZ_ASSERT(!aData->mServiceWorkers[index]->GetParent(),
                 "Shouldn't have a parent in this list!");
    }
#endif

    aWorkers.AppendElements(aData->mActiveWorkers);
    aWorkers.AppendElements(aData->mActiveServiceWorkers);

    // These might not be top-level workers...
    for (uint32_t index = 0; index < aData->mQueuedWorkers.Length(); index++) {
      WorkerPrivate* worker = aData->mQueuedWorkers[index];
      if (!worker->GetParent()) {
        aWorkers.AppendElement(worker);
      }
    }
    aWorkers.AppendElements(aData->mWorkers);
    aWorkers.AppendElements(aData->mServiceWorkers);
  }
}
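AddAllTopLevelWorkersToArray copies the active lists wholesale but filters the queue, since a queued worker can itself be a child worker and therefore not top-level (hence the "These might not be top-level workers..." comment). In the same illustrative STL terms as the earlier sketches:

// Gather every top-level worker: active lists wholesale, queued entries
// only when they have no parent.
std::vector<Worker*> AddAllTopLevelWorkers(
    const std::unordered_map<std::string, DomainInfo>& aMap) {
  std::vector<Worker*> out;
  for (const auto& entry : aMap) {
    const DomainInfo& info = entry.second;
    out.insert(out.end(), info.active.begin(), info.active.end());
    for (Worker* w : info.queued) {
      if (!w->hasParent) {
        out.push_back(w);
      }
    }
  }
  return out;
}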
@@ -2124,7 +2041,7 @@ RuntimeService::CancelWorkersForWindow(nsPIDOMWindowInner* aWindow)
{
  AssertIsOnMainThread();

  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
  nsTArray<WorkerPrivate*> workers;
  GetWorkersForWindow(aWindow, workers);

  if (!workers.IsEmpty()) {
@@ -2146,7 +2063,7 @@ RuntimeService::FreezeWorkersForWindow(nsPIDOMWindowInner* aWindow)
  AssertIsOnMainThread();
  MOZ_ASSERT(aWindow);

  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
  nsTArray<WorkerPrivate*> workers;
  GetWorkersForWindow(aWindow, workers);

  for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2160,7 +2077,7 @@ RuntimeService::ThawWorkersForWindow(nsPIDOMWindowInner* aWindow)
  AssertIsOnMainThread();
  MOZ_ASSERT(aWindow);

  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
  nsTArray<WorkerPrivate*> workers;
  GetWorkersForWindow(aWindow, workers);

  for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2174,7 +2091,7 @@ RuntimeService::SuspendWorkersForWindow(nsPIDOMWindowInner* aWindow)
  AssertIsOnMainThread();
  MOZ_ASSERT(aWindow);

  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
  nsTArray<WorkerPrivate*> workers;
  GetWorkersForWindow(aWindow, workers);

  for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2188,7 +2105,7 @@ RuntimeService::ResumeWorkersForWindow(nsPIDOMWindowInner* aWindow)
  AssertIsOnMainThread();
  MOZ_ASSERT(aWindow);

  AutoTArray<WorkerPrivate*, MAX_WORKERS_PER_DOMAIN> workers;
  nsTArray<WorkerPrivate*> workers;
  GetWorkersForWindow(aWindow, workers);

  for (uint32_t index = 0; index < workers.Length(); index++) {
@@ -2465,9 +2382,7 @@ RuntimeService::ClampedHardwareConcurrency() const
    if (numberOfProcessors <= 0) {
      numberOfProcessors = 1; // Must be one there somewhere
    }
    uint32_t clampedValue = std::min(uint32_t(numberOfProcessors),
                                     gMaxHardwareConcurrency);
    clampedHardwareConcurrency.compareExchange(0, clampedValue);
    clampedHardwareConcurrency = numberOfProcessors;
  }

  return clampedHardwareConcurrency;
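The ClampedHardwareConcurrency hunk restores the plain assignment; the backed-out version instead published the pref-clamped processor count through a compare-and-exchange so that racing callers all settle on one value. A sketch of that publish-once pattern using std::atomic and std::thread::hardware_concurrency as stand-ins for mozilla::Atomic and PR_GetNumberOfProcessors (the zero sentinel meaning "not computed yet" matches the compareExchange(0, ...) call above):

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <thread>

uint32_t gMaxHardwareConcurrencySketch = 8;

uint32_t ClampedHardwareConcurrency() {
  static std::atomic<uint32_t> sClamped{0};  // 0 means "not computed yet"
  uint32_t cached = sClamped.load(std::memory_order_relaxed);
  if (cached == 0) {
    uint32_t n = std::thread::hardware_concurrency();
    if (n == 0) {
      n = 1;  // must be one there somewhere
    }
    uint32_t clamped = std::min(n, gMaxHardwareConcurrencySketch);
    // Only the first caller's value sticks; later callers reread the winner's.
    uint32_t expected = 0;
    sClamped.compare_exchange_strong(expected, clamped);
    cached = sClamped.load(std::memory_order_relaxed);
  }
  return cached;
}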

@@ -42,34 +42,21 @@ class RuntimeService final : public nsIObserver
  struct WorkerDomainInfo
  {
    nsCString mDomain;
    nsTArray<WorkerPrivate*> mActiveWorkers;
    nsTArray<WorkerPrivate*> mActiveServiceWorkers;
    nsTArray<WorkerPrivate*> mQueuedWorkers;
    nsTArray<WorkerPrivate*> mWorkers;
    nsTArray<WorkerPrivate*> mServiceWorkers;
    nsClassHashtable<nsCStringHashKey, SharedWorkerInfo> mSharedWorkerInfos;
    uint32_t mChildWorkerCount;

    WorkerDomainInfo()
      : mActiveWorkers(1), mChildWorkerCount(0)
      : mWorkers(1), mChildWorkerCount(0)
    { }

    uint32_t
    ActiveWorkerCount() const
    {
      return mActiveWorkers.Length() +
             mChildWorkerCount;
    }

    uint32_t
    ActiveServiceWorkerCount() const
    {
      return mActiveServiceWorkers.Length();
    }

    bool
    HasNoWorkers() const
    {
      return ActiveWorkerCount() == 0 &&
             ActiveServiceWorkerCount() == 0;
      return mWorkers.IsEmpty() &&
             mServiceWorkers.IsEmpty() &&
             !mChildWorkerCount;
    }
  };

@@ -144,7 +144,6 @@ support-files =
[test_bug1132395.html]
skip-if = true # bug 1176225
[test_bug1132924.html]
[test_bug1241485.html]
[test_chromeWorker.html]
[test_clearTimeouts.html]
[test_close.html]

@@ -1,82 +0,0 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1241485
-->
<head>
  <meta charset="utf-8">
  <title>Test for Bug 1241485</title>
  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
  <script type="application/javascript">

  /** Test for Bug 1241485 **/
  SimpleTest.waitForExplicitFinish();
  SimpleTest.requestCompleteLog();
  SimpleTest.requestFlakyTimeout("requestFlakyTimeout is silly.");

  var limit = SpecialPowers.Services.prefs.getIntPref("dom.workers.maxPerDomain");
  var workers = new Array();
  var workerToWait = null;
  var url = URL.createObjectURL(new Blob(["postMessage('loaded');"]));
  var timeouts = new Array();

  function addTimeout(fn, time) {
    timeouts.push(setTimeout(fn, time));
  }

  function createWorker() {
    workerToWait = new Worker(url);
    workerToWait.onmessage = function(e) {
      if (!workers) {
        // finish() has been called already.
        return;
      }
      workers.push(workerToWait);
      info(workers.length + " workers");
      addTimeout(createWorker, 0);
      if (workers.length == limit) {
        // Just give the worker creation loop some more time to try to
        // create more workers to check that we don't go over the limit.
        addTimeout(finish, 250);
      }
    };
  }

  function test() {
    info("Expecting no more than " + limit + " workers.");
    // Make sure we finish at some point, even if creating workers takes
    // lots of time.
    addTimeout(finish, 10000);
    addTimeout(createWorker, 0);
  }

  function finish() {
    for (var i = 0; i < timeouts.length; ++i) {
      clearTimeout(timeouts[i]);
    }

    if (workerToWait) {
      workerToWait.onmessage = null;
    }

    ok(workers.length <= limit, "Too many workers created!");

    workers = null;
    SpecialPowers.gc();

    SimpleTest.finish();
  }

  </script>
</head>
<body onload="test();">
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1241485">Mozilla Bug 1241485</a>
<p id="display"></p>
<div id="content" style="display: none">

</div>
<pre id="test">
</pre>
</body>
</html>

@@ -141,10 +141,6 @@ pref("dom.select_events.enabled", true);
// Whether or not Web Workers are enabled.
pref("dom.workers.enabled", true);

// The number of workers per domain allowed to run concurrently.
// We're going for effectively infinite, while preventing abuse.
pref("dom.workers.maxPerDomain", 512);

pref("dom.serviceWorkers.enabled", false);

// The amount of time (milliseconds) service workers keep running after each event.
@@ -5516,9 +5512,6 @@ pref("media.seekToNextFrame.enabled", false);
pref("media.seekToNextFrame.enabled", true);
#endif

// return the maximum number of cores that navigator.hardwareCurrency returns
pref("dom.maxHardwareConcurrency", 16);

// Shutdown the osfile worker if its no longer needed.
#if !defined(RELEASE_BUILD)
pref("osfile.reset_worker_delay", 30000);
@@ -8478,27 +8478,6 @@
    "kind": "count",
    "description": "Count ServiceWorkers that really did get a thread created for them. File bugs in Core::DOM in case of a Telemetry regression."
  },
  "SERVICE_WORKER_SPAWN_GETS_QUEUED": {
    "alert_emails": ["amarchesini@mozilla.com"],
    "bug_numbers": [1286895],
    "expires_in_version": "never",
    "kind": "count",
    "description": "Tracking whether a ServiceWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
  },
  "SHARED_WORKER_SPAWN_GETS_QUEUED": {
    "alert_emails": ["amarchesini@mozilla.com"],
    "bug_numbers": [1286895],
    "expires_in_version": "never",
    "kind": "count",
    "description": "Tracking whether a SharedWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
  },
  "DEDICATED_WORKER_SPAWN_GETS_QUEUED": {
    "alert_emails": ["amarchesini@mozilla.com"],
    "bug_numbers": [1286895],
    "expires_in_version": "never",
    "kind": "count",
    "description": "Tracking whether a DedicatedWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
  },
  "SERVICE_WORKER_REGISTRATIONS": {
    "expires_in_version": "50",
    "kind": "count",