Bug 1350432 - Initial Quantum DOM scheduler implementation, disabled by default (r=froydnj)

MozReview-Commit-ID: JWBxz3bwgwD
Bill McCloskey 2017-07-28 14:56:49 -07:00
Parent 06a265a5bd
Commit f90a87caa9
27 changed files with 1969 additions and 137 deletions

View file

@ -85,6 +85,7 @@
#include "mozilla/Preferences.h"
#include "mozilla/ProcessHangMonitor.h"
#include "mozilla/ProcessHangMonitorIPC.h"
#include "mozilla/Scheduler.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/ScriptPreloader.h"
#include "mozilla/Services.h"
@ -2016,7 +2017,6 @@ ContentParent::LaunchSubprocess(ProcessPriority aInitialPriority /* = PROCESS_PR
nsAutoCString value;
Preferences::GetCString(ContentPrefs::GetContentPref(i), value);
stringPrefs.Append(nsPrintfCString("%u:%d;%s|", i, value.Length(), value.get()));
}
break;
case nsIPrefBranch::PREF_INVALID:
@ -2027,6 +2027,8 @@ ContentParent::LaunchSubprocess(ProcessPriority aInitialPriority /* = PROCESS_PR
}
}
nsCString schedulerPrefs = Scheduler::GetPrefs();
extraArgs.push_back("-intPrefs");
extraArgs.push_back(intPrefs.get());
extraArgs.push_back("-boolPrefs");
@ -2034,6 +2036,11 @@ ContentParent::LaunchSubprocess(ProcessPriority aInitialPriority /* = PROCESS_PR
extraArgs.push_back("-stringPrefs");
extraArgs.push_back(stringPrefs.get());
// Scheduler prefs need to be handled differently because the scheduler needs
// to start up in the content process before the normal preferences service.
extraArgs.push_back("-schedulerPrefs");
extraArgs.push_back(schedulerPrefs.get());
if (gSafeMode) {
extraArgs.push_back("-safeMode");
}

View file

@ -8,6 +8,7 @@
#include "ContentProcess.h"
#include "ContentPrefs.h"
#include "mozilla/Scheduler.h"
#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX)
#include <stdlib.h>
@ -115,6 +116,7 @@ ContentProcess::Init(int aArgc, char* aArgv[])
bool foundIntPrefs = false;
bool foundBoolPrefs = false;
bool foundStringPrefs = false;
bool foundSchedulerPrefs = false;
uint64_t childID;
bool isForBrowser;
@ -125,6 +127,7 @@ ContentProcess::Init(int aArgc, char* aArgv[])
nsCOMPtr<nsIFile> profileDir;
#endif
char* schedulerPrefs;
InfallibleTArray<PrefSetting> prefsArray;
for (int idx = aArgc; idx > 0; idx--) {
if (!aArgv[idx]) {
@ -204,8 +207,10 @@ ContentProcess::Init(int aArgc, char* aArgv[])
}
SET_PREF_PHASE(END_INIT_PREFS);
foundStringPrefs = true;
}
else if (!strcmp(aArgv[idx], "-safeMode")) {
} else if (!strcmp(aArgv[idx], "-schedulerPrefs")) {
schedulerPrefs = aArgv[idx + 1];
foundSchedulerPrefs = true;
} else if (!strcmp(aArgv[idx], "-safeMode")) {
gSafeMode = true;
}
@ -226,7 +231,13 @@ ContentProcess::Init(int aArgc, char* aArgv[])
}
#endif /* XP_MACOSX && MOZ_CONTENT_SANDBOX */
bool allFound = foundAppdir && foundChildID && foundIsForBrowser && foundIntPrefs && foundBoolPrefs && foundStringPrefs;
bool allFound = foundAppdir
&& foundChildID
&& foundIsForBrowser
&& foundIntPrefs
&& foundBoolPrefs
&& foundStringPrefs
&& foundSchedulerPrefs;
#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX)
allFound &= foundProfile;
@ -237,6 +248,7 @@ ContentProcess::Init(int aArgc, char* aArgv[])
}
}
Preferences::SetInitPreferences(&prefsArray);
Scheduler::SetPrefs(schedulerPrefs);
mContent.Init(IOThreadChild::message_loop(),
ParentPid(),
IOThreadChild::channel(),

View file

@ -95,14 +95,6 @@ class ThreadLocal
};
#endif
bool initialized() const {
#ifdef MOZ_HAS_THREAD_LOCAL
return true;
#else
return mInited;
#endif
}
public:
// __thread does not allow non-trivial constructors, but we can
// instead rely on zero-initialization.
@ -112,6 +104,14 @@ public:
{}
#endif
bool initialized() const {
#ifdef MOZ_HAS_THREAD_LOCAL
return true;
#else
return mInited;
#endif
}
MOZ_MUST_USE inline bool init();
void infallibleInit() {
MOZ_RELEASE_ASSERT(init(), "Infallible TLS initialization failed");

View file

@ -3323,6 +3323,13 @@ pref("dom.ipc.processCount.extension", 1);
// Don't use a native event loop in the content process.
pref("dom.ipc.useNativeEventProcessing.content", false);
// Quantum DOM scheduling:
pref("dom.ipc.scheduler", false);
pref("dom.ipc.scheduler.useMultipleQueues", false);
pref("dom.ipc.scheduler.preemption", false);
pref("dom.ipc.scheduler.threadCount", 2);
pref("dom.ipc.scheduler.chaoticScheduling", false);
// Disable support for SVG
pref("svg.disabled", false);

View file

@ -72,6 +72,7 @@
#include "mozilla/ipc/TestShellParent.h"
#include "mozilla/ipc/XPCShellEnvironment.h"
#include "mozilla/Scheduler.h"
#include "mozilla/WindowsDllBlocklist.h"
#include "GMPProcessChild.h"
@ -873,6 +874,8 @@ XRE_ShutdownChildProcess()
mozilla::DebugOnly<MessageLoop*> ioLoop = XRE_GetIOMessageLoop();
MOZ_ASSERT(!!ioLoop, "Bad shutdown order");
Scheduler::Shutdown();
// Quit() sets off the following chain of events
// (1) UI loop starts quitting
// (2) UI loop returns from Run() in XRE_InitChildProcess()
@ -880,6 +883,7 @@ XRE_ShutdownChildProcess()
// (4) ProcessChild joins the IO thread
// (5) exit()
MessageLoop::current()->Quit();
#if defined(XP_MACOSX)
nsCOMPtr<nsIAppShell> appShell(do_GetService(kAppShellCID));
if (appShell) {

View file

@ -42,6 +42,7 @@
#include "GeckoProfilerReporter.h"
#include "ProfilerIOInterposeObserver.h"
#include "mozilla/AutoProfilerLabel.h"
#include "mozilla/Scheduler.h"
#include "mozilla/StackWalk.h"
#include "mozilla/StaticPtr.h"
#include "mozilla/ThreadLocal.h"
@ -2977,7 +2978,7 @@ profiler_register_thread(const char* aName, void* aGuessStackTop)
{
DEBUG_LOG("profiler_register_thread(%s)", aName);
MOZ_RELEASE_ASSERT(!NS_IsMainThread());
MOZ_ASSERT_IF(NS_IsMainThread(), Scheduler::IsCooperativeThread());
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock(gPSMutex);
@ -2989,7 +2990,7 @@ profiler_register_thread(const char* aName, void* aGuessStackTop)
void
profiler_unregister_thread()
{
MOZ_RELEASE_ASSERT(!NS_IsMainThread());
MOZ_ASSERT_IF(NS_IsMainThread(), Scheduler::IsCooperativeThread());
MOZ_RELEASE_ASSERT(CorePS::Exists());
PSAutoLock lock(gPSMutex);

View file

@ -45,12 +45,18 @@ public:
// Get an event from the front of the queue. aPriority is an out param. If the
// implementation supports priorities, then this should be the same priority
// that the event was pushed with. aPriority may be null.
// that the event was pushed with. aPriority may be null. This should return
// null if the queue is non-empty but the event in front is not ready to run.
virtual already_AddRefed<nsIRunnable> GetEvent(EventPriority* aPriority,
const MutexAutoLock& aProofOfLock) = 0;
// Returns true if the queue is non-empty.
virtual bool HasPendingEvent(const MutexAutoLock& aProofOfLock) = 0;
// Returns true if the queue is empty. Implies !HasReadyEvent().
virtual bool IsEmpty(const MutexAutoLock& aProofOfLock) = 0;
// Returns true if the queue is non-empty and if the event in front is ready
// to run. Implies !IsEmpty(). This should return true iff GetEvent returns a
// non-null value.
virtual bool HasReadyEvent(const MutexAutoLock& aProofOfLock) = 0;
// Returns the number of events in the queue.
virtual size_t Count(const MutexAutoLock& aProofOfLock) const = 0;

View file

@ -0,0 +1,266 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "CooperativeThreadPool.h"
#include "base/message_loop.h"
#include "mozilla/IOInterposer.h"
#include "nsError.h"
#include "nsThreadUtils.h"
using namespace mozilla;
static bool gCooperativeSchedulingEnabled;
MOZ_THREAD_LOCAL(CooperativeThreadPool::CooperativeThread*) CooperativeThreadPool::sTlsCurrentThread;
// Windows silliness. winbase.h defines an empty no-argument Yield macro.
#undef Yield
CooperativeThreadPool::CooperativeThreadPool(size_t aNumThreads,
Mutex& aMutex,
Controller& aController)
: mMutex(aMutex)
, mShutdownCondition(mMutex, "CoopShutdown")
, mRunning(false)
, mNumThreads(std::min(aNumThreads, kMaxThreads))
, mRunningThreads(0)
, mController(aController)
, mSelectedThread(size_t(0))
{
MOZ_ASSERT(aNumThreads < kMaxThreads);
gCooperativeSchedulingEnabled = true;
sTlsCurrentThread.infallibleInit();
MutexAutoLock lock(mMutex);
mRunning = true;
mRunningThreads = mNumThreads;
for (size_t i = 0; i < mNumThreads; i++) {
mThreads[i] = MakeUnique<CooperativeThread>(this, i);
}
}
CooperativeThreadPool::~CooperativeThreadPool()
{
MOZ_ASSERT(!mRunning);
}
const size_t CooperativeThreadPool::kMaxThreads;
void
CooperativeThreadPool::Shutdown()
{
// This will not be called on any of the cooperative threads.
{
MutexAutoLock lock(mMutex);
MOZ_ASSERT(mRunning);
mRunning = false;
}
for (size_t i = 0; i < mNumThreads; i++) {
mThreads[i]->BeginShutdown();
}
{
MutexAutoLock lock(mMutex);
while (mRunningThreads) {
mShutdownCondition.Wait();
}
}
for (size_t i = 0; i < mNumThreads; i++) {
mThreads[i]->EndShutdown();
}
}
void
CooperativeThreadPool::RecheckBlockers(const MutexAutoLock& aProofOfLock)
{
aProofOfLock.AssertOwns(mMutex);
if (!mSelectedThread.is<AllThreadsBlocked>()) {
return;
}
for (size_t i = 0; i < mNumThreads; i++) {
if (mThreads[i]->mRunning && !mThreads[i]->IsBlocked(aProofOfLock)) {
mSelectedThread = AsVariant(i);
mThreads[i]->mCondVar.Notify();
return;
}
}
// It may be valid to reach this point. For example, if we are waiting for an
// event to be posted from a non-main thread. Even if the queue is non-empty,
// it may have only idle events that we do not want to run (because we are
// expecting a vsync soon).
}
/* static */ void
CooperativeThreadPool::Yield(Resource* aBlocker, const MutexAutoLock& aProofOfLock)
{
if (!gCooperativeSchedulingEnabled) {
return;
}
CooperativeThread* thread = sTlsCurrentThread.get();
MOZ_RELEASE_ASSERT(thread);
thread->SetBlocker(aBlocker);
thread->Yield(aProofOfLock);
}
/* static */ bool
CooperativeThreadPool::IsCooperativeThread()
{
if (!gCooperativeSchedulingEnabled) {
return false;
}
return !!sTlsCurrentThread.get();
}
CooperativeThreadPool::SelectedThread
CooperativeThreadPool::CurrentThreadIndex(const MutexAutoLock& aProofOfLock) const
{
aProofOfLock.AssertOwns(mMutex);
return mSelectedThread;
}
CooperativeThreadPool::CooperativeThread::CooperativeThread(CooperativeThreadPool* aPool,
size_t aIndex)
: mPool(aPool)
, mCondVar(aPool->mMutex, "CooperativeThreadPool")
, mBlocker(nullptr)
, mIndex(aIndex)
, mRunning(true)
{
mThread = PR_CreateThread(PR_USER_THREAD, ThreadFunc, this,
PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_JOINABLE_THREAD, 0);
MOZ_RELEASE_ASSERT(mThread);
}
void
CooperativeThreadPool::CooperativeThread::ThreadMethod()
{
char stackTop;
MOZ_ASSERT(gCooperativeSchedulingEnabled);
sTlsCurrentThread.set(this);
nsCString name = mPool->mThreadNaming.GetNextThreadName("Main");
PR_SetCurrentThreadName(name.get());
mozilla::IOInterposer::RegisterCurrentThread();
{
// Make sure only one thread at a time can proceed. This only happens during
// thread startup.
MutexAutoLock lock(mPool->mMutex);
while (mPool->mSelectedThread != AsVariant(mIndex)) {
mCondVar.Wait();
}
}
mPool->mController.OnStartThread(mIndex, name, &stackTop);
nsCOMPtr<nsIThread> thread = do_GetCurrentThread();
mEventTarget = thread;
// The main event loop for this thread.
for (;;) {
{
MutexAutoLock lock(mPool->mMutex);
if (!mPool->mRunning) {
break;
}
}
bool processedEvent;
thread->ProcessNextEvent(true, &processedEvent);
}
mPool->mController.OnStopThread(mIndex);
mozilla::IOInterposer::UnregisterCurrentThread();
MutexAutoLock lock(mPool->mMutex);
mPool->mRunningThreads--;
mRunning = false;
mPool->mSelectedThread = AsVariant(AllThreadsBlocked::Blocked);
mPool->RecheckBlockers(lock);
mPool->mShutdownCondition.Notify();
}
/* static */ void
CooperativeThreadPool::CooperativeThread::ThreadFunc(void* aArg)
{
auto thread = static_cast<CooperativeThreadPool::CooperativeThread*>(aArg);
thread->ThreadMethod();
}
void
CooperativeThreadPool::CooperativeThread::BeginShutdown()
{
mEventTarget->Dispatch(new mozilla::Runnable("CooperativeShutdownEvent"),
nsIEventTarget::DISPATCH_NORMAL);
}
void
CooperativeThreadPool::CooperativeThread::EndShutdown()
{
PR_JoinThread(mThread);
}
bool
CooperativeThreadPool::CooperativeThread::IsBlocked(const MutexAutoLock& aProofOfLock)
{
if (!mBlocker) {
return false;
}
return !mBlocker->IsAvailable(aProofOfLock);
}
void
CooperativeThreadPool::CooperativeThread::Yield(const MutexAutoLock& aProofOfLock)
{
aProofOfLock.AssertOwns(mPool->mMutex);
// First select the next thread to run.
size_t selected = mIndex + 1;
bool found = false;
do {
if (selected >= mPool->mNumThreads) {
selected = 0;
}
if (mPool->mThreads[selected]->mRunning
&& !mPool->mThreads[selected]->IsBlocked(aProofOfLock)) {
found = true;
break;
}
selected++;
} while (selected != mIndex + 1);
if (found) {
mPool->mSelectedThread = AsVariant(selected);
mPool->mThreads[selected]->mCondVar.Notify();
} else {
// We need to block all threads. Some thread will be unblocked when
// RecheckBlockers is called (if a new event is posted for an outside
// thread, for example).
mPool->mSelectedThread = AsVariant(AllThreadsBlocked::Blocked);
}
mPool->mController.OnSuspendThread(mIndex);
while (mPool->mSelectedThread != AsVariant(mIndex)) {
mCondVar.Wait();
}
mPool->mController.OnResumeThread(mIndex);
}

View file

@ -0,0 +1,150 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_CooperativeThreadPool_h
#define mozilla_CooperativeThreadPool_h
#include "mozilla/Array.h"
#include "mozilla/CondVar.h"
#include "mozilla/Mutex.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Variant.h"
#include "prthread.h"
// Windows silliness. winbase.h defines an empty no-argument Yield macro.
#undef Yield
class nsIEventTarget;
namespace mozilla {
// A CooperativeThreadPool is a pool of threads that process jobs, with at most
// one thread running at a given time. While processing a job, a thread can
// yield to another thread in the pool. Threads can be blocked on abstract
// Resources. When a thread yields, we iterate over all threads and look for
// one whose Resource has become available. It's possible that no thread is
// available to run, in which case all threads in the pool sleep. An outside
// thread can get things started again by calling RecheckBlockers (presumably
// after it has mutated some state that it expects would cause a Resource to
// become available).
class CooperativeThreadPool
{
public:
// Every pool must have a controller object, on which callbacks are invoked.
class Controller
{
public:
// Called when a new thread in the pool is started. aIndex is the index of
// the thread within the pool. aName is the thread name (e.g., Main#4), and
// aStackTop is the guess for the address of the top of the stack. The
// thread will begin processing events immediately after OnStartThread is called.
virtual void OnStartThread(size_t aIndex, const nsACString& aName, void* aStackTop) = 0;
// Called when a thread in the pool is about to be shut down.
virtual void OnStopThread(size_t aIndex) = 0;
// Threads in the pool are either suspended or running. At most one thread
// will be running at any time. All other threads will be suspended.
// Called when a thread is resumed (un-suspended). Note that OnResumeThread
// will not be called if the thread is starting up. OnStartThread will be
// called instead.
virtual void OnResumeThread(size_t aIndex) = 0;
// Called when a thread in the pool is about to be suspended. Note that
// OnSuspendThread will not be called if the thread is shutting
// down. OnStopThread is called instead.
virtual void OnSuspendThread(size_t aIndex) = 0;
};
CooperativeThreadPool(size_t aNumThreads,
Mutex& aMutex,
Controller& aController);
~CooperativeThreadPool();
void Shutdown();
// An abstract class representing something that can be blocked on. Examples
// are an event queue (where IsAvailable would check if the queue is
// non-empty) or an object that can be owned by only one thread at a time
// (where IsAvailable would check if the object is unowned).
class Resource {
public:
virtual bool IsAvailable(const MutexAutoLock& aProofOfLock) = 0;
};
// Typically called by a thread outside the pool, this will check if any
// thread is blocked on a resource that has become available. In this case,
// the thread will resume. Nothing is done if some thread in the pool was
// already running.
void RecheckBlockers(const MutexAutoLock& aProofOfLock);
// Must be called from within the thread pool. Causes the current thread to be
// blocked on aBlocker, which can be null if the thread is ready to run
// unconditionally.
static void Yield(Resource* aBlocker, const MutexAutoLock& aProofOfLock);
static bool IsCooperativeThread();
enum class AllThreadsBlocked { Blocked };
using SelectedThread = Variant<size_t, AllThreadsBlocked>;
SelectedThread CurrentThreadIndex(const MutexAutoLock& aProofOfLock) const;
static const size_t kMaxThreads = 16;
private:
class CooperativeThread
{
friend class CooperativeThreadPool;
public:
CooperativeThread(CooperativeThreadPool* aPool,
size_t aIndex);
void BeginShutdown();
void EndShutdown();
void Started();
bool IsBlocked(const MutexAutoLock& aProofOfLock);
void SetBlocker(Resource* aResource) { mBlocker = aResource; }
void Yield(const MutexAutoLock& aProofOfLock);
private:
static void ThreadFunc(void* aArg);
void ThreadMethod();
CooperativeThreadPool* mPool;
CondVar mCondVar;
Resource* mBlocker;
PRThread* mThread;
nsCOMPtr<nsIEventTarget> mEventTarget;
const size_t mIndex;
bool mRunning;
};
class StartRunnable;
Mutex& mMutex;
CondVar mShutdownCondition;
nsThreadPoolNaming mThreadNaming;
bool mRunning;
const size_t mNumThreads;
size_t mRunningThreads;
Controller& mController;
Array<UniquePtr<CooperativeThread>, kMaxThreads> mThreads;
SelectedThread mSelectedThread;
static MOZ_THREAD_LOCAL(CooperativeThread*) sTlsCurrentThread;
};
} // namespace mozilla
#endif // mozilla_CooperativeThreadPool_h
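
For orientation, here is a minimal sketch, not part of this commit, of how a pool might be wired up; the controller and function names are illustrative. A real controller must also create an nsThread event target for each pool thread in OnStartThread, which is what SchedulerImpl::ThreadController does in Scheduler.cpp below.

#include "mozilla/CooperativeThreadPool.h"
#include "mozilla/Mutex.h"
#include "mozilla/UniquePtr.h"

using namespace mozilla;

// Illustrative controller: a production controller must set up an nsThread
// for the new thread in OnStartThread before it can process events.
class ExampleController final : public CooperativeThreadPool::Controller
{
public:
  void OnStartThread(size_t aIndex, const nsACString& aName, void* aStackTop) override {}
  void OnStopThread(size_t aIndex) override {}
  void OnSuspendThread(size_t aIndex) override {}
  void OnResumeThread(size_t aIndex) override {}
};

// Illustrative globals; the real code manages these through SchedulerImpl.
// The mutex must be the same lock that guards the resources the pool threads
// block on; in this commit it is the SchedulerEventQueue's lock.
static Mutex sPoolLock("ExamplePool");
static ExampleController sPoolController;
static UniquePtr<CooperativeThreadPool> sPool;

void ExampleStartPool()
{
  sPool = MakeUnique<CooperativeThreadPool>(/* aNumThreads */ 2, sPoolLock, sPoolController);
}

void ExampleNotifyFromOutsideThread()
{
  // Wake a blocked pool thread after making some Resource available.
  MutexAutoLock lock(sPoolLock);
  sPool->RecheckBlockers(lock);
}

void ExampleShutdown()
{
  // Must be called from a non-pool thread.
  sPool->Shutdown();
  sPool = nullptr;
}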

View file

@ -38,9 +38,15 @@ EventQueue::GetEvent(EventPriority* aPriority,
}
bool
EventQueue::HasPendingEvent(const MutexAutoLock& aProofOfLock)
EventQueue::IsEmpty(const MutexAutoLock& aProofOfLock)
{
return !mQueue.IsEmpty();
return mQueue.IsEmpty();
}
bool
EventQueue::HasReadyEvent(const MutexAutoLock& aProofOfLock)
{
return !IsEmpty(aProofOfLock);
}
already_AddRefed<nsIRunnable>

View file

@ -25,7 +25,9 @@ public:
const MutexAutoLock& aProofOfLock) final;
already_AddRefed<nsIRunnable> GetEvent(EventPriority* aPriority,
const MutexAutoLock& aProofOfLock) final;
bool HasPendingEvent(const MutexAutoLock& aProofOfLock) final;
bool IsEmpty(const MutexAutoLock& aProofOfLock) final;
bool HasReadyEvent(const MutexAutoLock& aProofOfLock) final;
size_t Count(const MutexAutoLock& aProofOfLock) const final;
already_AddRefed<nsIRunnable> PeekEvent(const MutexAutoLock& aProofOfLock);

View file

@ -0,0 +1,197 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "LabeledEventQueue.h"
#include "mozilla/Scheduler.h"
#include "mozilla/SchedulerGroup.h"
#include "nsQueryObject.h"
LabeledEventQueue::LabeledEventQueue()
{
}
static SchedulerGroup*
GetSchedulerGroup(nsIRunnable* aEvent)
{
RefPtr<SchedulerGroup::Runnable> groupRunnable = do_QueryObject(aEvent);
if (!groupRunnable) {
// It's not labeled.
return nullptr;
}
return groupRunnable->Group();
}
static bool
IsReadyToRun(nsIRunnable* aEvent, SchedulerGroup* aEventGroup)
{
if (!Scheduler::AnyEventRunning()) {
return true;
}
if (Scheduler::UnlabeledEventRunning()) {
return false;
}
if (aEventGroup) {
return !aEventGroup->IsRunning();
}
nsCOMPtr<nsILabelableRunnable> labelable = do_QueryInterface(aEvent);
if (!labelable) {
return false;
}
AutoTArray<RefPtr<SchedulerGroup>, 1> groups;
bool labeled = labelable->GetAffectedSchedulerGroups(groups);
if (!labeled) {
return false;
}
for (SchedulerGroup* group : groups) {
if (group->IsRunning()) {
return false;
}
}
return true;
}
void
LabeledEventQueue::PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
EventPriority aPriority,
const MutexAutoLock& aProofOfLock)
{
nsCOMPtr<nsIRunnable> event(aEvent);
MOZ_ASSERT(event.get());
SchedulerGroup* group = GetSchedulerGroup(event);
bool isLabeled = !!group;
// Create a new epoch if necessary.
Epoch* epoch;
if (mEpochs.IsEmpty()) {
epoch = &mEpochs.Push(Epoch::First(isLabeled));
} else {
Epoch& lastEpoch = mEpochs.LastElement();
if (lastEpoch.IsLabeled() != isLabeled) {
epoch = &mEpochs.Push(lastEpoch.NextEpoch(isLabeled));
} else {
epoch = &lastEpoch;
}
}
mNumEvents++;
epoch->mNumEvents++;
RunnableEpochQueue* queue = isLabeled ? mLabeled.LookupOrAdd(group) : &mUnlabeled;
queue->Push(QueueEntry(event.forget(), epoch->mEpochNumber));
}
void
LabeledEventQueue::PopEpoch()
{
Epoch& epoch = mEpochs.FirstElement();
MOZ_ASSERT(epoch.mNumEvents > 0);
if (epoch.mNumEvents == 1) {
mEpochs.Pop();
} else {
epoch.mNumEvents--;
}
mNumEvents--;
}
already_AddRefed<nsIRunnable>
LabeledEventQueue::GetEvent(EventPriority* aPriority,
const MutexAutoLock& aProofOfLock)
{
if (mEpochs.IsEmpty()) {
return nullptr;
}
Epoch epoch = mEpochs.FirstElement();
if (!epoch.IsLabeled()) {
QueueEntry entry = mUnlabeled.FirstElement();
if (IsReadyToRun(entry.mRunnable, nullptr)) {
PopEpoch();
mUnlabeled.Pop();
MOZ_ASSERT(entry.mEpochNumber == epoch.mEpochNumber);
MOZ_ASSERT(entry.mRunnable.get());
return entry.mRunnable.forget();
}
}
for (auto iter = mLabeled.Iter(); !iter.Done(); iter.Next()) {
SchedulerGroup* key = iter.Key();
RunnableEpochQueue* queue = iter.Data();
MOZ_ASSERT(!queue->IsEmpty());
QueueEntry entry = queue->FirstElement();
if (entry.mEpochNumber != epoch.mEpochNumber) {
continue;
}
if (IsReadyToRun(entry.mRunnable, key)) {
PopEpoch();
queue->Pop();
if (queue->IsEmpty()) {
iter.Remove();
}
return entry.mRunnable.forget();
}
}
return nullptr;
}
bool
LabeledEventQueue::IsEmpty(const MutexAutoLock& aProofOfLock)
{
return mEpochs.IsEmpty();
}
size_t
LabeledEventQueue::Count(const MutexAutoLock& aProofOfLock) const
{
return mNumEvents;
}
bool
LabeledEventQueue::HasReadyEvent(const MutexAutoLock& aProofOfLock)
{
if (mEpochs.IsEmpty()) {
return false;
}
Epoch& frontEpoch = mEpochs.FirstElement();
if (!frontEpoch.IsLabeled()) {
QueueEntry entry = mUnlabeled.FirstElement();
return IsReadyToRun(entry.mRunnable, nullptr);
}
// Go through the labeled queues and look for one whose head is from the
// current epoch and is allowed to run.
uintptr_t currentEpoch = frontEpoch.mEpochNumber;
for (auto iter = mLabeled.Iter(); !iter.Done(); iter.Next()) {
SchedulerGroup* key = iter.Key();
RunnableEpochQueue* queue = iter.Data();
MOZ_ASSERT(!queue->IsEmpty());
QueueEntry entry = queue->FirstElement();
if (entry.mEpochNumber != currentEpoch) {
continue;
}
if (IsReadyToRun(entry.mRunnable, key)) {
return true;
}
}
return false;
}
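
To make the IsReadyToRun() rules above concrete: when no event is running anywhere, every event is ready; while an unlabeled event runs, nothing else may start; and while an event labeled with group A runs on one cooperative thread, GetEvent() keeps skipping further events labeled A (and all unlabeled events) but may hand an event labeled with a different group B to another thread, relying on the invariant that differently labeled events cannot affect each other.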

View file

@ -0,0 +1,142 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_LabeledEventQueue_h
#define mozilla_LabeledEventQueue_h
#include "mozilla/AbstractEventQueue.h"
#include "mozilla/Queue.h"
#include "nsClassHashtable.h"
#include "nsHashKeys.h"
namespace mozilla {
class SchedulerGroup;
// LabeledEventQueue is actually a set of queues. There is one queue for each
// SchedulerGroup, as well as one queue for unlabeled events (those with no
// associated SchedulerGroup). When an event is added to a LabeledEventQueue, we
// query its SchedulerGroup and then add it to the appropriate queue. When an
// event is fetched, we heuristically pick a SchedulerGroup and return an event
// from its queue. Ideally the heuristic should give precedence to
// SchedulerGroups corresponding to the foreground tabs. The correctness of this
// data structure relies on the invariant that events from different
// SchedulerGroups cannot affect each other.
class LabeledEventQueue final : public AbstractEventQueue
{
public:
LabeledEventQueue();
void PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
EventPriority aPriority,
const MutexAutoLock& aProofOfLock) final;
already_AddRefed<nsIRunnable> GetEvent(EventPriority* aPriority,
const MutexAutoLock& aProofOfLock) final;
bool IsEmpty(const MutexAutoLock& aProofOfLock) final;
size_t Count(const MutexAutoLock& aProofOfLock) const final;
bool HasReadyEvent(const MutexAutoLock& aProofOfLock) final;
void EnableInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {}
void FlushInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {}
void SuspendInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {}
void ResumeInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {}
private:
// The basic problem here is to keep track of the ordering relationships
// between events. As long as there are only labeled events, there can be one
// queue per SchedulerGroup. However, if an unlabeled event is pushed, we must
// remember that it should run after all the labeled events currently in the
// queue. To do this, the queues are arranged in "epochs". Each time the tail
// of the queue transitions from labeled to unlabeled (or from unlabeled to
// labeled) a new epoch starts. Events within different epochs are ordered
// according to which epoch happened first. Within a labeled epoch, there is
// one queue per SchedulerGroup. So events from different SchedulerGroups
// within the same epoch are unordered with respect to each other. Within an
// unlabeled epoch, there is a single queue that orders all the unlabeled
// events.
//
// The data structures we use are:
// 1. A queue of epochs. For each epoch, we store its epoch number. This number
// is odd for labeled epochs and even for unlabeled epochs. We also store the
// number of events in the epoch.
// 2. A single queue for all unlabeled events. For each event in the queue, we
// store the runnable as well as the epoch number.
// 3. For labeled events, one queue for each SchedulerGroup. Each element in
// these queues also keeps track of the epoch it belongs to.
//
// To push an event, we see if we can remain in the same epoch or if we have
// to start a new one. If we have to start a new one, we push onto the epoch
// queue. Then, based on whether the event is labeled or not, we push the
// runnable and the epoch number into the appropriate queue.
//
// To pop an event, we look at the epoch at the front of the epoch queue. If
// it is unlabeled, then we pop the first event in the unlabeled queue. If it
// is labeled, we can pop from any of the SchedulerGroup queues. Then we
// decrement the number of events in the current epoch. If this number reaches
// zero, we pop from the epoch queue.
struct QueueEntry
{
nsCOMPtr<nsIRunnable> mRunnable;
uintptr_t mEpochNumber;
QueueEntry(already_AddRefed<nsIRunnable> aRunnable, uintptr_t aEpoch)
: mRunnable(aRunnable)
, mEpochNumber(aEpoch)
{}
};
struct Epoch
{
static Epoch First(bool aIsLabeled)
{
// Odd numbers are labeled, even are unlabeled.
uintptr_t number = aIsLabeled ? 1 : 0;
return Epoch(number, aIsLabeled);
}
static bool EpochNumberIsLabeled(uintptr_t aEpochNumber)
{
// Odd numbers are labeled, even are unlabeled.
return (aEpochNumber & 1) ? true : false;
}
uintptr_t mEpochNumber;
size_t mNumEvents;
Epoch(uintptr_t aEpochNumber, bool aIsLabeled)
: mEpochNumber(aEpochNumber)
, mNumEvents(0)
{
MOZ_ASSERT(aIsLabeled == EpochNumberIsLabeled(aEpochNumber));
}
bool IsLabeled() const { return EpochNumberIsLabeled(mEpochNumber); }
Epoch NextEpoch(bool aIsLabeled) const
{
MOZ_ASSERT(aIsLabeled == !IsLabeled());
return Epoch(mEpochNumber + 1, aIsLabeled);
}
};
void PopEpoch();
using RunnableEpochQueue = Queue<QueueEntry, 32>;
using LabeledMap = nsClassHashtable<nsRefPtrHashKey<SchedulerGroup>, RunnableEpochQueue>;
using EpochQueue = Queue<Epoch, 8>;
LabeledMap mLabeled;
RunnableEpochQueue mUnlabeled;
EpochQueue mEpochs;
size_t mNumEvents = 0;
};
} // namespace mozilla
#endif // mozilla_LabeledEventQueue_h
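
A worked example of the epoch scheme described above (the groups g1 and g2 and the runnable names are illustrative):

  PutEvent(a, labeled g1)  -> starts epoch 1 (odd = labeled), pushed onto mLabeled[g1]
  PutEvent(b, labeled g1)  -> still epoch 1
  PutEvent(c, unlabeled)   -> starts epoch 2 (even = unlabeled), pushed onto mUnlabeled
  PutEvent(d, labeled g2)  -> starts epoch 3, pushed onto mLabeled[g2]

GetEvent() must drain epoch 1 (a, then b) before it can return c, and c before d. a and b stay ordered because they live in the same per-group queue; had they belonged to two different groups within epoch 1, their relative order would be unspecified.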

View file

@ -114,25 +114,16 @@ PrioritizedEventQueue<InnerQueueT>::GetIdleDeadline()
}
template<class InnerQueueT>
already_AddRefed<nsIRunnable>
PrioritizedEventQueue<InnerQueueT>::GetEvent(EventPriority* aPriority,
EventPriority
PrioritizedEventQueue<InnerQueueT>::SelectQueue(bool aUpdateState,
const MutexAutoLock& aProofOfLock)
{
MakeScopeExit([&] {
mHasPendingEventsPromisedIdleEvent = false;
});
#ifndef RELEASE_OR_BETA
// Clear mNextIdleDeadline so that it is possible to determine that
// we're running an idle runnable in ProcessNextEvent.
*mNextIdleDeadline = TimeStamp();
#endif
bool highPending = mHighQueue->HasPendingEvent(aProofOfLock);
bool normalPending = mNormalQueue->HasPendingEvent(aProofOfLock);
bool highPending = !mHighQueue->IsEmpty(aProofOfLock);
bool normalPending = !mNormalQueue->IsEmpty(aProofOfLock);
size_t inputCount = mInputQueue->Count(aProofOfLock);
if (mInputQueueState == STATE_ENABLED && mInputHandlingStartTime.IsNull() && inputCount > 0) {
if (aUpdateState && mInputQueueState == STATE_ENABLED &&
mInputHandlingStartTime.IsNull() && inputCount > 0) {
mInputHandlingStartTime =
InputEventStatistics::Get()
.GetInputHandlingStartTime(inputCount);
@ -183,7 +174,29 @@ PrioritizedEventQueue<InnerQueueT>::GetEvent(EventPriority* aPriority,
MOZ_ASSERT_IF(queue == EventPriority::Input,
mInputQueueState != STATE_DISABLED && mInputQueueState != STATE_SUSPEND);
if (aUpdateState) {
mProcessHighPriorityQueue = highPending;
}
return queue;
}
template<class InnerQueueT>
already_AddRefed<nsIRunnable>
PrioritizedEventQueue<InnerQueueT>::GetEvent(EventPriority* aPriority,
const MutexAutoLock& aProofOfLock)
{
MakeScopeExit([&] {
mHasPendingEventsPromisedIdleEvent = false;
});
#ifndef RELEASE_OR_BETA
// Clear mNextIdleDeadline so that it is possible to determine that
// we're running an idle runnable in ProcessNextEvent.
*mNextIdleDeadline = TimeStamp();
#endif
EventPriority queue = SelectQueue(true, aProofOfLock);
if (aPriority) {
*aPriority = queue;
@ -211,7 +224,7 @@ PrioritizedEventQueue<InnerQueueT>::GetEvent(EventPriority* aPriority,
// If we get here, then all queues except idle are empty.
MOZ_ASSERT(queue == EventPriority::Idle);
if (!mIdleQueue->HasPendingEvent(aProofOfLock)) {
if (mIdleQueue->IsEmpty(aProofOfLock)) {
MOZ_ASSERT(!mHasPendingEventsPromisedIdleEvent);
return nullptr;
}
@ -240,29 +253,47 @@ PrioritizedEventQueue<InnerQueueT>::GetEvent(EventPriority* aPriority,
template<class InnerQueueT>
bool
PrioritizedEventQueue<InnerQueueT>::HasPendingEvent(const MutexAutoLock& aProofOfLock)
PrioritizedEventQueue<InnerQueueT>::IsEmpty(const MutexAutoLock& aProofOfLock)
{
// Just check IsEmpty() on the sub-queues. Don't bother checking the idle
// deadline since that only determines whether an idle event is ready or not.
return mHighQueue->IsEmpty(aProofOfLock)
&& mInputQueue->IsEmpty(aProofOfLock)
&& mNormalQueue->IsEmpty(aProofOfLock)
&& mIdleQueue->IsEmpty(aProofOfLock);
}
template<class InnerQueueT>
bool
PrioritizedEventQueue<InnerQueueT>::HasReadyEvent(const MutexAutoLock& aProofOfLock)
{
mHasPendingEventsPromisedIdleEvent = false;
if (mHighQueue->HasPendingEvent(aProofOfLock) ||
mInputQueue->HasPendingEvent(aProofOfLock) ||
mNormalQueue->HasPendingEvent(aProofOfLock)) {
EventPriority queue = SelectQueue(false, aProofOfLock);
if (queue == EventPriority::High) {
return mHighQueue->HasReadyEvent(aProofOfLock);
} else if (queue == EventPriority::Input) {
return mInputQueue->HasReadyEvent(aProofOfLock);
} else if (queue == EventPriority::Normal) {
return mNormalQueue->HasReadyEvent(aProofOfLock);
}
MOZ_ASSERT(queue == EventPriority::Idle);
// If we get here, then both the high and normal queues are empty.
if (mIdleQueue->IsEmpty(aProofOfLock)) {
return false;
}
TimeStamp idleDeadline = GetIdleDeadline();
if (idleDeadline && mIdleQueue->HasReadyEvent(aProofOfLock)) {
mHasPendingEventsPromisedIdleEvent = true;
return true;
}
bool hasPendingIdleEvent = false;
// Note that GetIdleDeadline() checks mHasPendingEventsPromisedIdleEvent,
// but that's OK since we set it to false in the beginning of this method!
TimeStamp idleDeadline = GetIdleDeadline();
// Only examine the idle queue if we are in an idle period.
if (idleDeadline) {
hasPendingIdleEvent = mIdleQueue->HasPendingEvent(aProofOfLock);
mHasPendingEventsPromisedIdleEvent = hasPendingIdleEvent;
}
return hasPendingIdleEvent;
return false;
}
template<class InnerQueueT>

View file

@ -50,8 +50,10 @@ public:
const MutexAutoLock& aProofOfLock) final;
already_AddRefed<nsIRunnable> GetEvent(EventPriority* aPriority,
const MutexAutoLock& aProofOfLock) final;
bool HasPendingEvent(const MutexAutoLock& aProofOfLock) final;
bool IsEmpty(const MutexAutoLock& aProofOfLock) final;
size_t Count(const MutexAutoLock& aProofOfLock) const final;
bool HasReadyEvent(const MutexAutoLock& aProofOfLock) final;
// When checking the idle deadline, we need to drop whatever mutex protects
// this queue. This method allows that mutex to be stored so that we can drop
@ -72,6 +74,8 @@ public:
void ResumeInputEventPrioritization(const MutexAutoLock& aProofOfLock) final;
private:
EventPriority SelectQueue(bool aUpdateState, const MutexAutoLock& aProofOfLock);
// Returns a null TimeStamp if we're not in the idle period.
mozilla::TimeStamp GetIdleDeadline();

xpcom/threads/Scheduler.cpp (new file, 813 lines)
View file

@ -0,0 +1,813 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Scheduler.h"
#include "jsfriendapi.h"
#include "LabeledEventQueue.h"
#include "LeakRefPtr.h"
#include "mozilla/CooperativeThreadPool.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/ipc/BackgroundChild.h"
#include "mozilla/SchedulerGroup.h"
#include "nsCycleCollector.h"
#include "nsIThread.h"
#include "nsPrintfCString.h"
#include "nsThread.h"
#include "nsThreadManager.h"
#include "PrioritizedEventQueue.h"
#include "xpcpublic.h"
// Windows silliness. winbase.h defines an empty no-argument Yield macro.
#undef Yield
using namespace mozilla;
// Using the anonymous namespace here causes GCC to generate:
// error: 'mozilla::SchedulerImpl' has a field 'mozilla::SchedulerImpl::mQueue' whose type uses the anonymous namespace
namespace mozilla {
namespace detail {
class SchedulerEventQueue final : public SynchronizedEventQueue
{
public:
explicit SchedulerEventQueue(UniquePtr<AbstractEventQueue> aQueue)
: mLock("Scheduler")
, mNonCooperativeCondVar(mLock, "SchedulerNonCoop")
, mQueue(Move(aQueue))
, mScheduler(nullptr)
{}
bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
EventPriority aPriority) final;
void Disconnect(const MutexAutoLock& aProofOfLock) final {}
already_AddRefed<nsIRunnable> GetEvent(bool aMayWait,
EventPriority* aPriority) final;
bool HasPendingEvent() final;
bool HasPendingEvent(const MutexAutoLock& aProofOfLock);
bool ShutdownIfNoPendingEvents() final;
already_AddRefed<nsIThreadObserver> GetObserver() final;
already_AddRefed<nsIThreadObserver> GetObserverOnThread() final;
void SetObserver(nsIThreadObserver* aObserver) final;
void EnableInputEventPrioritization() final;
void FlushInputEventPrioritization() final;
void SuspendInputEventPrioritization() final;
void ResumeInputEventPrioritization() final;
bool UseCooperativeScheduling() const;
void SetScheduler(SchedulerImpl* aScheduler);
Mutex& MutexRef() { return mLock; }
private:
Mutex mLock;
CondVar mNonCooperativeCondVar;
// Using the actual type here would avoid a virtual dispatch. However, that
// would prevent us from switching between EventQueue and LabeledEventQueue at
// runtime.
UniquePtr<AbstractEventQueue> mQueue;
bool mEventsAreDoomed = false;
SchedulerImpl* mScheduler;
nsCOMPtr<nsIThreadObserver> mObserver;
};
} // namespace detail
} // namespace mozilla
using mozilla::detail::SchedulerEventQueue;
class mozilla::SchedulerImpl
{
public:
explicit SchedulerImpl(SchedulerEventQueue* aQueue);
static already_AddRefed<SchedulerEventQueue>
CreateQueue(nsIIdlePeriod* aIdlePeriod, nsThread** aThread);
void Start();
void Shutdown();
void Dispatch(already_AddRefed<nsIRunnable> aEvent);
void Yield();
static void EnterNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation);
static void ExitNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation);
static void StartEvent(Scheduler::EventLoopActivation& aActivation);
static void FinishEvent(Scheduler::EventLoopActivation& aActivation);
void SetJSContext(size_t aIndex, JSContext* aCx)
{
mContexts[aIndex] = aCx;
}
static void YieldCallback(JSContext* aCx);
static bool InterruptCallback(JSContext* aCx);
CooperativeThreadPool* GetThreadPool() { return mThreadPool.get(); }
static bool UnlabeledEventRunning() { return sUnlabeledEventRunning; }
static bool AnyEventRunning() { return sNumThreadsRunning > 0; }
CooperativeThreadPool::Resource* GetQueueResource() { return &mQueueResource; }
bool UseCooperativeScheduling() const { return mQueue->UseCooperativeScheduling(); }
// Preferences.
static bool sPrefScheduler;
static bool sPrefChaoticScheduling;
static bool sPrefPreemption;
static size_t sPrefThreadCount;
static bool sPrefUseMultipleQueues;
private:
void Interrupt(JSContext* aCx);
void YieldFromJS(JSContext* aCx);
static void SwitcherThread(void* aData);
void Switcher();
size_t mNumThreads;
// Protects mQueue as well as mThreadPool. The lock comes from the SchedulerEventQueue.
Mutex& mLock;
CondVar mShutdownCondVar;
bool mShuttingDown;
UniquePtr<CooperativeThreadPool> mThreadPool;
RefPtr<SchedulerEventQueue> mQueue;
class QueueResource : public CooperativeThreadPool::Resource
{
public:
explicit QueueResource(SchedulerImpl* aScheduler)
: mScheduler(aScheduler)
{}
bool IsAvailable(const MutexAutoLock& aProofOfLock) override;
private:
SchedulerImpl* mScheduler;
};
QueueResource mQueueResource;
class SystemZoneResource : public CooperativeThreadPool::Resource
{
public:
explicit SystemZoneResource(SchedulerImpl* aScheduler)
: mScheduler(aScheduler) {}
bool IsAvailable(const MutexAutoLock& aProofOfLock) override;
private:
SchedulerImpl* mScheduler;
};
SystemZoneResource mSystemZoneResource;
class ThreadController : public CooperativeThreadPool::Controller
{
public:
ThreadController(SchedulerImpl* aScheduler, SchedulerEventQueue* aQueue)
: mScheduler(aScheduler)
, mMainVirtual(GetCurrentVirtualThread())
, mMainLoop(MessageLoop::current())
, mMainQueue(aQueue)
{}
void OnStartThread(size_t aIndex, const nsACString& aName, void* aStackTop) override;
void OnStopThread(size_t aIndex) override;
void OnSuspendThread(size_t aIndex) override;
void OnResumeThread(size_t aIndex) override;
private:
SchedulerImpl* mScheduler;
PRThread* mMainVirtual;
MessageLoop* mMainLoop;
MessageLoop* mOldMainLoop;
RefPtr<SynchronizedEventQueue> mMainQueue;
};
ThreadController mController;
static size_t sNumThreadsRunning;
static bool sUnlabeledEventRunning;
JSContext* mContexts[CooperativeThreadPool::kMaxThreads];
};
bool SchedulerImpl::sPrefScheduler;
bool SchedulerImpl::sPrefChaoticScheduling;
bool SchedulerImpl::sPrefPreemption;
bool SchedulerImpl::sPrefUseMultipleQueues;
size_t SchedulerImpl::sPrefThreadCount;
size_t SchedulerImpl::sNumThreadsRunning;
bool SchedulerImpl::sUnlabeledEventRunning;
bool
SchedulerEventQueue::PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
EventPriority aPriority)
{
// We want to leak the reference when we fail to dispatch it, so that
// we won't release the event in a wrong thread.
LeakRefPtr<nsIRunnable> event(Move(aEvent));
nsCOMPtr<nsIThreadObserver> obs;
{
MutexAutoLock lock(mLock);
if (mEventsAreDoomed) {
return false;
}
mQueue->PutEvent(event.take(), aPriority, lock);
if (mScheduler) {
CooperativeThreadPool* pool = mScheduler->GetThreadPool();
MOZ_ASSERT(pool);
pool->RecheckBlockers(lock);
} else {
mNonCooperativeCondVar.Notify();
}
// Make sure to grab the observer before dropping the lock, otherwise the
// event that we just placed into the queue could run and eventually delete
// this nsThread before the calling thread is scheduled again. We would then
// crash while trying to access a dead nsThread.
obs = mObserver;
}
if (obs) {
obs->OnDispatchedEvent();
}
return true;
}
already_AddRefed<nsIRunnable>
SchedulerEventQueue::GetEvent(bool aMayWait,
EventPriority* aPriority)
{
MutexAutoLock lock(mLock);
if (SchedulerImpl::sPrefChaoticScheduling) {
CooperativeThreadPool::Yield(nullptr, lock);
}
nsCOMPtr<nsIRunnable> event;
for (;;) {
event = mQueue->GetEvent(aPriority, lock);
if (event || !aMayWait) {
break;
}
if (mScheduler) {
CooperativeThreadPool::Yield(mScheduler->GetQueueResource(), lock);
} else {
mNonCooperativeCondVar.Wait();
}
}
return event.forget();
}
bool
SchedulerEventQueue::HasPendingEvent()
{
MutexAutoLock lock(mLock);
return HasPendingEvent(lock);
}
bool
SchedulerEventQueue::HasPendingEvent(const MutexAutoLock& aProofOfLock)
{
return mQueue->HasReadyEvent(aProofOfLock);
}
bool
SchedulerEventQueue::ShutdownIfNoPendingEvents()
{
MutexAutoLock lock(mLock);
MOZ_ASSERT(!mScheduler);
if (mQueue->IsEmpty(lock)) {
mEventsAreDoomed = true;
return true;
}
return false;
}
bool
SchedulerEventQueue::UseCooperativeScheduling() const
{
MOZ_ASSERT(NS_IsMainThread());
return !!mScheduler;
}
void
SchedulerEventQueue::SetScheduler(SchedulerImpl* aScheduler)
{
MutexAutoLock lock(mLock);
mScheduler = aScheduler;
}
already_AddRefed<nsIThreadObserver>
SchedulerEventQueue::GetObserver()
{
MutexAutoLock lock(mLock);
return do_AddRef(mObserver.get());
}
already_AddRefed<nsIThreadObserver>
SchedulerEventQueue::GetObserverOnThread()
{
MOZ_ASSERT(NS_IsMainThread());
return do_AddRef(mObserver.get());
}
void
SchedulerEventQueue::SetObserver(nsIThreadObserver* aObserver)
{
MutexAutoLock lock(mLock);
mObserver = aObserver;
}
void
SchedulerEventQueue::EnableInputEventPrioritization()
{
MutexAutoLock lock(mLock);
mQueue->EnableInputEventPrioritization(lock);
}
void
SchedulerEventQueue::FlushInputEventPrioritization()
{
MutexAutoLock lock(mLock);
mQueue->FlushInputEventPrioritization(lock);
}
void
SchedulerEventQueue::SuspendInputEventPrioritization()
{
MutexAutoLock lock(mLock);
mQueue->SuspendInputEventPrioritization(lock);
}
void
SchedulerEventQueue::ResumeInputEventPrioritization()
{
MutexAutoLock lock(mLock);
mQueue->ResumeInputEventPrioritization(lock);
}
UniquePtr<SchedulerImpl> Scheduler::sScheduler;
SchedulerImpl::SchedulerImpl(SchedulerEventQueue* aQueue)
: mNumThreads(sPrefThreadCount)
, mLock(aQueue->MutexRef())
, mShutdownCondVar(aQueue->MutexRef(), "SchedulerImpl")
, mShuttingDown(false)
, mQueue(aQueue)
, mQueueResource(this)
, mSystemZoneResource(this)
, mController(this, aQueue)
, mContexts()
{
}
void
SchedulerImpl::Interrupt(JSContext* aCx)
{
MutexAutoLock lock(mLock);
CooperativeThreadPool::Yield(nullptr, lock);
}
/* static */ bool
SchedulerImpl::InterruptCallback(JSContext* aCx)
{
Scheduler::sScheduler->Interrupt(aCx);
return true;
}
void
SchedulerImpl::YieldFromJS(JSContext* aCx)
{
MutexAutoLock lock(mLock);
CooperativeThreadPool::Yield(&mSystemZoneResource, lock);
}
/* static */ void
SchedulerImpl::YieldCallback(JSContext* aCx)
{
Scheduler::sScheduler->YieldFromJS(aCx);
}
void
SchedulerImpl::Switcher()
{
// This thread switcher is extremely basic and only meant for testing. The
// goal is to switch as much as possible without regard for performance.
MutexAutoLock lock(mLock);
while (!mShuttingDown) {
CooperativeThreadPool::SelectedThread threadIndex = mThreadPool->CurrentThreadIndex(lock);
if (threadIndex.is<size_t>()) {
JSContext* cx = mContexts[threadIndex.as<size_t>()];
if (cx) {
JS_RequestInterruptCallbackCanWait(cx);
}
}
mShutdownCondVar.Wait(PR_MicrosecondsToInterval(50));
}
}
/* static */ void
SchedulerImpl::SwitcherThread(void* aData)
{
static_cast<SchedulerImpl*>(aData)->Switcher();
}
/* static */ already_AddRefed<SchedulerEventQueue>
SchedulerImpl::CreateQueue(nsIIdlePeriod* aIdlePeriod, nsThread** aThread)
{
UniquePtr<AbstractEventQueue> queue;
RefPtr<nsThread> mainThread;
if (sPrefUseMultipleQueues) {
using MainThreadQueueT = PrioritizedEventQueue<LabeledEventQueue>;
queue = MakeUnique<MainThreadQueueT>(
MakeUnique<LabeledEventQueue>(),
MakeUnique<LabeledEventQueue>(),
MakeUnique<LabeledEventQueue>(),
MakeUnique<LabeledEventQueue>(),
do_AddRef(aIdlePeriod));
} else {
using MainThreadQueueT = PrioritizedEventQueue<EventQueue>;
queue = MakeUnique<MainThreadQueueT>(
MakeUnique<EventQueue>(),
MakeUnique<EventQueue>(),
MakeUnique<EventQueue>(),
MakeUnique<EventQueue>(),
do_AddRef(aIdlePeriod));
}
auto prioritized = static_cast<PrioritizedEventQueue<AbstractEventQueue>*>(queue.get());
RefPtr<SchedulerEventQueue> synchronizedQueue = new SchedulerEventQueue(Move(queue));
prioritized->SetMutexRef(synchronizedQueue->MutexRef());
// Setup "main" thread
mainThread = new nsThread(WrapNotNull(synchronizedQueue), nsThread::MAIN_THREAD, 0);
prioritized->SetNextIdleDeadlineRef(mainThread->NextIdleDeadlineRef());
mainThread.forget(aThread);
return synchronizedQueue.forget();
}
void
SchedulerImpl::Start()
{
NS_DispatchToMainThread(NS_NewRunnableFunction("Scheduler::Start", [this]() -> void {
// Let's pretend the runnable here isn't actually running.
MOZ_ASSERT(sUnlabeledEventRunning);
sUnlabeledEventRunning = false;
MOZ_ASSERT(sNumThreadsRunning == 1);
sNumThreadsRunning = 0;
mQueue->SetScheduler(this);
xpc::YieldCooperativeContext();
mThreadPool = MakeUnique<CooperativeThreadPool>(mNumThreads, mLock,
mController);
PRThread* switcher = nullptr;
if (sPrefPreemption) {
switcher = PR_CreateThread(PR_USER_THREAD,
SwitcherThread,
this,
PR_PRIORITY_HIGH,
PR_GLOBAL_THREAD,
PR_JOINABLE_THREAD,
0);
}
{
MutexAutoLock mutex(mLock);
while (!mShuttingDown) {
mShutdownCondVar.Wait();
}
}
if (switcher) {
PR_JoinThread(switcher);
}
mThreadPool->Shutdown();
mThreadPool = nullptr;
mQueue->SetScheduler(nullptr);
xpc::ResumeCooperativeContext();
// Put things back to the way they were before we started scheduling.
MOZ_ASSERT(!sUnlabeledEventRunning);
sUnlabeledEventRunning = true;
MOZ_ASSERT(sNumThreadsRunning == 0);
sNumThreadsRunning = 1;
// Delete the SchedulerImpl. Don't use it after this point.
Scheduler::sScheduler = nullptr;
}));
}
void
SchedulerImpl::Shutdown()
{
MutexAutoLock lock(mLock);
mShuttingDown = true;
mShutdownCondVar.Notify();
}
bool
SchedulerImpl::QueueResource::IsAvailable(const MutexAutoLock& aProofOfLock)
{
mScheduler->mLock.AssertCurrentThreadOwns();
RefPtr<SchedulerEventQueue> queue = mScheduler->mQueue;
return queue->HasPendingEvent(aProofOfLock);
}
bool
SchedulerImpl::SystemZoneResource::IsAvailable(const MutexAutoLock& aProofOfLock)
{
mScheduler->mLock.AssertCurrentThreadOwns();
JSContext* cx = dom::danger::GetJSContext();
return js::SystemZoneAvailable(cx);
}
MOZ_THREAD_LOCAL(Scheduler::EventLoopActivation*) Scheduler::EventLoopActivation::sTopActivation;
/* static */ void
Scheduler::EventLoopActivation::Init()
{
sTopActivation.infallibleInit();
}
Scheduler::EventLoopActivation::EventLoopActivation()
: mPrev(sTopActivation.get())
, mProcessingEvent(false)
, mIsLabeled(false)
{
sTopActivation.set(this);
if (mPrev && mPrev->mProcessingEvent) {
SchedulerImpl::EnterNestedEventLoop(*mPrev);
}
}
Scheduler::EventLoopActivation::~EventLoopActivation()
{
if (mProcessingEvent) {
SchedulerImpl::FinishEvent(*this);
}
MOZ_ASSERT(sTopActivation.get() == this);
sTopActivation.set(mPrev);
if (mPrev && mPrev->mProcessingEvent) {
SchedulerImpl::ExitNestedEventLoop(*mPrev);
}
}
/* static */ void
SchedulerImpl::StartEvent(Scheduler::EventLoopActivation& aActivation)
{
MOZ_ASSERT(!sUnlabeledEventRunning);
if (aActivation.IsLabeled()) {
SchedulerGroup::SetValidatingAccess(SchedulerGroup::StartValidation);
for (SchedulerGroup* group : aActivation.EventGroupsAffected()) {
MOZ_ASSERT(!group->IsRunning());
group->SetIsRunning(true);
}
} else {
sUnlabeledEventRunning = true;
}
sNumThreadsRunning++;
}
/* static */ void
SchedulerImpl::FinishEvent(Scheduler::EventLoopActivation& aActivation)
{
if (aActivation.IsLabeled()) {
for (SchedulerGroup* group : aActivation.EventGroupsAffected()) {
MOZ_ASSERT(group->IsRunning());
group->SetIsRunning(false);
}
SchedulerGroup::SetValidatingAccess(SchedulerGroup::EndValidation);
} else {
MOZ_ASSERT(sUnlabeledEventRunning);
sUnlabeledEventRunning = false;
}
MOZ_ASSERT(sNumThreadsRunning > 0);
sNumThreadsRunning--;
}
// When we enter a nested event loop, we act as if the outer event loop's event
// finished. When we exit the nested event loop, we "resume" the outer event
// loop's event.
/* static */ void
SchedulerImpl::EnterNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation)
{
FinishEvent(aOuterActivation);
}
/* static */ void
SchedulerImpl::ExitNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation)
{
StartEvent(aOuterActivation);
}
void
Scheduler::EventLoopActivation::SetEvent(nsIRunnable* aEvent,
EventPriority aPriority)
{
if (nsCOMPtr<nsILabelableRunnable> labelable = do_QueryInterface(aEvent)) {
if (labelable->GetAffectedSchedulerGroups(mEventGroups)) {
mIsLabeled = true;
}
}
mPriority = aPriority;
mProcessingEvent = aEvent != nullptr;
if (aEvent) {
SchedulerImpl::StartEvent(*this);
}
}
void
SchedulerImpl::ThreadController::OnStartThread(size_t aIndex, const nsACString& aName, void* aStackTop)
{
using mozilla::ipc::BackgroundChild;
// Causes GetCurrentVirtualThread() to return mMainVirtual and NS_IsMainThread()
// to return true.
NS_SetMainThread(mMainVirtual);
// This will initialize the thread's mVirtualThread to mMainVirtual since
// GetCurrentVirtualThread() now returns mMainVirtual.
nsThreadManager::get().CreateCurrentThread(mMainQueue, nsThread::MAIN_THREAD);
profiler_register_thread(aName.BeginReading(), &aStackTop);
mOldMainLoop = MessageLoop::current();
MessageLoop::set_current(mMainLoop);
xpc::CreateCooperativeContext();
JSContext* cx = dom::danger::GetJSContext();
mScheduler->SetJSContext(aIndex, cx);
if (sPrefPreemption) {
JS_AddInterruptCallback(cx, SchedulerImpl::InterruptCallback);
}
js::SetCooperativeYieldCallback(cx, SchedulerImpl::YieldCallback);
}
void
SchedulerImpl::ThreadController::OnStopThread(size_t aIndex)
{
xpc::DestroyCooperativeContext();
NS_UnsetMainThread();
MessageLoop::set_current(mOldMainLoop);
RefPtr<nsThread> self = static_cast<nsThread*>(NS_GetCurrentThread());
nsThreadManager::get().UnregisterCurrentThread(*self);
profiler_unregister_thread();
}
void
SchedulerImpl::ThreadController::OnSuspendThread(size_t aIndex)
{
xpc::YieldCooperativeContext();
}
void
SchedulerImpl::ThreadController::OnResumeThread(size_t aIndex)
{
xpc::ResumeCooperativeContext();
}
void
SchedulerImpl::Yield()
{
MutexAutoLock lock(mLock);
CooperativeThreadPool::Yield(nullptr, lock);
}
/* static */ already_AddRefed<nsThread>
Scheduler::Init(nsIIdlePeriod* aIdlePeriod)
{
MOZ_ASSERT(!sScheduler);
RefPtr<nsThread> mainThread;
RefPtr<SchedulerEventQueue> queue = SchedulerImpl::CreateQueue(aIdlePeriod, getter_AddRefs(mainThread));
sScheduler = MakeUnique<SchedulerImpl>(queue);
return mainThread.forget();
}
/* static */ void
Scheduler::Start()
{
sScheduler->Start();
}
/* static */ void
Scheduler::Shutdown()
{
if (sScheduler) {
sScheduler->Shutdown();
}
}
/* static */ nsCString
Scheduler::GetPrefs()
{
MOZ_ASSERT(XRE_IsParentProcess());
nsPrintfCString result("%d%d%d%d,%d",
Preferences::GetBool("dom.ipc.scheduler", false),
Preferences::GetBool("dom.ipc.scheduler.chaoticScheduling", false),
Preferences::GetBool("dom.ipc.scheduler.preemption", false) ,
Preferences::GetBool("dom.ipc.scheduler.useMultipleQueues", false),
Preferences::GetInt("dom.ipc.scheduler.threadCount", 2));
return result;
}
/* static */ void
Scheduler::SetPrefs(const char* aPrefs)
{
MOZ_ASSERT(XRE_IsContentProcess());
SchedulerImpl::sPrefScheduler = aPrefs[0] == '1';
SchedulerImpl::sPrefChaoticScheduling = aPrefs[1] == '1';
SchedulerImpl::sPrefPreemption = aPrefs[2] == '1';
SchedulerImpl::sPrefUseMultipleQueues = aPrefs[3] == '1';
MOZ_ASSERT(aPrefs[4] == ',');
SchedulerImpl::sPrefThreadCount = atoi(aPrefs + 5);
}
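
Concretely, with the defaults added to all.js in this patch (every boolean off, threadCount 2), GetPrefs() produces the string "0000,2", which ContentParent passes to the child as -schedulerPrefs 0000,2. SetPrefs() decodes it by position: characters 0-3 are the scheduler, chaoticScheduling, preemption and useMultipleQueues booleans, character 4 must be the comma, and the remainder is parsed with atoi as the thread count. Enabling the scheduler together with multiple queues and four threads would yield "1001,4".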
/* static */ bool
Scheduler::IsSchedulerEnabled()
{
return SchedulerImpl::sPrefScheduler;
}
/* static */ bool
Scheduler::IsCooperativeThread()
{
return CooperativeThreadPool::IsCooperativeThread();
}
/* static */ void
Scheduler::Yield()
{
sScheduler->Yield();
}
/* static */ bool
Scheduler::UnlabeledEventRunning()
{
return SchedulerImpl::UnlabeledEventRunning();
}
/* static */ bool
Scheduler::AnyEventRunning()
{
return SchedulerImpl::AnyEventRunning();
}

xpcom/threads/Scheduler.h (new file, 106 lines)
View file

@ -0,0 +1,106 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_Scheduler_h
#define mozilla_Scheduler_h
#include "mozilla/Attributes.h"
#include "mozilla/EventQueue.h"
#include "mozilla/RefPtr.h"
#include "mozilla/UniquePtr.h"
// Windows silliness. winbase.h defines an empty no-argument Yield macro.
#undef Yield
class nsIIdlePeriod;
class nsThread;
namespace mozilla {
class SchedulerGroup;
class SchedulerImpl;
class SynchronizedEventQueue;
// This is the central class for scheduling work on the "main" thread. It starts
// a pool of cooperatively scheduled threads (using CooperativeThreadPool) and
// controls them using a single, main-thread event queue
// (SchedulerEventQueue). Even if cooperative scheduling is not enabled,
// Scheduler can schedule work on the main thread. Its behavior is controlled by
// a number of preferences:
//
// "dom.ipc.scheduler": If this pref is false, Scheduler is not used at all.
//
// "dom.ipc.scheduler.useMultipleQueues": When this pref is true, a
// LabeledEventQueue is used for the main thread event queue. This divides the
// event queue into multiple queues, one per SchedulerGroup. If the pref is
// false, a normal EventQueue is used. Either way, event prioritization via
// PrioritizedEventQueue still happens.
//
// "dom.ipc.scheduler.preemption": If this pref is true, then cooperative
// threads can be preempted before they have finished. This might happen if a
// different cooperative thread is running an event for a higher priority
// SchedulerGroup.
//
// "dom.ipc.scheduler.threadCount": The number of cooperative threads to start.
//
// "dom.ipc.scheduler.chaoticScheduling": When this pref is set, we make an
// effort to switch between threads even when it is not necessary to do
// this. This is useful for debugging.
class Scheduler
{
public:
static already_AddRefed<nsThread> Init(nsIIdlePeriod* aIdlePeriod);
static void Start();
static void Shutdown();
// Scheduler prefs need to be handled differently because the scheduler needs
// to start up in the content process before the normal preferences service.
static nsCString GetPrefs();
static void SetPrefs(const char* aPrefs);
static bool IsSchedulerEnabled();
static bool IsCooperativeThread();
static void Yield();
static bool UnlabeledEventRunning();
static bool AnyEventRunning();
class MOZ_RAII EventLoopActivation
{
public:
EventLoopActivation();
~EventLoopActivation();
static void Init();
bool IsNested() const { return !!mPrev; }
void SetEvent(nsIRunnable* aEvent, EventPriority aPriority);
EventPriority Priority() const { return mPriority; }
bool IsLabeled() { return mIsLabeled; }
const nsTArray<RefPtr<SchedulerGroup>>& EventGroupsAffected() { return mEventGroups; }
private:
EventLoopActivation* mPrev;
bool mProcessingEvent;
bool mIsLabeled;
nsTArray<RefPtr<SchedulerGroup>> mEventGroups;
EventPriority mPriority;
static MOZ_THREAD_LOCAL(EventLoopActivation*) sTopActivation;
};
private:
friend class SchedulerImpl;
static UniquePtr<SchedulerImpl> sScheduler;
};
} // namespace mozilla
#endif // mozilla_Scheduler_h
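
EventLoopActivation is intended to be instantiated by the thread's event loop around each event it runs; the actual call site is in nsThread.cpp, which is among the changed files but not shown in this excerpt. A rough sketch of the intended pattern, with illustrative local names:

  {
    Scheduler::EventLoopActivation activation;
    EventPriority priority;
    nsCOMPtr<nsIRunnable> event = queue->GetEvent(/* aMayWait */ true, &priority);
    // Marks the affected SchedulerGroups (or the unlabeled-event flag) as running.
    activation.SetEvent(event, priority);
    if (event) {
      event->Run();
    }
    // ~EventLoopActivation marks the groups as no longer running; if this
    // activation was nested, the outer event is treated as resumed.
  }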

View file

@ -203,11 +203,14 @@ SchedulerGroup::MarkVsyncRan()
gEarliestUnprocessedVsync = 0;
}
SchedulerGroup* SchedulerGroup::sRunningDispatcher;
MOZ_THREAD_LOCAL(bool) SchedulerGroup::sTlsValidatingAccess;
SchedulerGroup::SchedulerGroup()
: mAccessValid(false)
: mIsRunning(false)
{
if (NS_IsMainThread()) {
sTlsValidatingAccess.infallibleInit();
}
}
nsresult
@ -336,15 +339,15 @@ SchedulerGroup::InternalUnlabeledDispatch(TaskCategory aCategory,
return rv;
}
void
/* static */ void
SchedulerGroup::SetValidatingAccess(ValidationType aType)
{
sRunningDispatcher = aType == StartValidation ? this : nullptr;
mAccessValid = aType == StartValidation;
bool validating = aType == StartValidation;
sTlsValidatingAccess.set(validating);
dom::AutoJSAPI jsapi;
jsapi.Init();
js::EnableAccessValidation(jsapi.cx(), !!sRunningDispatcher);
js::EnableAccessValidation(jsapi.cx(), validating);
}
SchedulerGroup::Runnable::Runnable(already_AddRefed<nsIRunnable>&& aRunnable,
@ -384,8 +387,6 @@ SchedulerGroup::Runnable::Run()
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
mGroup->SetValidatingAccess(StartValidation);
nsresult result;
{
@ -416,23 +417,3 @@ NS_IMPL_ISUPPORTS_INHERITED(SchedulerGroup::Runnable,
nsIRunnablePriority,
nsILabelableRunnable,
SchedulerGroup::Runnable)
SchedulerGroup::AutoProcessEvent::AutoProcessEvent()
: mPrevRunningDispatcher(SchedulerGroup::sRunningDispatcher)
{
SchedulerGroup* prev = sRunningDispatcher;
if (prev) {
MOZ_ASSERT(prev->mAccessValid);
prev->SetValidatingAccess(EndValidation);
}
}
SchedulerGroup::AutoProcessEvent::~AutoProcessEvent()
{
MOZ_ASSERT(!sRunningDispatcher);
SchedulerGroup* prev = mPrevRunningDispatcher;
if (prev) {
prev->SetValidatingAccess(StartValidation);
}
}

View file

@ -9,6 +9,7 @@
#include "mozilla/AlreadyAddRefed.h"
#include "mozilla/TaskCategory.h"
#include "mozilla/ThreadLocal.h"
#include "mozilla/TimeStamp.h"
#include "nsCOMPtr.h"
#include "nsILabelableRunnable.h"
@ -50,22 +51,13 @@ public:
// "background" state.
virtual bool IsBackground() const { return false; }
class MOZ_STACK_CLASS AutoProcessEvent final {
public:
AutoProcessEvent();
~AutoProcessEvent();
private:
SchedulerGroup* mPrevRunningDispatcher;
};
// This function returns true if it's currently safe to run code associated
// with this SchedulerGroup. It will return true either if we're inside an
// unlabeled runnable or if we're inside a runnable labeled with this
// SchedulerGroup.
bool IsSafeToRun() const
{
return !sRunningDispatcher || mAccessValid;
return !sTlsValidatingAccess.get() || mIsRunning;
}
// This function returns true if it's currently safe to run unlabeled code
@ -73,7 +65,7 @@ public:
// unlabeled runnable.
static bool IsSafeToRunUnlabeled()
{
return !sRunningDispatcher;
return !sTlsValidatingAccess.get();
}
// Ensure that it's valid to access the TabGroup at this time.
@ -114,7 +106,7 @@ public:
};
friend class Runnable;
bool* GetValidAccessPtr() { return &mAccessValid; }
bool* GetValidAccessPtr() { return &mIsRunning; }
virtual nsresult Dispatch(TaskCategory aCategory,
already_AddRefed<nsIRunnable>&& aRunnable);
@ -136,6 +128,15 @@ public:
static void MarkVsyncRan();
void SetIsRunning(bool aIsRunning) { mIsRunning = aIsRunning; }
bool IsRunning() const { return mIsRunning; }
enum ValidationType {
StartValidation,
EndValidation,
};
static void SetValidatingAccess(ValidationType aType);
protected:
static nsresult InternalUnlabeledDispatch(TaskCategory aCategory,
already_AddRefed<Runnable>&& aRunnable);
@ -161,14 +162,9 @@ protected:
// dispatcher.
void Shutdown(bool aXPCOMShutdown);
enum ValidationType {
StartValidation,
EndValidation,
};
void SetValidatingAccess(ValidationType aType);
static MOZ_THREAD_LOCAL(bool) sTlsValidatingAccess;
static SchedulerGroup* sRunningDispatcher;
bool mAccessValid;
bool mIsRunning;
nsCOMPtr<nsISerialEventTarget> mEventTargets[size_t(TaskCategory::Count)];
RefPtr<AbstractThread> mAbstractThreads[size_t(TaskCategory::Count)];
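Taken together with the SchedulerGroup.cpp hunk above, this refactor trades the single static sRunningDispatcher pointer and per-group mAccessValid flag for a per-thread sTlsValidatingAccess flag plus a per-group mIsRunning flag: once several cooperative "main" threads exist, a process-wide "currently running dispatcher" can no longer tell each thread which group's event it is executing. A self-contained sketch of the pattern, using plain thread_local instead of MOZ_THREAD_LOCAL and hypothetical names:

#include <cassert>

thread_local bool tlsValidatingAccess = false;  // stands in for sTlsValidatingAccess

struct Group {
  bool mIsRunning = false;  // set while an event labeled with this group runs

  // Mirrors IsSafeToRun(): safe when no labeled event is being validated on
  // this thread, or when the running event belongs to this group.
  bool IsSafeToRun() const { return !tlsValidatingAccess || mIsRunning; }
};

void RunLabeledEvent(Group& aGroup)
{
  aGroup.mIsRunning = true;
  tlsValidatingAccess = true;   // StartValidation
  assert(aGroup.IsSafeToRun());
  // ... run the group's event here ...
  tlsValidatingAccess = false;  // EndValidation
  aGroup.mIsRunning = false;
}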

View file

@ -149,9 +149,9 @@ ThreadEventQueue<InnerQueueT>::HasPendingEvent()
// We always get events from the topmost queue when there are nested queues.
if (mNestedQueues.IsEmpty()) {
return mBaseQueue->HasPendingEvent(lock);
return mBaseQueue->HasReadyEvent(lock);
} else {
return mNestedQueues.LastElement().mQueue->HasPendingEvent(lock);
return mNestedQueues.LastElement().mQueue->HasReadyEvent(lock);
}
}
@ -160,7 +160,7 @@ bool
ThreadEventQueue<InnerQueueT>::ShutdownIfNoPendingEvents()
{
MutexAutoLock lock(mLock);
if (mNestedQueues.IsEmpty() && !mBaseQueue->HasPendingEvent(lock)) {
if (mNestedQueues.IsEmpty() && mBaseQueue->IsEmpty(lock)) {
mEventsAreDoomed = true;
return true;
}
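The two call sites now ask different questions: HasPendingEvent() becomes HasReadyEvent(), meaning an event exists and is allowed to run right now (an idle event whose deadline has not arrived is pending but not ready), while ShutdownIfNoPendingEvents() switches to IsEmpty() because the thread may only shut down when nothing is queued at all. A toy queue illustrating why the two predicates can disagree (hypothetical, not the Gecko API):

#include <chrono>
#include <deque>

using Clock = std::chrono::steady_clock;

struct ToyIdleQueue
{
  struct Entry { Clock::time_point mNotBefore; /* runnable omitted */ };
  std::deque<Entry> mEntries;

  // Anything queued at all? Shutdown must wait until this returns true.
  bool IsEmpty() const { return mEntries.empty(); }

  // Is there an event we may run right now? Can be false while IsEmpty() is
  // also false, e.g. an idle event waiting for a future deadline.
  bool HasReadyEvent(Clock::time_point aNow) const
  {
    return !mEntries.empty() && mEntries.front().mNotBefore <= aNow;
  }
};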

View file

@ -170,7 +170,7 @@ class ThrottledEventQueue::Inner final : public nsIObserver
// executor. We do this now, before running the event, because
// the event might spin the event loop and we don't want to stall
// the queue.
if (mEventQueue.HasPendingEvent(lock)) {
if (mEventQueue.HasReadyEvent(lock)) {
// Dispatch the next base target runnable to attempt to execute
// the next throttled event. We must do this before executing
// the event in case the event spins the event loop.
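The ordering this comment describes matters because a throttled event may re-enter the event loop: the executor re-arms itself for the next event before running the current one, so the queue keeps draining even while a nested loop spins. A schematic, self-contained version of that executor body (hypothetical names and types, not ThrottledEventQueue's real interface):

#include <functional>
#include <mutex>
#include <queue>

struct ToyThrottledQueue
{
  std::mutex mMutex;
  std::queue<std::function<void()>> mEvents;
  std::function<void(std::function<void()>)> mDispatchToBaseTarget;

  void ExecuteOne()
  {
    std::function<void()> event;
    bool more = false;
    {
      std::lock_guard<std::mutex> lock(mMutex);
      if (mEvents.empty()) {
        return;
      }
      event = std::move(mEvents.front());
      mEvents.pop();
      more = !mEvents.empty();
    }
    if (more) {
      // Re-arm first: the event below may spin a nested event loop, and the
      // queue must keep draining while that loop runs.
      mDispatchToBaseTarget([this] { ExecuteOne(); });
    }
    event();
  }
};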

View file

@ -39,6 +39,7 @@ EXPORTS.mozilla += [
'AbstractThread.h',
'BlockingResourceBase.h',
'CondVar.h',
'CooperativeThreadPool.h',
'DeadlockDetector.h',
'EventQueue.h',
'HangAnnotations.h',
@ -53,6 +54,7 @@ EXPORTS.mozilla += [
'RecursiveMutex.h',
'ReentrantMonitor.h',
'RWLock.h',
'Scheduler.h',
'SchedulerGroup.h',
'SharedThreadPool.h',
'StateMirroring.h',
@ -74,10 +76,12 @@ SOURCES += [
UNIFIED_SOURCES += [
'AbstractThread.cpp',
'BlockingResourceBase.cpp',
'CooperativeThreadPool.cpp',
'EventQueue.cpp',
'HangAnnotations.cpp',
'HangMonitor.cpp',
'InputEventStatistics.cpp',
'LabeledEventQueue.cpp',
'LazyIdleThread.cpp',
'MainThreadIdlePeriod.cpp',
'nsEnvironment.cpp',
@ -92,6 +96,7 @@ UNIFIED_SOURCES += [
'PrioritizedEventQueue.cpp',
'RecursiveMutex.cpp',
'RWLock.cpp',
'Scheduler.cpp',
'SchedulerGroup.cpp',
'SharedThreadPool.cpp',
'SynchronizedEventQueue.cpp',

View file

@ -28,6 +28,7 @@
#include "mozilla/IOInterposer.h"
#include "mozilla/ipc/MessageChannel.h"
#include "mozilla/ipc/BackgroundChild.h"
#include "mozilla/Scheduler.h"
#include "mozilla/SchedulerGroup.h"
#include "mozilla/Services.h"
#include "mozilla/SystemGroup.h"
@ -928,10 +929,10 @@ nsThread::ProcessNextEvent(bool aMayWait, bool* aResult)
// and repeat the nested event loop since its state change hasn't happened yet.
bool reallyWait = aMayWait && (mNestedEventLoopDepth > 0 || !ShuttingDown());
Maybe<SchedulerGroup::AutoProcessEvent> ape;
Maybe<Scheduler::EventLoopActivation> activation;
if (mIsMainThread == MAIN_THREAD) {
DoMainThreadSpecificProcessing(reallyWait);
ape.emplace();
activation.emplace();
}
++mNestedEventLoopDepth;
@ -965,6 +966,10 @@ nsThread::ProcessNextEvent(bool aMayWait, bool* aResult)
EventPriority priority;
nsCOMPtr<nsIRunnable> event = mEvents->GetEvent(reallyWait, &priority);
if (activation.isSome()) {
activation.ref().SetEvent(event, priority);
}
*aResult = (event.get() != nullptr);
if (event) {
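This is where SchedulerGroup::AutoProcessEvent gives way to Scheduler::EventLoopActivation: one activation is created per main-thread event-loop turn, nested turns chain through mPrev, and SetEvent() later records which runnable and priority the turn ended up processing. A stripped-down sketch of that shape, with hypothetical names:

struct ToyActivation
{
  static thread_local ToyActivation* sTop;  // like sTopActivation

  ToyActivation* mPrev = nullptr;
  const void* mEvent = nullptr;  // stands in for the nsIRunnable*
  int mPriority = 0;

  ToyActivation() : mPrev(sTop) { sTop = this; }
  ~ToyActivation() { sTop = mPrev; }

  bool IsNested() const { return mPrev != nullptr; }

  // Called once the event loop has pulled the next event off the queue.
  void SetEvent(const void* aEvent, int aPriority)
  {
    mEvent = aEvent;
    mPriority = aPriority;
  }
};

thread_local ToyActivation* ToyActivation::sTop = nullptr;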

View file

@ -13,6 +13,7 @@
#include "mozilla/AbstractThread.h"
#include "mozilla/EventQueue.h"
#include "mozilla/Preferences.h"
#include "mozilla/Scheduler.h"
#include "mozilla/SystemGroup.h"
#include "mozilla/ThreadEventQueue.h"
#include "mozilla/ThreadLocal.h"
@ -28,6 +29,7 @@
using namespace mozilla;
static MOZ_THREAD_LOCAL(bool) sTLSIsMainThread;
static MOZ_THREAD_LOCAL(PRThread*) gTlsCurrentVirtualThread;
bool
NS_IsMainThread()
@ -45,6 +47,26 @@ NS_SetMainThread()
MOZ_ASSERT(NS_IsMainThread());
}
void
NS_SetMainThread(PRThread* aVirtualThread)
{
MOZ_ASSERT(Scheduler::IsCooperativeThread());
MOZ_ASSERT(!gTlsCurrentVirtualThread.get());
gTlsCurrentVirtualThread.set(aVirtualThread);
NS_SetMainThread();
}
void
NS_UnsetMainThread()
{
MOZ_ASSERT(Scheduler::IsCooperativeThread());
sTLSIsMainThread.set(false);
MOZ_ASSERT(!NS_IsMainThread());
gTlsCurrentVirtualThread.set(nullptr);
}
typedef nsTArray<NotNull<RefPtr<nsThread>>> nsThreadArray;
//-----------------------------------------------------------------------------
@ -84,6 +106,12 @@ nsThreadManager::Init()
return NS_OK;
}
if (!gTlsCurrentVirtualThread.init()) {
return NS_ERROR_UNEXPECTED;
}
Scheduler::EventLoopActivation::Init();
if (PR_NewThreadPrivateIndex(&mCurThreadIndex, ReleaseObject) == PR_FAILURE) {
return NS_ERROR_FAILURE;
}
@ -99,28 +127,35 @@ nsThreadManager::Init()
0;
#endif
nsCOMPtr<nsIIdlePeriod> idlePeriod = new MainThreadIdlePeriod();
bool startScheduler = false;
if (XRE_IsContentProcess() && Scheduler::IsSchedulerEnabled()) {
mMainThread = Scheduler::Init(idlePeriod);
startScheduler = true;
} else {
using MainThreadQueueT = PrioritizedEventQueue<EventQueue>;
nsCOMPtr<nsIIdlePeriod> idlePeriod = new MainThreadIdlePeriod();
auto prioritized = MakeUnique<MainThreadQueueT>(MakeUnique<EventQueue>(),
MakeUnique<EventQueue>(),
MakeUnique<EventQueue>(),
MakeUnique<EventQueue>(),
idlePeriod.forget());
// Save a reference temporarily so we can set some state on it.
// Save a copy temporarily so we can set some state on it.
MainThreadQueueT* prioritizedRef = prioritized.get();
RefPtr<ThreadEventQueue<MainThreadQueueT>> queue =
new ThreadEventQueue<MainThreadQueueT>(Move(prioritized));
prioritizedRef->SetMutexRef(queue->MutexRef());
// Setup "main" thread
mMainThread = new nsThread(WrapNotNull(queue), nsThread::MAIN_THREAD, 0);
prioritizedRef->SetMutexRef(queue->MutexRef());
#ifndef RELEASE_OR_BETA
prioritizedRef->SetNextIdleDeadlineRef(mMainThread->NextIdleDeadlineRef());
#endif
}
nsresult rv = mMainThread->InitCurrentThread();
if (NS_FAILED(rv)) {
@ -137,6 +172,10 @@ nsThreadManager::Init()
AbstractThread::InitMainThread();
mInitialized = true;
if (startScheduler) {
Scheduler::Start();
}
return NS_OK;
}
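Two details above are easy to miss: a raw pointer to the prioritized queue is kept before ownership moves into the ThreadEventQueue, because that is the only remaining way to hand it the mutex and idle-deadline references afterwards; and Scheduler::Start() is deferred until mInitialized is set, so the cooperative threads never observe a half-initialized thread manager. The keep-a-pointer-before-the-move pattern in miniature (hypothetical types, plain C++):

#include <memory>
#include <utility>

struct Inner
{
  int* mSharedState = nullptr;
  void SetSharedState(int* aState) { mSharedState = aState; }
};

struct Outer
{
  explicit Outer(std::unique_ptr<Inner> aInner) : mInner(std::move(aInner)) {}
  int mState = 0;
  std::unique_ptr<Inner> mInner;
};

void BuildMainThreadQueue()
{
  auto inner = std::make_unique<Inner>();
  Inner* innerRef = inner.get();            // non-owning copy, taken before the move
  Outer outer(std::move(inner));            // ownership transfers; `inner` is now null
  innerRef->SetSharedState(&outer.mState);  // wire the inner object back to the outer
  // ... use `outer` as the thread's event queue ...
}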
@ -248,6 +287,26 @@ nsThreadManager::UnregisterCurrentThread(nsThread& aThread)
// Ref-count balanced via ReleaseObject
}
nsThread*
nsThreadManager::CreateCurrentThread(SynchronizedEventQueue* aQueue,
nsThread::MainThreadFlag aMainThread)
{
// Make sure we don't have an nsThread yet.
MOZ_ASSERT(!PR_GetThreadPrivate(mCurThreadIndex));
if (!mInitialized) {
return nullptr;
}
// OK, that's fine. We'll dynamically create one :-)
RefPtr<nsThread> thread = new nsThread(WrapNotNull(aQueue), aMainThread, 0);
if (!thread || NS_FAILED(thread->InitCurrentThread())) {
return nullptr;
}
return thread.get(); // reference held in TLS
}
nsThread*
nsThreadManager::GetCurrentThread()
{
@ -477,3 +536,26 @@ nsThreadManager::IdleDispatchToMainThread(nsIRunnable *aEvent, uint32_t aTimeout
return NS_IdleDispatchToThread(event.forget(), mMainThread);
}
namespace mozilla {
PRThread*
GetCurrentVirtualThread()
{
// We call GetCurrentVirtualThread very early in startup, before the TLS is
// initialized. Make sure we don't assert in that case.
if (gTlsCurrentVirtualThread.initialized()) {
if (gTlsCurrentVirtualThread.get()) {
return gTlsCurrentVirtualThread.get();
}
}
return PR_GetCurrentThread();
}
PRThread*
GetCurrentPhysicalThread()
{
return PR_GetCurrentThread();
}
} // namespace mozilla
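GetCurrentVirtualThread() now prefers a cooperative thread's virtual identity but falls back to the physical thread when the TLS slot is not yet initialized (very early startup) or simply unset (any non-cooperative thread). The same fallback logic as a standalone sketch, with a hypothetical nullable TLS wrapper standing in for MOZ_THREAD_LOCAL and RealThreadId() standing in for PR_GetCurrentThread():

#include <cstddef>
#include <functional>
#include <thread>

using ThreadId = std::size_t;

// Stand-in for PR_GetCurrentThread(): a stable per-OS-thread identity.
ThreadId RealThreadId()
{
  return std::hash<std::thread::id>{}(std::this_thread::get_id());
}

struct VirtualThreadSlot
{
  bool mInitialized = false;  // MOZ_THREAD_LOCAL needs an explicit init()
  ThreadId mValue = 0;
};
thread_local VirtualThreadSlot gVirtualThread;

ThreadId CurrentVirtualThread()
{
  // Called very early in startup; never assert when the slot is uninitialized.
  if (gVirtualThread.mInitialized && gVirtualThread.mValue != 0) {
    return gVirtualThread.mValue;  // a cooperatively scheduled "main" thread
  }
  return RealThreadId();           // everyone else: physical == virtual
}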

View file

@ -44,6 +44,15 @@ public:
// initialized.
nsThread* GetCurrentThread();
// CreateCurrentThread sets up an nsThread for the current thread. It uses the
// event queue and main-thread flag passed in. It should only be called once
// for the current thread. After it returns, GetCurrentThread() will return
// the thread that was created. GetCurrentThread() will also create a thread
// (lazily), but it doesn't allow the queue or main-thread attributes to be
// specified.
nsThread* CreateCurrentThread(mozilla::SynchronizedEventQueue* aQueue,
nsThread::MainThreadFlag aMainThread);
// Returns the maximal number of threads that have been in existence
// simultaneously during the execution of the thread manager.
uint32_t GetHighestNumberOfThreads();
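A presumable consumer is the cooperative scheduler itself: a pool thread that is about to act as the main thread needs an nsThread bound to the shared main-thread queue rather than the lazily created private one GetCurrentThread() would hand it. A hedged usage sketch (the real call site is in Scheduler.cpp, which is not shown in this section; the helper below is hypothetical):

// Hypothetical helper illustrating the intended call pattern.
void AdoptAsCooperativeMainThread(mozilla::SynchronizedEventQueue* aSharedQueue,
                                  PRThread* aVirtualMainThread)
{
  NS_SetMainThread(aVirtualMainThread);  // declared later in this patch
  nsThread* thread =
    nsThreadManager::get().CreateCurrentThread(aSharedQueue,
                                               nsThread::MAIN_THREAD);
  if (!thread) {
    NS_UnsetMainThread();
    return;  // manager not initialized, or this thread already has an nsThread
  }
  // ... process events from aSharedQueue, then NS_UnsetMainThread() on exit ...
}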

View file

@ -581,18 +581,6 @@ nsAutoLowPriorityIO::~nsAutoLowPriorityIO()
namespace mozilla {
PRThread*
GetCurrentVirtualThread()
{
return PR_GetCurrentThread();
}
PRThread*
GetCurrentPhysicalThread()
{
return PR_GetCurrentThread();
}
nsIEventTarget*
GetCurrentThreadEventTarget()
{

View file

@ -1681,6 +1681,18 @@ private:
void
NS_SetMainThread();
// Used only on cooperatively scheduled "main" threads. Causes the thread to be
// considered a main thread and also causes GetCurrentVirtualThread to return
// aVirtualThread.
void
NS_SetMainThread(PRThread* aVirtualThread);
// Used only on cooperatively scheduled "main" threads. Causes the thread to no
// longer be considered a main thread. Also causes GetCurrentVirtualThread() to
// return a unique value.
void
NS_UnsetMainThread();
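Since every cooperative thread must pair these calls when it takes and later gives up the main-thread role, a small RAII guard keeps that pairing correct across early returns; the class below is illustrative only, not something this patch adds:

// Illustrative helper: pairs NS_SetMainThread(aVirtualThread) with
// NS_UnsetMainThread() so no code path can leave the TLS flags stale.
class MOZ_RAII AutoVirtualMainThread
{
public:
  explicit AutoVirtualMainThread(PRThread* aVirtualThread)
  {
    NS_SetMainThread(aVirtualThread);
  }
  ~AutoVirtualMainThread()
  {
    NS_UnsetMainThread();
  }
};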
/**
* Return the expiration time of the next timer to run on the current
* thread. If that expiration time is greater than aDefault, then