/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#if !defined(TaskDispatcher_h_)
#define TaskDispatcher_h_

#include "mozilla/AbstractThread.h"
#include "mozilla/Maybe.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
#include "nsISupportsImpl.h"
#include "nsTArray.h"
#include "nsThreadUtils.h"

#include <queue>

namespace mozilla {

/*
 * A classic approach to cross-thread communication is to dispatch asynchronous
 * runnables to perform updates on other threads. This generally works well, but
 * there are sometimes reasons why we might want to delay the actual dispatch of
 * these tasks until a specified moment. At present, this is primarily useful to
 * ensure that mirrored state gets updated atomically - but there may be other
 * applications as well.
 *
 * TaskDispatcher is a general abstract class that accepts tasks and dispatches
 * them at some later point. These groups of tasks are per-target-thread, and
 * contain separate queues for several kinds of tasks (see the comments below):
 * "state change tasks", which run first and are intended to be used to update
 * the value held by mirrors, and regular tasks, which are arbitrary operations
 * gated to run after all the state changes have completed.
 */
class TaskDispatcher
{
public:
  TaskDispatcher() {}
  virtual ~TaskDispatcher() {}

  // Direct tasks are run directly (rather than dispatched asynchronously) when
  // the tail dispatcher fires. A direct task may cause other tasks to be added
  // to the tail dispatcher.
  virtual void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) = 0;

  // State change tasks are dispatched asynchronously, and always run before
  // regular tasks. They are intended to be used to update the value held by
  // mirrors before any other dispatched tasks are run on the target thread.
  virtual void AddStateChangeTask(AbstractThread* aThread,
                                  already_AddRefed<nsIRunnable> aRunnable) = 0;

  // Regular tasks are dispatched asynchronously, and run after state change
  // tasks.
  virtual void AddTask(AbstractThread* aThread,
                       already_AddRefed<nsIRunnable> aRunnable,
                       AbstractThread::DispatchFailureHandling aFailureHandling = AbstractThread::AssertDispatchSuccess) = 0;

  virtual void DispatchTasksFor(AbstractThread* aThread) = 0;
  virtual bool HasTasksFor(AbstractThread* aThread) = 0;
  virtual void DrainDirectTasks() = 0;
};
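
// Usage sketch (illustrative only, not part of this header; `someThread` and
// the runnables `updateMirror`, `doWork` and `notifyLocal` are assumed names):
//
//   TaskDispatcher& d = AbstractThread::GetCurrent()->TailDispatcher();
//   d.AddStateChangeTask(someThread, updateMirror.forget());
//   d.AddTask(someThread, doWork.forget());
//   d.AddDirectTask(notifyLocal.forget());
//
// When the dispatcher fires, notifyLocal runs synchronously on the current
// thread, while updateMirror and then doWork are dispatched to someThread,
// with the state change task guaranteed to run before the regular task.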

/*
 * AutoTaskDispatcher is a stack-scoped TaskDispatcher implementation that fires
 * its queued tasks when it is popped off the stack.
 */
class AutoTaskDispatcher : public TaskDispatcher
{
public:
  explicit AutoTaskDispatcher(bool aIsTailDispatcher = false)
    : mIsTailDispatcher(aIsTailDispatcher)
  {}

  ~AutoTaskDispatcher()
  {
    // Given that direct tasks may trigger other code that uses the tail
    // dispatcher, it's better to avoid processing them in the tail dispatcher's
    // destructor. So we require TailDispatchers to manually invoke
    // DrainDirectTasks before the AutoTaskDispatcher gets destroyed. In truth,
    // this is only necessary in the case where this AutoTaskDispatcher can be
    // accessed by the direct tasks it dispatches (true for TailDispatchers, but
    // potentially not true for other hypothetical AutoTaskDispatchers). Feel
    // free to loosen this restriction to apply only to mIsTailDispatcher if a
    // use-case requires it.
    MOZ_ASSERT(!HaveDirectTasks());

    for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
      DispatchTaskGroup(Move(mTaskGroups[i]));
    }
  }

  bool HaveDirectTasks() const
  {
    return mDirectTasks.isSome() && !mDirectTasks->empty();
  }

  void DrainDirectTasks() override
  {
    while (HaveDirectTasks()) {
      nsCOMPtr<nsIRunnable> r = mDirectTasks->front();
      mDirectTasks->pop();
      r->Run();
    }
  }

  void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) override
  {
    if (mDirectTasks.isNothing()) {
      mDirectTasks.emplace();
    }
    mDirectTasks->push(Move(aRunnable));
  }

  void AddStateChangeTask(AbstractThread* aThread,
                          already_AddRefed<nsIRunnable> aRunnable) override
  {
    nsCOMPtr<nsIRunnable> r = aRunnable;
    MOZ_RELEASE_ASSERT(r);
    EnsureTaskGroup(aThread).mStateChangeTasks.AppendElement(r.forget());
  }

  void AddTask(AbstractThread* aThread,
               already_AddRefed<nsIRunnable> aRunnable,
               AbstractThread::DispatchFailureHandling aFailureHandling) override
  {
    nsCOMPtr<nsIRunnable> r = aRunnable;
    MOZ_RELEASE_ASSERT(r);
    // To preserve the event order, we need to append a new group if the last
    // group is not targeted for |aThread|.
    // See https://bugzilla.mozilla.org/show_bug.cgi?id=1318226&mark=0-3#c0
    // for the details of the issue.
    if (mTaskGroups.Length() == 0 || mTaskGroups.LastElement()->mThread != aThread) {
      mTaskGroups.AppendElement(new PerThreadTaskGroup(aThread));
    }

    PerThreadTaskGroup& group = *mTaskGroups.LastElement();
    group.mRegularTasks.AppendElement(r.forget());

    // The task group needs to assert dispatch success if any of the runnables
    // it's dispatching want to assert it.
    if (aFailureHandling == AbstractThread::AssertDispatchSuccess) {
      group.mFailureHandling = AbstractThread::AssertDispatchSuccess;
    }
  }
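
  // Example of the ordering rule above (illustrative; t1, t2, a, b, c are
  // hypothetical threads and runnables):
  //
  //   AddTask(t1, a);  // groups: [a]
  //   AddTask(t2, b);  // groups: [a] [b]
  //   AddTask(t1, c);  // groups: [a] [b] [c]  -- not merged into [a, c] [b]
  //
  // Keeping the third group separate preserves the order in which the tasks
  // were added when the groups are eventually dispatched.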

  bool HasTasksFor(AbstractThread* aThread) override
  {
    return !!GetTaskGroup(aThread) ||
           (aThread == AbstractThread::GetCurrent() && HaveDirectTasks());
  }

  void DispatchTasksFor(AbstractThread* aThread) override
  {
    // Dispatch all groups that match |aThread|.
    for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
      if (mTaskGroups[i]->mThread == aThread) {
        DispatchTaskGroup(Move(mTaskGroups[i]));
        mTaskGroups.RemoveElementAt(i--);
      }
    }
  }

private:
  struct PerThreadTaskGroup
  {
  public:
    explicit PerThreadTaskGroup(AbstractThread* aThread)
      : mThread(aThread), mFailureHandling(AbstractThread::DontAssertDispatchSuccess)
    {
      MOZ_COUNT_CTOR(PerThreadTaskGroup);
    }

    ~PerThreadTaskGroup() { MOZ_COUNT_DTOR(PerThreadTaskGroup); }

    RefPtr<AbstractThread> mThread;
    nsTArray<nsCOMPtr<nsIRunnable>> mStateChangeTasks;
    nsTArray<nsCOMPtr<nsIRunnable>> mRegularTasks;
    AbstractThread::DispatchFailureHandling mFailureHandling;
  };

  class TaskGroupRunnable : public Runnable
  {
  public:
    explicit TaskGroupRunnable(UniquePtr<PerThreadTaskGroup>&& aTasks) : mTasks(Move(aTasks)) {}

    NS_IMETHOD Run() override
    {
      // State change tasks get run all together before any code is run, so
      // that all state changes are made in an atomic unit.
      for (size_t i = 0; i < mTasks->mStateChangeTasks.Length(); ++i) {
        mTasks->mStateChangeTasks[i]->Run();
      }

      // Once the state changes have completed, drain any direct tasks
      // generated by those state changes (i.e. watcher notification tasks).
      // This needs to be outside the loop because we don't want to run code
      // that might observe intermediate states.
      MaybeDrainDirectTasks();

      for (size_t i = 0; i < mTasks->mRegularTasks.Length(); ++i) {
        mTasks->mRegularTasks[i]->Run();

        // Scope direct tasks tightly to the task that generated them.
        MaybeDrainDirectTasks();
      }

      return NS_OK;
    }
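
    // Resulting order on the target thread (an illustrative summary of the
    // code above, not additional behaviour): every state change task runs
    // first, then queued direct tasks are drained once, then each regular
    // task runs with a direct-task drain immediately after it.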

  private:
    void MaybeDrainDirectTasks()
    {
      AbstractThread* currentThread = AbstractThread::GetCurrent();
      if (currentThread) {
        currentThread->TailDispatcher().DrainDirectTasks();
      }
    }

    UniquePtr<PerThreadTaskGroup> mTasks;
  };

  PerThreadTaskGroup& EnsureTaskGroup(AbstractThread* aThread)
  {
    PerThreadTaskGroup* existing = GetTaskGroup(aThread);
    if (existing) {
      return *existing;
    }

    mTaskGroups.AppendElement(new PerThreadTaskGroup(aThread));
    return *mTaskGroups.LastElement();
  }

  PerThreadTaskGroup* GetTaskGroup(AbstractThread* aThread)
  {
    for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
      if (mTaskGroups[i]->mThread == aThread) {
        return mTaskGroups[i].get();
      }
    }

    // Not found.
    return nullptr;
  }

  void DispatchTaskGroup(UniquePtr<PerThreadTaskGroup> aGroup)
  {
    RefPtr<AbstractThread> thread = aGroup->mThread;

    AbstractThread::DispatchFailureHandling failureHandling = aGroup->mFailureHandling;
    AbstractThread::DispatchReason reason = mIsTailDispatcher ? AbstractThread::TailDispatch
                                                              : AbstractThread::NormalDispatch;
    nsCOMPtr<nsIRunnable> r = new TaskGroupRunnable(Move(aGroup));
    thread->Dispatch(r.forget(), failureHandling, reason);
  }

  // Direct tasks. We use a Maybe<> because (a) this class is hot, (b)
  // mDirectTasks often doesn't get anything put into it, and (c) the
  // std::queue implementation in GNU libstdc++ does two largish heap
  // allocations when creating a new std::queue.
  mozilla::Maybe<std::queue<nsCOMPtr<nsIRunnable>>> mDirectTasks;

  // Task groups, organized by thread.
  nsTArray<UniquePtr<PerThreadTaskGroup>> mTaskGroups;

  // True if this TaskDispatcher represents the tail dispatcher for the thread
  // upon which it runs.
  const bool mIsTailDispatcher;
};
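
// Usage sketch for AutoTaskDispatcher (illustrative only; `someThread` and the
// runnables `updateMirror` and `doWork` are assumed names, not part of this
// header):
//
//   {
//     AutoTaskDispatcher dispatcher;
//     dispatcher.AddStateChangeTask(someThread, updateMirror.forget());
//     dispatcher.AddTask(someThread, doWork.forget());
//   } // Destructor fires here: one TaskGroupRunnable is dispatched to
//     // someThread, running updateMirror before doWork.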

// Little utility class to allow declaring AutoTaskDispatcher as a default
// parameter for methods that take a TaskDispatcher&.
template<typename T>
class PassByRef
{
public:
  PassByRef() {}
  operator T&() { return mVal; }
private:
  T mVal;
};
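
// Sketch of how PassByRef can be used (hypothetical method signature, not
// declared in this header):
//
//   void DoThing(TaskDispatcher& aDispatcher = PassByRef<AutoTaskDispatcher>());
//
// Callers that omit the argument get a temporary AutoTaskDispatcher that lives
// for the duration of the call and dispatches its queued tasks when the
// temporary is destroyed.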

} // namespace mozilla

#endif