/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set sw=2 ts=8 et tw=80 :
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_net_ChannelEventQueue_h
#define mozilla_net_ChannelEventQueue_h

#include "nsTArray.h"
#include "nsAutoPtr.h"
#include "nsIEventTarget.h"
#include "nsThreadUtils.h"
#include "nsXULAppAPI.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Mutex.h"
#include "mozilla/RecursiveMutex.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"

class nsISupports;

namespace mozilla {
namespace net {

class ChannelEvent
{
public:
  ChannelEvent() { MOZ_COUNT_CTOR(ChannelEvent); }
  virtual ~ChannelEvent() { MOZ_COUNT_DTOR(ChannelEvent); }
  virtual void Run() = 0;
  virtual already_AddRefed<nsIEventTarget> GetEventTarget() = 0;
};

// Note that MainThreadChannelEvent should not be used in the child process,
// since GetEventTarget() directly returns an unlabeled event target. (See
// the sketch after this class.)
class MainThreadChannelEvent : public ChannelEvent
{
public:
  MainThreadChannelEvent() { MOZ_COUNT_CTOR(MainThreadChannelEvent); }
  virtual ~MainThreadChannelEvent() { MOZ_COUNT_DTOR(MainThreadChannelEvent); }

  already_AddRefed<nsIEventTarget>
  GetEventTarget() override
  {
    MOZ_ASSERT(XRE_IsParentProcess());

    return do_AddRef(GetMainThreadEventTarget());
  }
};
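
// Illustrative sketch, not part of the original header: a minimal
// parent-process event derived from MainThreadChannelEvent. The name
// "NotifyStartEvent" and its body are hypothetical.
//
//   class NotifyStartEvent : public MainThreadChannelEvent
//   {
//   public:
//     void Run() override
//     {
//       // Runs on the parent-process main thread, via the (unlabeled)
//       // target returned by GetEventTarget().
//       NS_WARNING("channel start observed");
//     }
//   };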

// This event is designed to be used only by e10s child channels.
// The goal is to force the child channel to implement GetNeckoTarget(),
// which should return a labeled main-thread event target so that this
// channel event can be dispatched correctly. (See the illustrative sketch
// after this class.)
template<typename T>
class NeckoTargetChannelEvent : public ChannelEvent
{
public:
  explicit NeckoTargetChannelEvent(T *aChild)
    : mChild(aChild)
  {
    MOZ_COUNT_CTOR(NeckoTargetChannelEvent);
  }
  virtual ~NeckoTargetChannelEvent()
  {
    MOZ_COUNT_DTOR(NeckoTargetChannelEvent);
  }

  already_AddRefed<nsIEventTarget>
  GetEventTarget() override
  {
    MOZ_ASSERT(mChild);

    return mChild->GetNeckoTarget();
  }

protected:
  T *mChild;
};
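
// Illustrative sketch, not part of the original header: the contract that
// NeckoTargetChannelEvent assumes of its T is a GetNeckoTarget() method
// returning a labeled main-thread event target. "MyChannelChild" and
// "mNeckoTarget" are hypothetical names.
//
//   class MyChannelChild
//   {
//   public:
//     already_AddRefed<nsIEventTarget> GetNeckoTarget()
//     {
//       // Expected to be a main-thread target labeled for this channel's
//       // tab group, so the event runs in the right context.
//       return do_AddRef(mNeckoTarget);
//     }
//   private:
//     nsCOMPtr<nsIEventTarget> mNeckoTarget;
//   };
//
// A NeckoTargetChannelEvent<MyChannelChild> then dispatches to that target
// rather than to the plain main thread.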

// Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a
// queue if still dispatching previous one(s) to listeners/observers.
// Otherwise synchronous XMLHttpRequests and/or other code that spins the
// event loop (ex: IPDL rpc) could cause listener->OnDataAvailable (for
// instance) to be dispatched and called before mListener->OnStartRequest has
// completed.
class ChannelEventQueue final
{
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ChannelEventQueue)

public:
  explicit ChannelEventQueue(nsISupports *owner)
    : mSuspendCount(0)
    , mSuspended(false)
    , mForcedCount(0)
    , mFlushing(false)
    , mOwner(owner)
    , mMutex("ChannelEventQueue::mMutex")
    , mRunningMutex("ChannelEventQueue::mRunningMutex")
  {}

  // Puts an IPDL-generated channel event into the queue, to be run later
  // automatically when EndForcedQueueing and/or Resume is called. (See the
  // usage sketch after this class.)
  //
  // @param aCallback - the ChannelEvent
  // @param aAssertionWhenNotQueued - this optional param will be used in an
  //   assertion when the event is executed directly.
  inline void RunOrEnqueue(ChannelEvent* aCallback,
                           bool aAssertionWhenNotQueued = false);

  // Prepends ChannelEvents to the front of the event queue.
  inline nsresult PrependEvent(UniquePtr<ChannelEvent>& aEvent);
  inline nsresult PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents);

  // After StartForcedQueueing is called, RunOrEnqueue() will start enqueuing
  // events, which will be run/flushed when EndForcedQueueing is called.
  // - Note: queueing may still be required after EndForcedQueueing() (if the
  //   queue is suspended, etc.): always call RunOrEnqueue() to avoid race
  //   conditions.
  inline void StartForcedQueueing();
  inline void EndForcedQueueing();

  // Suspend/resume the event queue. RunOrEnqueue() will start enqueuing
  // events, which will be run/flushed when Resume is called. These should be
  // called when the channel that owns the event queue is suspended/resumed.
  void Suspend();
  // Resume flushes the queue asynchronously, i.e. items in the queue will be
  // dispatched in a new event on the current thread.
  void Resume();

private:
  // Private destructor, to discourage deletion outside of Release():
  ~ChannelEventQueue()
  {
  }

  void SuspendInternal();
  void ResumeInternal();

  inline void MaybeFlushQueue();
  void FlushQueue();
  inline void CompleteResume();

  ChannelEvent* TakeEvent();

  nsTArray<UniquePtr<ChannelEvent>> mEventQueue;

  uint32_t mSuspendCount;
  bool mSuspended;
  uint32_t mForcedCount; // Supports forced queueing on multiple threads.
  bool mFlushing;

  // Keep a raw pointer to avoid a refcount cycle: only grab a strong
  // reference during flushing.
  nsISupports *mOwner;

  // For atomic mEventQueue operations and state updates.
  Mutex mMutex;

  // To guarantee event execution order among threads.
  RecursiveMutex mRunningMutex;

  friend class AutoEventEnqueuer;
};
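
// Illustrative usage sketch, not part of the original header. It assumes a
// hypothetical IPDL child channel holding the queue in an mEventQ member and
// a hypothetical StartEvent subclass of ChannelEvent:
//
//   // On receipt of an IPDL message:
//   mEventQ->RunOrEnqueue(new StartEvent(this));
//   // This runs the event immediately when the queue is idle, and queues it
//   // when the queue is suspended, force-queued, already flushing, or
//   // non-empty.
//
//   // Mirroring channel suspension keeps delivery order intact:
//   mEventQ->Suspend();   // start buffering incoming events
//   // ...
//   mEventQ->Resume();    // flush asynchronously on the current thread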

inline void
ChannelEventQueue::RunOrEnqueue(ChannelEvent* aCallback,
                                bool aAssertionWhenNotQueued)
{
  MOZ_ASSERT(aCallback);

  // Event execution could trigger the destruction of the channel (and run
  // our own destructor) unless we make sure its refcount doesn't drop to 0
  // while this method is running.
  nsCOMPtr<nsISupports> kungFuDeathGrip(mOwner);
  Unused << kungFuDeathGrip; // Not used within this function

  // Take ownership of the callback immediately, to avoid leaks.
  UniquePtr<ChannelEvent> event(aCallback);

  // To guarantee that the running event and all the events generated within
  // it will be finished before events on other threads.
  RecursiveMutexAutoLock lock(mRunningMutex);

  {
    MutexAutoLock lock(mMutex);

    bool enqueue = !!mForcedCount || mSuspended || mFlushing ||
                   !mEventQueue.IsEmpty();

    if (enqueue) {
      mEventQueue.AppendElement(Move(event));
      return;
    }

    nsCOMPtr<nsIEventTarget> target = event->GetEventTarget();
    MOZ_ASSERT(target);

    bool isCurrentThread = false;
    DebugOnly<nsresult> rv = target->IsOnCurrentThread(&isCurrentThread);
    MOZ_ASSERT(NS_SUCCEEDED(rv));

    if (!isCurrentThread) {
      // Leverage the Suspend/Resume mechanism to trigger the flush procedure
      // without creating a new one.
      SuspendInternal();
      mEventQueue.AppendElement(Move(event));
      ResumeInternal();
      return;
    }
  }

  MOZ_RELEASE_ASSERT(!aAssertionWhenNotQueued);
  event->Run();
}

inline void
ChannelEventQueue::StartForcedQueueing()
{
  MutexAutoLock lock(mMutex);
  ++mForcedCount;
}

inline void
ChannelEventQueue::EndForcedQueueing()
{
  bool tryFlush = false;
  {
    MutexAutoLock lock(mMutex);
    MOZ_ASSERT(mForcedCount > 0);
    if (!--mForcedCount) {
      tryFlush = true;
    }
  }

  if (tryFlush) {
    MaybeFlushQueue();
  }
}

inline nsresult
ChannelEventQueue::PrependEvent(UniquePtr<ChannelEvent>& aEvent)
{
  MutexAutoLock lock(mMutex);

  // Prepending an event while no queue flush is foreseen might cause the
  // subsequent channel events to never run. This assertion guarantees that
  // there must be a queue flush, triggered by either Resume or
  // EndForcedQueueing, to execute the added event.
  MOZ_ASSERT(mSuspended || !!mForcedCount);

  UniquePtr<ChannelEvent>* newEvent =
    mEventQueue.InsertElementAt(0, Move(aEvent));

  if (!newEvent) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  return NS_OK;
}

inline nsresult
ChannelEventQueue::PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents)
{
  MutexAutoLock lock(mMutex);

  // Prepending events while no queue flush is foreseen might cause the
  // subsequent channel events to never run. This assertion guarantees that
  // there must be a queue flush, triggered by either Resume or
  // EndForcedQueueing, to execute the added events.
  MOZ_ASSERT(mSuspended || !!mForcedCount);

  UniquePtr<ChannelEvent>* newEvents =
    mEventQueue.InsertElementsAt(0, aEvents.Length());
  if (!newEvents) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  for (uint32_t i = 0; i < aEvents.Length(); i++) {
    newEvents[i] = Move(aEvents[i]);
  }

  return NS_OK;
}

inline void
ChannelEventQueue::CompleteResume()
{
  bool tryFlush = false;
  {
    MutexAutoLock lock(mMutex);

    // The channel may have been suspended again since Resume fired the event
    // that calls this.
    if (!mSuspendCount) {
      // We need to remain logically suspended (for purposes of queuing
      // incoming messages) until this point, else new incoming messages
      // could run before queued ones.
      mSuspended = false;
      tryFlush = true;
    }
  }

  if (tryFlush) {
    MaybeFlushQueue();
  }
}

inline void
ChannelEventQueue::MaybeFlushQueue()
{
  // Don't flush if forced queuing is on, we're already being flushed, we're
  // suspended, or there's nothing to flush.
  bool flushQueue = false;

  {
    MutexAutoLock lock(mMutex);
    flushQueue = !mForcedCount && !mFlushing && !mSuspended &&
                 !mEventQueue.IsEmpty();

    // Only one thread is allowed to run FlushQueue at a time.
    if (flushQueue) {
      mFlushing = true;
    }
  }

  if (flushQueue) {
    FlushQueue();
  }
}

// Ensures that RunOrEnqueue() will be collecting events during its lifetime
// (letting the caller know incoming IPDL messages should be queued). Flushes
// the queue when it goes out of scope. (See the usage sketch after this
// class.)
class MOZ_STACK_CLASS AutoEventEnqueuer
{
public:
  explicit AutoEventEnqueuer(ChannelEventQueue *queue)
    : mEventQueue(queue)
    , mOwner(queue->mOwner)
  {
    mEventQueue->StartForcedQueueing();
  }
  ~AutoEventEnqueuer() {
    mEventQueue->EndForcedQueueing();
  }
private:
  RefPtr<ChannelEventQueue> mEventQueue;
  // Ensure channel object lives longer than ChannelEventQueue.
  nsCOMPtr<nsISupports> mOwner;
};
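
// Illustrative usage sketch, not part of the original header: scoping an
// AutoEventEnqueuer across an IPDL message handler. "SomeChannelChild",
// "RecvOnStartRequest" and the mEventQ member are hypothetical.
//
//   mozilla::ipc::IPCResult
//   SomeChannelChild::RecvOnStartRequest(...)
//   {
//     // While this is alive, RunOrEnqueue() queues instead of running
//     // re-entrantly; the queue is flushed when it goes out of scope
//     // (unless still suspended or force-queued elsewhere).
//     AutoEventEnqueuer ensureSerialDispatch(mEventQ);
//     ...
//     return IPC_OK();
//   }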

} // namespace net
} // namespace mozilla

#endif