/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set sw=2 ts=8 et tw=80 :
 */
|
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
#ifndef mozilla_net_ChannelEventQueue_h
|
|
|
|
#define mozilla_net_ChannelEventQueue_h
|
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
#include <nsTArray.h>
|
|
|
|
#include <nsAutoPtr.h>
|
|
|
|
|
2011-05-04 17:36:23 +04:00
|
|
|
class nsISupports;
|
2014-03-28 00:58:19 +04:00
|
|
|
class nsIEventTarget;
|
|
|
|
class nsIThread;
|
2011-06-12 05:37:09 +04:00
|
|
|
|
2010-09-20 22:37:09 +04:00
|
|
|
namespace mozilla {
|
|
|
|
namespace net {
|
|
|
|
|
|
|
|
// Abstract base class for an event buffered by ChannelEventQueue.
// Subclasses implement Run() to replay the buffered work; instances are
// leak-checked via MOZ_COUNT_CTOR/MOZ_COUNT_DTOR.
class ChannelEvent
{
 public:
  ChannelEvent() { MOZ_COUNT_CTOR(ChannelEvent); }
  virtual ~ChannelEvent() { MOZ_COUNT_DTOR(ChannelEvent); }
  // Executes the buffered event. Called when the queue is flushed.
  virtual void Run() = 0;
};
|
|
|
|
|
|
|
|
// Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a
|
|
|
|
// queue if still dispatching previous one(s) to listeners/observers.
|
|
|
|
// Otherwise synchronous XMLHttpRequests and/or other code that spins the
|
|
|
|
// event loop (ex: IPDL rpc) could cause listener->OnDataAvailable (for
|
2011-06-12 05:37:09 +04:00
|
|
|
// instance) to be dispatched and called before mListener->OnStartRequest has
|
|
|
|
// completed.
|
2010-09-20 22:37:09 +04:00
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
class AutoEventEnqueuerBase;
|
2010-10-16 09:26:14 +04:00
|
|
|
|
2014-04-02 22:20:46 +04:00
|
|
|
class ChannelEventQueue MOZ_FINAL
{
  NS_INLINE_DECL_REFCOUNTING(ChannelEventQueue)

 public:
  // |owner| is held as a weak pointer to avoid a refcount cycle; a strong
  // ref is only taken while flushing (see mOwner below).
  explicit ChannelEventQueue(nsISupports *owner)
    : mSuspendCount(0)
    , mSuspended(false)
    , mForced(false)
    , mFlushing(false)
    , mOwner(owner) {}

  // Checks to determine if an IPDL-generated channel event can be processed
  // immediately, or needs to be queued using Enqueue().
  inline bool ShouldEnqueue();

  // Puts IPDL-generated channel event into queue, to be run later
  // automatically when EndForcedQueueing and/or Resume is called.
  inline void Enqueue(ChannelEvent* callback);

  // After StartForcedQueueing is called, ShouldEnqueue() will return true and
  // no events will be run/flushed until EndForcedQueueing is called.
  // - Note: queueing may still be required after EndForcedQueueing() (if the
  //   queue is suspended, etc): always call ShouldEnqueue() to determine
  //   whether queueing is needed.
  inline void StartForcedQueueing();
  inline void EndForcedQueueing();

  // Suspend/resume event queue.  ShouldEnqueue() will return true and no
  // events will be run/flushed until resume is called.  These should be
  // called when the channel owning the event queue is suspended/resumed.
  inline void Suspend();
  // Resume flushes the queue asynchronously, i.e. items in queue will be
  // dispatched in a new event on the current thread.
  void Resume();

  // Retargets delivery of events to the target thread specified.
  nsresult RetargetDeliveryTo(nsIEventTarget* aTargetThread);

 private:
  // Private destructor, to discourage deletion outside of Release():
  ~ChannelEventQueue()
  {
  }

  inline void MaybeFlushQueue();
  void FlushQueue();
  inline void CompleteResume();

  // Owned events, run in FIFO order when flushed (elements are owning
  // nsAutoPtrs, so the queue deletes them).
  nsTArray<nsAutoPtr<ChannelEvent> > mEventQueue;

  // Number of outstanding Suspend() calls; decremented elsewhere
  // (presumably in Resume(), defined in the .cpp — not visible here).
  uint32_t mSuspendCount;
  // True while logically suspended; only cleared in CompleteResume(), so
  // incoming messages keep queueing until queued ones have run.
  bool mSuspended;
  // True between StartForcedQueueing() and EndForcedQueueing().
  bool mForced;
  // True while a flush is in progress (set by FlushQueue() in the .cpp,
  // presumably — verify there); makes ShouldEnqueue() return true.
  bool mFlushing;

  // Keep ptr to avoid refcount cycle: only grab ref during flushing.
  nsISupports *mOwner;

  // EventTarget for delivery of events to the correct thread.
  nsCOMPtr<nsIEventTarget> mTargetThread;

  friend class AutoEventEnqueuer;
};
|
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
inline bool
|
|
|
|
ChannelEventQueue::ShouldEnqueue()
|
2010-09-20 22:37:09 +04:00
|
|
|
{
|
2011-06-12 05:37:09 +04:00
|
|
|
bool answer = mForced || mSuspended || mFlushing;
|
|
|
|
|
|
|
|
NS_ABORT_IF_FALSE(answer == true || mEventQueue.IsEmpty(),
|
|
|
|
"Should always enqueue if ChannelEventQueue not empty");
|
|
|
|
|
|
|
|
return answer;
|
2010-09-20 22:37:09 +04:00
|
|
|
}
|
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
// Appends |callback| to the queue; the queue takes ownership (the element
// is stored in an nsAutoPtr). The event runs later, in FIFO order, when
// the queue is flushed.
inline void
ChannelEventQueue::Enqueue(ChannelEvent* callback)
{
  mEventQueue.AppendElement(callback);
}
|
2010-09-20 22:37:09 +04:00
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
// Forces ShouldEnqueue() to return true until EndForcedQueueing() is called.
// NOTE(review): mForced is a plain flag, not a counter — nested
// Start/EndForcedQueueing pairs (e.g. nested AutoEventEnqueuers) end forced
// queuing at the first End call; confirm this is intended.
inline void
ChannelEventQueue::StartForcedQueueing()
{
  mForced = true;
}
|
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
// Ends forced queuing and flushes any buffered events — unless the queue
// is still suspended or mid-flush (MaybeFlushQueue re-checks all gates).
inline void
ChannelEventQueue::EndForcedQueueing()
{
  mForced = false;
  MaybeFlushQueue();
}
|
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
// Marks the queue suspended and bumps the suspend count. The count is
// decremented elsewhere (presumably Resume(), in the .cpp — not visible
// here); mSuspended itself is only cleared in CompleteResume().
inline void
ChannelEventQueue::Suspend()
{
  mSuspended = true;
  mSuspendCount++;
}
|
|
|
|
|
|
|
|
inline void
|
|
|
|
ChannelEventQueue::CompleteResume()
|
|
|
|
{
|
|
|
|
// channel may have been suspended again since Resume fired event to call this.
|
|
|
|
if (!mSuspendCount) {
|
|
|
|
// we need to remain logically suspended (for purposes of queuing incoming
|
|
|
|
// messages) until this point, else new incoming messages could run before
|
|
|
|
// queued ones.
|
|
|
|
mSuspended = false;
|
|
|
|
MaybeFlushQueue();
|
|
|
|
}
|
2010-09-20 22:37:09 +04:00
|
|
|
}
|
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
inline void
|
|
|
|
ChannelEventQueue::MaybeFlushQueue()
|
|
|
|
{
|
|
|
|
// Don't flush if forced queuing on, we're already being flushed, or
|
|
|
|
// suspended, or there's nothing to flush
|
|
|
|
if (!mForced && !mFlushing && !mSuspended && !mEventQueue.IsEmpty())
|
|
|
|
FlushQueue();
|
2010-10-16 09:26:14 +04:00
|
|
|
}
|
|
|
|
|
2011-06-12 05:37:09 +04:00
|
|
|
// Ensures that ShouldEnqueue() will be true during its lifetime (letting
|
|
|
|
// caller know incoming IPDL msgs should be queued). Flushes the queue when it
|
|
|
|
// goes out of scope.
|
2014-12-05 00:23:33 +03:00
|
|
|
class MOZ_STACK_CLASS AutoEventEnqueuer
|
2010-09-20 22:37:09 +04:00
|
|
|
{
|
|
|
|
public:
|
2014-08-05 17:20:50 +04:00
|
|
|
explicit AutoEventEnqueuer(ChannelEventQueue *queue) : mEventQueue(queue) {
|
2013-06-05 03:10:55 +04:00
|
|
|
mEventQueue->StartForcedQueueing();
|
2010-09-20 22:37:09 +04:00
|
|
|
}
|
2011-06-12 05:37:09 +04:00
|
|
|
~AutoEventEnqueuer() {
|
2013-06-05 03:10:55 +04:00
|
|
|
mEventQueue->EndForcedQueueing();
|
2010-09-20 22:37:09 +04:00
|
|
|
}
|
|
|
|
private:
|
2014-12-05 00:23:33 +03:00
|
|
|
nsRefPtr<ChannelEventQueue> mEventQueue;
|
2010-09-20 22:37:09 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|