/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: sw=4 ts=4 et :
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Plugin App.
 *
 * The Initial Developer of the Original Code is
 *   Chris Jones <jones.chris.g@gmail.com>
 * Portions created by the Initial Developer are Copyright (C) 2009
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef ipc_glue_RPCChannel_h
#define ipc_glue_RPCChannel_h 1

// FIXME/cjones probably shouldn't depend on STL
#include <map>
#include <queue>
#include <stack>

#include "base/basictypes.h"

#include "pratom.h"

#include "mozilla/ipc/SyncChannel.h"
#include "nsAutoPtr.h"

namespace mozilla {
namespace ipc {
//-----------------------------------------------------------------------------

class RPCChannel : public SyncChannel
{
    friend class CxxStackFrame;

public:
    // What happens if RPC calls race?
    enum RacyRPCPolicy {
        RRPError,
        RRPChildWins,
        RRPParentWins
    };

    class /*NS_INTERFACE_CLASS*/ RPCListener :
        public SyncChannel::SyncListener
    {
    public:
        virtual ~RPCListener() { }

        virtual void OnChannelClose() = 0;
        virtual void OnChannelError() = 0;
        virtual Result OnMessageReceived(const Message& aMessage) = 0;
        virtual bool OnReplyTimeout() = 0;
        virtual Result OnMessageReceived(const Message& aMessage,
                                         Message*& aReply) = 0;
        virtual Result OnCallReceived(const Message& aMessage,
                                      Message*& aReply) = 0;

        virtual void OnEnteredCxxStack()
        {
            NS_RUNTIMEABORT("default impl shouldn't be invoked");
        }

        virtual void OnExitedCxxStack()
        {
            NS_RUNTIMEABORT("default impl shouldn't be invoked");
        }

        virtual RacyRPCPolicy MediateRPCRace(const Message& parent,
                                             const Message& child)
        {
            return RRPChildWins;
        }
    };

    RPCChannel(RPCListener* aListener);

    virtual ~RPCChannel();

    NS_OVERRIDE
    void Clear();

    // Make an RPC to the other side of the channel
    bool Call(Message* msg, Message* reply);

    // RPCChannel overrides these so that the async and sync messages
    // can be counted against mCxxStackFrames
    NS_OVERRIDE
    virtual bool Send(Message* msg);
    NS_OVERRIDE
    virtual bool Send(Message* msg, Message* reply);

    // Asynchronously, send the child a message that puts it in such a
    // state that it can't send messages to the parent unless the
    // parent sends a message to it first.  The child stays in this
    // state until the parent calls |UnblockChild()|.
    //
    // It is an error to
    //  - call this on the child side of the channel.
    //  - nest |BlockChild()| calls
    //  - call this when the child is already blocked on a sync or RPC
    //    in-/out- message/call
    //
    // Return true iff successful.
    bool BlockChild();

    // Asynchronously undo |BlockChild()|.
    //
    // It is an error to
    //  - call this on the child side of the channel
    //  - call this without a matching |BlockChild()|
    //
    // Return true iff successful.
    bool UnblockChild();
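
    // Illustrative sketch (not part of the original header): on the
    // parent side one might bracket a critical section roughly as
    //
    //   if (channel->BlockChild()) {
    //       // ... work the child must not interrupt with new messages ...
    //       channel->UnblockChild();
    //   }
    //
    // Both calls are parent-side only and must not be nested.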

    // Return true iff this has code on the C++ stack.
    bool IsOnCxxStack() const {
        return 0 < mCxxStackFrames;
    }

    NS_OVERRIDE
    virtual bool OnSpecialMessage(uint16 id, const Message& msg);

    // Override the SyncChannel handler so we can dispatch RPC
    // messages.  Called on the IO thread only.
    NS_OVERRIDE
    virtual void OnMessageReceived(const Message& msg);
    NS_OVERRIDE
    virtual void OnChannelError();

#ifdef OS_WIN
    static bool IsSpinLoopActive() {
        return (sInnerEventLoopDepth > 0);
    }

protected:
    bool WaitForNotify();
    bool IsMessagePending();
    bool SpinInternalEventLoop();

    static void EnterModalLoop() {
        sInnerEventLoopDepth++;
    }
    static void ExitModalLoop() {
        sInnerEventLoopDepth--;
        NS_ASSERTION(sInnerEventLoopDepth >= 0,
                     "sInnerEventLoopDepth dropped below zero!");
    }

    static int sInnerEventLoopDepth;
#endif

private:
    // Called on worker thread only

    RPCListener* Listener() const {
        return static_cast<RPCListener*>(mListener);
    }

    NS_OVERRIDE
    virtual bool ShouldDeferNotifyMaybeError() {
        return 0 < mCxxStackFrames;
    }

    bool EventOccurred();

    void MaybeProcessDeferredIncall();
    void EnqueuePendingMessages();

    void OnMaybeDequeueOne();
    void Incall(const Message& call, size_t stackDepth);
    void DispatchIncall(const Message& call);

    void BlockOnParent();
    void UnblockFromParent();

    // This helper class manages RPCChannel.mCxxStackFrames on behalf
    // of RPCChannel.  When the stack depth is incremented from zero
    // to non-zero, it invokes an RPCChannel callback, and similarly
    // for when the depth goes from non-zero to zero.
    void EnteredCxxStack()
    {
        Listener()->OnEnteredCxxStack();
    }

    void ExitedCxxStack()
    {
        Listener()->OnExitedCxxStack();
    }

    class NS_STACK_CLASS CxxStackFrame
    {
    public:
        CxxStackFrame(RPCChannel& that) : mThat(that) {
            NS_ABORT_IF_FALSE(0 <= mThat.mCxxStackFrames,
                              "mismatched CxxStackFrame ctor/dtor");
            mThat.AssertWorkerThread();

            if (0 == mThat.mCxxStackFrames++)
                mThat.EnteredCxxStack();
        }

        ~CxxStackFrame() {
            bool exitingStack = (0 == --mThat.mCxxStackFrames);

            // mListener could have gone away if Close() was called while
            // RPCChannel code was still on the stack
            if (!mThat.mListener)
                return;

            mThat.AssertWorkerThread();
            if (exitingStack)
                mThat.ExitedCxxStack();
        }

    private:
        RPCChannel& mThat;

        // disable harmful methods
        CxxStackFrame();
        CxxStackFrame(const CxxStackFrame&);
        CxxStackFrame& operator=(const CxxStackFrame&);
    };
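
    // Illustrative sketch (not from the original header): RPCChannel's
    // entry points are expected to open a CxxStackFrame on the worker
    // thread, e.g.
    //
    //   bool RPCChannel::Call(Message* msg, Message* reply)
    //   {
    //       CxxStackFrame f(*this);  // 0 -> 1 fires EnteredCxxStack()
    //       // ... send the call, wait for and dispatch replies ...
    //   }                            // 1 -> 0 fires ExitedCxxStack()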

    // Called from both threads
    size_t StackDepth() {
        mMutex.AssertCurrentThreadOwns();
        return mStack.size();
    }

    void DebugAbort(const char* file, int line, const char* cond,
                    const char* why,
                    const char* type="rpc", bool reply=false);

    //
    // Queue of all incoming messages, except for replies to sync
    // messages, which are delivered directly to the SyncChannel
    // through its mRecvd member.
    //
    // If both this side and the other side are functioning correctly,
    // the queue can only be in certain configurations.  Let
    //
    //   |A<| be an async in-message,
    //   |S<| be a sync in-message,
    //   |C<| be an RPC in-call,
    //   |R<| be an RPC reply.
    //
    // The queue can only match this configuration
    //
    //   A<* (S< | C< | R< (?{mStack.size() == 1} A<* (S< | C<)))
    //
    // The other side can send as many async messages |A<*| as it
    // wants before sending us a blocking message.
    //
    // The first case is |S<|, a sync in-msg.  The other side must be
    // blocked, and thus can't send us any more messages until we
    // process the sync in-msg.
    //
    // The second case is |C<|, an RPC in-call; the other side must be
    // blocked.  (There's a subtlety here: this in-call might have
    // raced with an out-call, but we detect that with the mechanism
    // below, |mRemoteStackDepthGuess|, and races don't matter to the
    // queue.)
    //
    // Final case, the other side replied to our most recent out-call
    // |R<|.  If that was the *only* out-call on our stack,
    // |?{mStack.size() == 1}|, then the other side "finished with us,"
    // and went back to its own business.  That business might have
    // included sending any number of async messages |A<*| until
    // sending a blocking message |(S< | C<)|.  If we had more than
    // one RPC call on our stack, the other side *better* not have
    // sent us another blocking message, because it's blocked on a
    // reply from us.
    //
    std::queue<Message> mPending;
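
    // For illustration only (not from the original header), one queue
    // state matching the grammar above is
    //
    //   A<  A<  R<  A<  S<
    //
    // two async in-messages, the reply to our single outstanding
    // out-call (mStack.size() == 1), then traffic the other side sent
    // after it finished with us and later blocked on a sync message.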

    //
    // Stack of all the RPC out-calls on which this RPCChannel is
    // awaiting a response.
    //
    std::stack<Message> mStack;

    //
    // Map of replies received "out of turn", because of RPC
    // in-calls racing with replies to outstanding in-calls.  See
    // https://bugzilla.mozilla.org/show_bug.cgi?id=521929.
    //
    typedef std::map<size_t, Message> MessageMap;
    MessageMap mOutOfTurnReplies;

    //
    // Stack of RPC in-calls that were deferred because of race
    // conditions.
    //
    std::stack<Message> mDeferred;

    //
    // This is what we think the RPC stack depth is on the "other
    // side" of this RPC channel.  We maintain this variable so that
    // we can detect racy RPC calls.  With each RPC out-call sent, we
    // send along what *we* think the stack depth of the remote side
    // is *before* it will receive the RPC call.
    //
    // After sending the out-call, our stack depth is "incremented"
    // by pushing that pending out-call onto mStack.
    //
    // Then when processing an in-call |c|, it must be true that
    //
    //   mStack.size() == c.remoteDepth
    //
    // i.e., my depth is actually the same as what the other side
    // thought it was when it sent in-call |c|.  If this fails to
    // hold, we have detected racy RPC calls.
    //
    // We then increment mRemoteStackDepthGuess *just before*
    // processing the in-call, since we know the other side is waiting
    // on it, and decrement it *just after* finishing processing that
    // in-call, since our response will pop the top of the other
    // side's |mPending|.
    //
    // One nice aspect of this race detection is that it is symmetric;
    // if one side detects a race, then the other side must also
    // detect the same race.
    //
    size_t mRemoteStackDepthGuess;
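
    // Worked example (added for illustration, not in the original
    // header): suppose both sides are idle (depth 0) and each makes an
    // out-call at the same time.  Each side sent remoteDepth == 0, but
    // by the time each receives the other's in-call its own
    // mStack.size() is 1, so 1 != 0 and both sides detect the same
    // race.  RacyRPCPolicy (via MediateRPCRace) then decides whose
    // call is processed first.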

    // True iff the parent has put us in a |BlockChild()| state.
    bool mBlockedOnParent;

    // Approximation of the number of Sync/RPCChannel-code frames on
    // the C++ stack.  It can only be interpreted as the implication
    //
    //   mCxxStackFrames > 0 => RPCChannel code on C++ stack
    //
    // This member is only accessed on the worker thread, and so is
    // not protected by mMutex.  It is managed exclusively by the
    // helper |class CxxStackFrame|.
    int mCxxStackFrames;

private:
    //
    // All dequeuing tasks require a single point of cancellation,
    // which is handled via a reference-counted task.
    //
    class RefCountedTask
    {
    public:
        RefCountedTask(CancelableTask* aTask)
            : mTask(aTask)
            , mRefCnt(0) {}
        ~RefCountedTask() { delete mTask; }
        void Run() { mTask->Run(); }
        void Cancel() { mTask->Cancel(); }
        void AddRef() {
            PR_AtomicIncrement(reinterpret_cast<PRInt32*>(&mRefCnt));
        }
        void Release() {
            nsrefcnt count =
                PR_AtomicDecrement(reinterpret_cast<PRInt32*>(&mRefCnt));
            if (0 == count)
                delete this;
        }

    private:
        CancelableTask* mTask;
        nsrefcnt mRefCnt;
    };

    //
    // Wrap an existing task which can be cancelled at any time
    // without the wrapper's knowledge.
    //
    class DequeueTask : public Task
    {
    public:
        DequeueTask(RefCountedTask* aTask) : mTask(aTask) {}
        void Run() { mTask->Run(); }

    private:
        nsRefPtr<RefCountedTask> mTask;
    };

    // A task encapsulating dequeuing one pending task
    nsRefPtr<RefCountedTask> mDequeueOneTask;
};


} // namespace ipc
} // namespace mozilla

#endif  // ifndef ipc_glue_RPCChannel_h