/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: sw=4 ts=4 et :
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/ipc/RPCChannel.h"
#include "mozilla/ipc/ProtocolUtils.h"

#include "nsDebug.h"
#include "nsTraceRefcnt.h"

#define RPC_ASSERT(_cond, ...)                                      \
    do {                                                            \
        if (!(_cond))                                               \
            DebugAbort(__FILE__, __LINE__, #_cond,## __VA_ARGS__);  \
    } while (0)
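// The arguments after the condition are optional; they are forwarded to
// DebugAbort() below as |why|, |type|, and |reply|.  Usage as seen in this
// file:
//   RPC_ASSERT(StackDepth() > 0, "not in wait loop");
//   RPC_ASSERT(cond, "somebody's misbehavin'", "rpc", true);
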
using mozilla::MonitorAutoLock;
using mozilla::MonitorAutoUnlock;

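// NewRunnableMethod() consults RunnableMethodTraits to retain/release its
// target; RPCChannel is not refcounted, so these hooks are intentionally
// no-ops (the task created in the constructor is instead canceled explicitly
// in RPCChannel::Clear()).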
template<>
struct RunnableMethodTraits<mozilla::ipc::RPCChannel>
{
    static void RetainCallee(mozilla::ipc::RPCChannel* obj) { }
    static void ReleaseCallee(mozilla::ipc::RPCChannel* obj) { }
};

namespace mozilla {
namespace ipc {

RPCChannel::RPCChannel(RPCListener* aListener)
  : SyncChannel(aListener),
    mPending(),
    mStack(),
    mOutOfTurnReplies(),
    mDeferred(),
    mRemoteStackDepthGuess(0),
    mSawRPCOutMsg(false)
{
    MOZ_COUNT_CTOR(RPCChannel);

    mDequeueOneTask = new RefCountedTask(NewRunnableMethod(
        this,
        &RPCChannel::OnMaybeDequeueOne));
}

RPCChannel::~RPCChannel()
{
    MOZ_COUNT_DTOR(RPCChannel);
    RPC_ASSERT(mCxxStackFrames.empty(), "mismatched CxxStackFrame ctor/dtors");
}

void
RPCChannel::Clear()
{
    mDequeueOneTask->Cancel();

    AsyncChannel::Clear();
}

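// An "event" is anything that should wake the wait loop in Call() below: a
// lost connection, a queued pending or urgent message, or an out-of-turn
// reply matching the out-call currently on top of mStack.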
bool
RPCChannel::EventOccurred() const
{
    AssertWorkerThread();
    mMonitor->AssertCurrentThreadOwns();
    RPC_ASSERT(StackDepth() > 0, "not in wait loop");

    return (!Connected() ||
            !mPending.empty() ||
            !mUrgent.empty() ||
            (!mOutOfTurnReplies.empty() &&
             mOutOfTurnReplies.find(mStack.top().seqno())
             != mOutOfTurnReplies.end()));
}

bool
RPCChannel::Send(Message* msg)
{
    Message copy = *msg;
    CxxStackFrame f(*this, OUT_MESSAGE, &copy);
    return AsyncChannel::Send(msg);
}

bool
RPCChannel::Send(Message* msg, Message* reply)
{
    Message copy = *msg;
    CxxStackFrame f(*this, OUT_MESSAGE, &copy);
    return SyncChannel::Send(msg, reply);
}

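// Make a blocking RPC out-call: send |_msg| and spin an RPC-aware nested
// loop until the matching reply (or an error) arrives.  While blocked, the
// loop below may dispatch incoming async/sync/urgent messages and nested
// in-calls.  Illustrative call site (hypothetical message type, not from
// this file):
//
//   Message reply;
//   if (!chan->Call(new SomeProtocolMsg(args), &reply))
//       return false;  // channel error or error reply
//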
bool
RPCChannel::Call(Message* _msg, Message* reply)
{
    RPC_ASSERT(!mPendingReply, "should not be waiting for a reply");

    nsAutoPtr<Message> msg(_msg);
    AssertWorkerThread();
    mMonitor->AssertNotCurrentThreadOwns();
    RPC_ASSERT(!ProcessingSyncMessage() || msg->priority() == IPC::Message::PRIORITY_HIGH,
               "violation of sync handler invariant");
    RPC_ASSERT(msg->is_rpc(), "can only Call() RPC messages here");

#ifdef OS_WIN
    SyncStackFrame frame(this, true);
#endif

    Message copy = *msg;
    CxxStackFrame f(*this, OUT_MESSAGE, &copy);

    MonitorAutoLock lock(*mMonitor);

    if (!Connected()) {
        ReportConnectionError("RPCChannel");
        return false;
    }

    bool urgent = (copy.priority() == IPC::Message::PRIORITY_HIGH);

    msg->set_seqno(NextSeqno());
    msg->set_rpc_remote_stack_depth_guess(mRemoteStackDepthGuess);
    msg->set_rpc_local_stack_depth(1 + StackDepth());
    mStack.push(*msg);

    mLink->SendMessage(msg.forget());

    while (1) {
        // if a handler invoked by *Dispatch*() spun a nested event
        // loop, and the connection was broken during that loop, we
        // might have already processed the OnError event. if so,
        // trying another loop iteration will be futile because
        // channel state will have been cleared
        if (!Connected()) {
            ReportConnectionError("RPCChannel");
            return false;
        }

        // now might be the time to process a message deferred because
        // of race resolution
        MaybeUndeferIncall();

        // here we're waiting for something to happen. see long
        // comment about the queue in RPCChannel.h
        while (!EventOccurred()) {
            bool maybeTimedOut = !RPCChannel::WaitForNotify();

            if (EventOccurred() ||
                // we might have received a "subtly deferred" message
                // in a nested loop that it's now time to process
                (!maybeTimedOut &&
                 (!mDeferred.empty() || !mOutOfTurnReplies.empty())))
                break;

            if (maybeTimedOut && !ShouldContinueFromTimeout())
                return false;
        }

        if (!Connected()) {
            ReportConnectionError("RPCChannel");
            return false;
        }

        Message recvd;
        MessageMap::iterator it;
        if (!mOutOfTurnReplies.empty() &&
            ((it = mOutOfTurnReplies.find(mStack.top().seqno())) !=
             mOutOfTurnReplies.end())) {
            recvd = it->second;
            mOutOfTurnReplies.erase(it);
        }
        else if (!mUrgent.empty()) {
            recvd = mUrgent.front();
            mUrgent.pop_front();
        }
        else if (!mPending.empty()) {
            recvd = mPending.front();
            mPending.pop_front();
        }
        else {
            // because of subtleties with nested event loops, it's
            // possible that we got here and nothing happened. or, we
            // might have a deferred in-call that needs to be
            // processed. either way, we won't break the inner while
            // loop again until something new happens.
            continue;
        }

        if (!recvd.is_rpc()) {
            if (urgent && recvd.priority() != IPC::Message::PRIORITY_HIGH) {
                // If we're waiting for an urgent reply, don't process any
                // messages yet.
                mNonUrgentDeferred.push_back(recvd);
            } else if (recvd.is_sync()) {
                RPC_ASSERT(mPending.empty(),
                           "other side should have been blocked");
                MonitorAutoUnlock unlock(*mMonitor);
                CxxStackFrame f(*this, IN_MESSAGE, &recvd);
                SyncChannel::OnDispatchMessage(recvd);
            } else {
                MonitorAutoUnlock unlock(*mMonitor);
                CxxStackFrame f(*this, IN_MESSAGE, &recvd);
                AsyncChannel::OnDispatchMessage(recvd);
            }
            continue;
        }

        RPC_ASSERT(recvd.is_rpc(), "wtf???");

        if (recvd.is_reply()) {
            RPC_ASSERT(0 < mStack.size(), "invalid RPC stack");

            const Message& outcall = mStack.top();

            // in the parent, seqno's increase from 0, and in the
            // child, they decrease from 0
            if ((!mChild && recvd.seqno() < outcall.seqno()) ||
                (mChild && recvd.seqno() > outcall.seqno())) {
                mOutOfTurnReplies[recvd.seqno()] = recvd;
                continue;
            }

            // FIXME/cjones: handle error
            RPC_ASSERT(
                recvd.is_reply_error() ||
                (recvd.type() == (outcall.type()+1) &&
                 recvd.seqno() == outcall.seqno()),
                "somebody's misbehavin'", "rpc", true);

            // we received a reply to our most recent outstanding
            // call. pop this frame and return the reply
            mStack.pop();

            bool isError = recvd.is_reply_error();
            if (!isError) {
                *reply = recvd;
            }

            if (0 == StackDepth()) {
                RPC_ASSERT(
                    mOutOfTurnReplies.empty(),
                    "still have pending replies with no pending out-calls",
                    "rpc", true);
            }

            // finished with this RPC stack frame
            return !isError;
        }

        // in-call. process in a new stack frame.

        // "snapshot" the current stack depth while we own the Monitor
        size_t stackDepth = StackDepth();
        {
            MonitorAutoUnlock unlock(*mMonitor);
            // someone called in to us from the other side. handle the call
            CxxStackFrame f(*this, IN_MESSAGE, &recvd);
            Incall(recvd, stackDepth);
            // FIXME/cjones: error handling
        }
    }

    return true;
}

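// A racy in-call that lost mediation is parked in mDeferred by Incall().
// Move it back onto mPending once the sender's stack-depth guess has caught
// up with (our estimate of) its view of our stack, i.e. once we've unwound
// far enough for it to be processed in order.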
void
RPCChannel::MaybeUndeferIncall()
{
    AssertWorkerThread();
    mMonitor->AssertCurrentThreadOwns();

    if (mDeferred.empty())
        return;

    size_t stackDepth = StackDepth();

    // the other side can only *under*-estimate our actual stack depth
    RPC_ASSERT(mDeferred.top().rpc_remote_stack_depth_guess() <= stackDepth,
               "fatal logic error");

    if (mDeferred.top().rpc_remote_stack_depth_guess() < RemoteViewOfStackDepth(stackDepth))
        return;

    // maybe time to process this message
    Message call = mDeferred.top();
    mDeferred.pop();

    // fix up fudge factor we added to account for race
    RPC_ASSERT(0 < mRemoteStackDepthGuess, "fatal logic error");
    --mRemoteStackDepthGuess;

    mPending.push_back(call);
}

void
RPCChannel::EnqueuePendingMessages()
{
    AssertWorkerThread();
    mMonitor->AssertCurrentThreadOwns();

    MaybeUndeferIncall();

    for (size_t i = 0; i < mDeferred.size(); ++i) {
        mWorkerLoop->PostTask(FROM_HERE, new DequeueTask(mDequeueOneTask));
    }

    // XXX performance tuning knob: could process all or k pending
    // messages here, rather than enqueuing for later processing

    size_t total = mPending.size() + mUrgent.size() + mNonUrgentDeferred.size();
    for (size_t i = 0; i < total; ++i) {
        mWorkerLoop->PostTask(FROM_HERE, new DequeueTask(mDequeueOneTask));
    }
}

void
RPCChannel::FlushPendingRPCQueue()
{
    AssertWorkerThread();
    mMonitor->AssertNotCurrentThreadOwns();

    {
        MonitorAutoLock lock(*mMonitor);

        if (mDeferred.empty()) {
            if (mPending.empty())
                return;

            const Message& last = mPending.back();
            if (!last.is_rpc() || last.is_reply())
                return;
        }
    }

    while (OnMaybeDequeueOne());
}

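// Runs on the worker thread (normally via a posted DequeueTask): dequeue a
// single message, preferring urgent, then non-urgent-deferred, then pending,
// and dispatch it.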
bool
RPCChannel::OnMaybeDequeueOne()
{
    // XXX performance tuning knob: could process all or k pending
    // messages here

    AssertWorkerThread();
    mMonitor->AssertNotCurrentThreadOwns();

    Message recvd;
    {
        MonitorAutoLock lock(*mMonitor);

        if (!Connected()) {
            ReportConnectionError("RPCChannel");
            return false;
        }

        if (!mDeferred.empty())
            MaybeUndeferIncall();

        MessageQueue *queue = mUrgent.empty()
                              ? mNonUrgentDeferred.empty()
                                ? &mPending
                                : &mNonUrgentDeferred
                              : &mUrgent;
        if (queue->empty())
            return false;

        recvd = queue->front();
        queue->pop_front();
    }

    if (IsOnCxxStack() && recvd.is_rpc() && recvd.is_reply()) {
        // We probably just received a reply in a nested loop for an
        // RPC call sent before entering that loop.
        mOutOfTurnReplies[recvd.seqno()] = recvd;
        return false;
    }

    CxxStackFrame f(*this, IN_MESSAGE, &recvd);

    if (recvd.is_rpc())
        Incall(recvd, 0);
    else if (recvd.is_sync())
        SyncChannel::OnDispatchMessage(recvd);
    else
        AsyncChannel::OnDispatchMessage(recvd);

    return true;
}

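// Replies sitting in mOutOfTurnReplies correspond to out-calls the other
// side has already answered, so from its point of view our stack is that
// much shallower than StackDepth() reports.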
size_t
RPCChannel::RemoteViewOfStackDepth(size_t stackDepth) const
{
    AssertWorkerThread();
    return stackDepth - mOutOfTurnReplies.size();
}

void
RPCChannel::Incall(const Message& call, size_t stackDepth)
{
    AssertWorkerThread();
    mMonitor->AssertNotCurrentThreadOwns();
    RPC_ASSERT(call.is_rpc() && !call.is_reply(), "wrong message type");

    // Race detection: see the long comment near
    // mRemoteStackDepthGuess in RPCChannel.h. "Remote" stack depth
    // means our side, and "local" means other side.
    if (call.rpc_remote_stack_depth_guess() != RemoteViewOfStackDepth(stackDepth)) {
        // RPC in-calls have raced.
        // the "winner", if there is one, gets to defer processing of
        // the other side's in-call
        bool defer;
        const char* winner;
        switch (Listener()->MediateRPCRace(mChild ? call : mStack.top(),
                                           mChild ? mStack.top() : call)) {
        case RRPChildWins:
            winner = "child";
            defer = mChild;
            break;
        case RRPParentWins:
            winner = "parent";
            defer = !mChild;
            break;
        case RRPError:
            NS_RUNTIMEABORT("NYI: 'Error' RPC race policy");
            return;
        default:
            NS_RUNTIMEABORT("not reached");
            return;
        }

        if (LoggingEnabled()) {
            printf_stderr("  (%s: %s won, so we're%sdeferring)\n",
                          mChild ? "child" : "parent", winner,
                          defer ? " " : " not ");
        }

        if (defer) {
            // we now know the other side's stack has one more frame
            // than we thought
            ++mRemoteStackDepthGuess; // decremented in MaybeUndeferIncall()
            mDeferred.push(call);
            return;
        }

        // we "lost" and need to process the other side's in-call.
        // don't need to fix up the mRemoteStackDepthGuess here,
        // because we're just about to increment it in DispatchIncall(),
        // which will make it correct again
    }

#ifdef OS_WIN
    SyncStackFrame frame(this, true);
#endif

    DispatchIncall(call);
}

void
RPCChannel::DispatchIncall(const Message& call)
{
    AssertWorkerThread();
    mMonitor->AssertNotCurrentThreadOwns();
    RPC_ASSERT(call.is_rpc() && !call.is_reply(),
               "wrong message type");

    Message* reply = nullptr;

    ++mRemoteStackDepthGuess;
    Result rv = Listener()->OnCallReceived(call, reply);
    --mRemoteStackDepthGuess;

    if (!MaybeHandleError(rv, "RPCChannel")) {
        delete reply;
        reply = new Message();
        reply->set_rpc();
        reply->set_reply();
        reply->set_reply_error();
    }

    reply->set_seqno(call.seqno());

    {
        MonitorAutoLock lock(*mMonitor);
        if (ChannelConnected == mChannelState)
            mLink->SendMessage(reply);
    }
}

void
RPCChannel::ExitedCxxStack()
{
    Listener()->OnExitedCxxStack();
    if (mSawRPCOutMsg) {
        MonitorAutoLock lock(*mMonitor);
        // see long comment in OnMaybeDequeueOne()
        EnqueuePendingMessages();
        mSawRPCOutMsg = false;
    }
}

void
RPCChannel::DebugAbort(const char* file, int line, const char* cond,
                       const char* why,
                       const char* type, bool reply) const
{
    printf_stderr("###!!! [RPCChannel][%s][%s:%d] "
                  "Assertion (%s) failed. %s (triggered by %s%s)\n",
                  mChild ? "Child" : "Parent",
                  file, line, cond,
                  why,
                  type, reply ? "reply" : "");
    // technically we need the mutex for this, but we're dying anyway
    DumpRPCStack("  ");
    printf_stderr("  remote RPC stack guess: %lu\n",
                  mRemoteStackDepthGuess);
    printf_stderr("  deferred stack size: %lu\n",
                  mDeferred.size());
    printf_stderr("  out-of-turn RPC replies stack size: %lu\n",
                  mOutOfTurnReplies.size());
    printf_stderr("  Pending queue size: %lu, front to back:\n",
                  mPending.size());

    MessageQueue pending = mPending;
    while (!pending.empty()) {
        printf_stderr("    [ %s%s ]\n",
                      pending.front().is_rpc() ? "rpc" :
                      (pending.front().is_sync() ? "sync" : "async"),
                      pending.front().is_reply() ? "reply" : "");
        pending.pop_front();
    }

    NS_RUNTIMEABORT(why);
}

void
RPCChannel::DumpRPCStack(const char* const pfx) const
{
    NS_WARN_IF_FALSE(MessageLoop::current() != mWorkerLoop,
                     "The worker thread had better be paused in a debugger!");

    printf_stderr("%sRPCChannel 'backtrace':\n", pfx);

    // print a python-style backtrace, first frame to last
    for (uint32_t i = 0; i < mCxxStackFrames.size(); ++i) {
        int32_t id;
        const char* dir, *sems, *name;
        mCxxStackFrames[i].Describe(&id, &dir, &sems, &name);

        printf_stderr("%s[(%u) %s %s %s(actor=%d) ]\n", pfx,
                      i, dir, sems, name, id);
    }
}

//
// The methods below run in the context of the link thread, and can proxy
// back to the methods above
//

void
RPCChannel::OnMessageReceivedFromLink(const Message& msg)
{
    AssertLinkThread();
    mMonitor->AssertCurrentThreadOwns();

    if (MaybeInterceptSpecialIOMessage(msg))
        return;

    // regardless of the RPC stack, if we're awaiting a sync reply, we
    // know that it needs to be immediately handled to unblock us.
    if (AwaitingSyncReply() && msg.is_sync()) {
        // wake up worker thread waiting at SyncChannel::Send
        mRecvd = msg;
        NotifyWorkerThread();
        return;
    }

    MessageQueue *queue = (msg.priority() == IPC::Message::PRIORITY_HIGH)
                          ? &mUrgent
                          : &mPending;

    bool compressMessage = (msg.compress() && !queue->empty() &&
                            queue->back().type() == msg.type() &&
                            queue->back().routing_id() == msg.routing_id());
    if (compressMessage) {
        // This message type has compression enabled, and the back of
        // the queue was the same message type and routed to the same
        // destination. Replace it with the newer message.
        MOZ_ASSERT(queue->back().compress());
        queue->pop_back();
    }

    queue->push_back(msg);

    // There are three cases we're concerned about, relating to the state of
    // the main thread:
    //
    // (1) We are waiting on a sync reply - main thread is blocked on the IPC monitor.
    //   - If the message is high priority, we wake up the main thread to
    //     deliver the message. Otherwise, we leave it in the mPending queue,
    //     posting a task to the main event loop, where it will be processed
    //     once the synchronous reply has been received.
    //
    // (2) We are waiting on an RPC reply - main thread is blocked on the IPC monitor.
    //   - Always wake up the main thread to deliver the message.
    //
    // (3) We are not waiting on a reply.
    //   - We post a task to the main event loop.
    //
    bool waiting_rpc = (0 != StackDepth());
    bool urgent = (msg.priority() == IPC::Message::PRIORITY_HIGH);

    if (waiting_rpc || (AwaitingSyncReply() && urgent)) {
        // Always wake up our RPC waiter, and wake up sync waiters for urgent
        // messages.
        NotifyWorkerThread();
    } else {
        // Worker thread is either not blocked on a reply, or this is an
        // incoming RPC that raced with outgoing sync and needs to be deferred
        // to a later event-loop iteration.
        if (!compressMessage) {
            // If we compressed away the previous message, we'll reuse
            // its pending task.
            mWorkerLoop->PostTask(FROM_HERE, new DequeueTask(mDequeueOneTask));
        }
    }
}

void
RPCChannel::OnChannelErrorFromLink()
{
    AssertLinkThread();
    mMonitor->AssertCurrentThreadOwns();

    if (0 < StackDepth())
        NotifyWorkerThread();

    SyncChannel::OnChannelErrorFromLink();
}

} // namespace ipc
} // namespace mozilla