// vim:set sw=4 sts=4 et cin:
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsSocketTransportService2.h"
#if !defined(MOZILLA_XPCOMRT_API)
#include "nsSocketTransport2.h"
#include "NetworkActivityMonitor.h"
#include "mozilla/Preferences.h"
#include "nsIOService.h"
#endif // !defined(MOZILLA_XPCOMRT_API)
#include "nsASocketHandler.h"
#include "nsError.h"
#include "prnetdb.h"
#include "prerror.h"
#include "nsIPrefService.h"
#include "nsIPrefBranch.h"
#include "nsServiceManagerUtils.h"
#include "nsIObserverService.h"
#include "mozilla/Atomics.h"
#include "mozilla/Services.h"
#include "mozilla/Likely.h"
#include "mozilla/PublicSSL.h"
#include "mozilla/ChaosMode.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Telemetry.h"
#include "nsThreadUtils.h"
#include "nsIFile.h"

using namespace mozilla;
using namespace mozilla::net;

LazyLogModule gSocketTransportLog("nsSocketTransport");
LazyLogModule gUDPSocketLog("UDPSocket");

nsSocketTransportService *gSocketTransportService = nullptr;
Atomic<PRThread*, Relaxed> gSocketThread;

#define SEND_BUFFER_PREF "network.tcp.sendbuffer"
#define KEEPALIVE_ENABLED_PREF "network.tcp.keepalive.enabled"
#define KEEPALIVE_IDLE_TIME_PREF "network.tcp.keepalive.idle_time"
#define KEEPALIVE_RETRY_INTERVAL_PREF "network.tcp.keepalive.retry_interval"
#define KEEPALIVE_PROBE_COUNT_PREF "network.tcp.keepalive.probe_count"
#define SOCKET_LIMIT_TARGET 550U
#define SOCKET_LIMIT_MIN 50U
#define BLIP_INTERVAL_PREF "network.activity.blipIntervalMilliseconds"
#define MAX_TIME_BETWEEN_TWO_POLLS "network.sts.max_time_for_events_between_two_polls"
#define TELEMETRY_PREF "toolkit.telemetry.enabled"
#define MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN "network.sts.max_time_for_pr_close_during_shutdown"

uint32_t nsSocketTransportService::gMaxCount;
PRCallOnceType nsSocketTransportService::gMaxCountInitOnce;

class DebugMutexAutoLock
{
public:
    explicit DebugMutexAutoLock(Mutex& mutex);
    ~DebugMutexAutoLock();

private:
    Mutex *mLock;
    static Atomic<PRThread *, Relaxed> sDebugOwningThread;
};

Atomic<PRThread *, Relaxed> DebugMutexAutoLock::sDebugOwningThread;

DebugMutexAutoLock::DebugMutexAutoLock(Mutex& mutex)
    : mLock(&mutex)
{
    PRThread *currentThread = PR_GetCurrentThread();
    MOZ_DIAGNOSTIC_ASSERT(sDebugOwningThread != currentThread);
    SOCKET_LOG(("Acquiring lock on thread %p", currentThread));
    mLock->Lock();
    sDebugOwningThread = currentThread;
    SOCKET_LOG(("Acquired lock on thread %p", currentThread));
}

DebugMutexAutoLock::~DebugMutexAutoLock()
{
    sDebugOwningThread = nullptr;
    mLock->Unlock();
    mLock = nullptr;
    SOCKET_LOG(("Released lock on thread %p", PR_GetCurrentThread()));
}
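
// Usage sketch (illustrative; not from the original file): DebugMutexAutoLock
// is an RAII guard like MutexAutoLock, but it additionally records the owning
// thread so that re-entry on the same thread trips MOZ_DIAGNOSTIC_ASSERT
// instead of deadlocking silently. Assuming a member `Mutex mLock`, a caller
// would write:
//
//   {
//       DebugMutexAutoLock lock(mLock); // asserts we don't already own it
//       // ... touch state guarded by mLock ...
//   }                                   // released when `lock` leaves scope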

//-----------------------------------------------------------------------------
// ctor/dtor (called on the main/UI thread by the service manager)

nsSocketTransportService::nsSocketTransportService()
    : mThread(nullptr)
    , mThreadEvent(nullptr)
    , mAutodialEnabled(false)
    , mLock("nsSocketTransportService::mLock")
    , mInitialized(false)
    , mShuttingDown(false)
    , mOffline(false)
    , mGoingOffline(false)
    , mActiveListSize(SOCKET_LIMIT_MIN)
    , mIdleListSize(SOCKET_LIMIT_MIN)
    , mActiveCount(0)
    , mIdleCount(0)
    , mSentBytesCount(0)
    , mReceivedBytesCount(0)
    , mEventQueueLock("nsSocketTransportService::mEventQueueLock")
    , mPendingSocketQ(mEventQueueLock)
    , mSendBufferSize(0)
    , mKeepaliveIdleTimeS(600)
    , mKeepaliveRetryIntervalS(1)
    , mKeepaliveProbeCount(kDefaultTCPKeepCount)
    , mKeepaliveEnabledPref(false)
    , mServingPendingQueue(false)
    , mMaxTimePerPollIter(100)
    , mTelemetryEnabledPref(false)
    , mMaxTimeForPrClosePref(PR_SecondsToInterval(5))
    , mProbedMaxCount(false)
{
    NS_ASSERTION(NS_IsMainThread(), "wrong thread");

    PR_CallOnce(&gMaxCountInitOnce, DiscoverMaxCount);
    mActiveList = (SocketContext *)
        moz_xmalloc(sizeof(SocketContext) * mActiveListSize);
    mIdleList = (SocketContext *)
        moz_xmalloc(sizeof(SocketContext) * mIdleListSize);
    mPollList = (PRPollDesc *)
        moz_xmalloc(sizeof(PRPollDesc) * (mActiveListSize + 1));

    NS_ASSERTION(!gSocketTransportService, "must not instantiate twice");
    gSocketTransportService = this;
}

nsSocketTransportService::~nsSocketTransportService()
{
    NS_ASSERTION(NS_IsMainThread(), "wrong thread");
    NS_ASSERTION(!mInitialized, "not shutdown properly");

    if (mThreadEvent)
        PR_DestroyPollableEvent(mThreadEvent);

    free(mActiveList);
    free(mIdleList);
    free(mPollList);
    gSocketTransportService = nullptr;
}

//-----------------------------------------------------------------------------
// event queue (any thread)

already_AddRefed<nsIThread>
nsSocketTransportService::GetThreadSafely()
{
    DebugMutexAutoLock lock(mLock);
    nsCOMPtr<nsIThread> result = mThread;
    return result.forget();
}

NS_IMETHODIMP
nsSocketTransportService::DispatchFromScript(nsIRunnable *event, uint32_t flags)
{
    nsCOMPtr<nsIRunnable> event_ref(event);
    return Dispatch(event_ref.forget(), flags);
}

NS_IMETHODIMP
nsSocketTransportService::Dispatch(already_AddRefed<nsIRunnable>&& event, uint32_t flags)
{
    nsCOMPtr<nsIRunnable> event_ref(event);
    SOCKET_LOG(("STS dispatch [%p]\n", event_ref.get()));

    nsCOMPtr<nsIThread> thread = GetThreadSafely();
    nsresult rv;
    rv = thread ? thread->Dispatch(event_ref.forget(), flags) : NS_ERROR_NOT_INITIALIZED;
    if (rv == NS_ERROR_UNEXPECTED) {
        // Thread is no longer accepting events. We must have just shut it
        // down on the main thread. Pretend we never saw it.
        rv = NS_ERROR_NOT_INITIALIZED;
    }
    return rv;
}
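
// Dispatch sketch (illustrative; not from the original file): any thread can
// hand work to the socket thread through the nsIEventTarget interface above.
// NS_NewRunnableMethod is the real XPCOM helper this file itself uses in
// Run(); `self` and DoWork are hypothetical names.
//
//   nsCOMPtr<nsIRunnable> event =
//       NS_NewRunnableMethod(self, &SomeClass::DoWork);
//   gSocketTransportService->Dispatch(event.forget(),
//                                     nsIEventTarget::DISPATCH_NORMAL);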

NS_IMETHODIMP
nsSocketTransportService::IsOnCurrentThread(bool *result)
{
    nsCOMPtr<nsIThread> thread = GetThreadSafely();
    NS_ENSURE_TRUE(thread, NS_ERROR_NOT_INITIALIZED);
    return thread->IsOnCurrentThread(result);
}

//-----------------------------------------------------------------------------
// socket api (socket thread only)

NS_IMETHODIMP
nsSocketTransportService::NotifyWhenCanAttachSocket(nsIRunnable *event)
{
    SOCKET_LOG(("nsSocketTransportService::NotifyWhenCanAttachSocket\n"));

    NS_ASSERTION(PR_GetCurrentThread() == gSocketThread, "wrong thread");

    if (CanAttachSocket()) {
        return Dispatch(event, NS_DISPATCH_NORMAL);
    }

    {
        MutexAutoLock lock(mEventQueueLock);
        mPendingSocketQ.PutEvent(event, lock);
    }
    return NS_OK;
}
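
// Retry sketch (illustrative; not from the original file): socket-thread code
// that fails to attach because the socket limit has been reached can queue a
// retry with NotifyWhenCanAttachSocket; DetachSocket dispatches the queued
// event once a slot frees up. `retryEvent` is a hypothetical runnable that
// re-attempts the attach when run:
//
//   if (!CanAttachSocket()) {
//       NotifyWhenCanAttachSocket(retryEvent);
//       return;
//   }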

NS_IMETHODIMP
nsSocketTransportService::AttachSocket(PRFileDesc *fd, nsASocketHandler *handler)
{
    SOCKET_LOG(("nsSocketTransportService::AttachSocket [handler=%p]\n", handler));

    NS_ASSERTION(PR_GetCurrentThread() == gSocketThread, "wrong thread");

    if (!CanAttachSocket()) {
        return NS_ERROR_NOT_AVAILABLE;
    }

    SocketContext sock;
    sock.mFD = fd;
    sock.mHandler = handler;
    sock.mElapsedTime = 0;

    nsresult rv = AddToIdleList(&sock);
    if (NS_SUCCEEDED(rv))
        NS_ADDREF(handler);
    return rv;
}
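
// Lifecycle note (added commentary, not from the original file): a newly
// attached socket always starts on the idle list; DoPollIteration() promotes
// it to the active/poll lists on its next pass once the handler exposes
// non-zero mPollFlags. The NS_ADDREF above is balanced by the NS_RELEASE in
// DetachSocket.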

nsresult
nsSocketTransportService::DetachSocket(SocketContext *listHead, SocketContext *sock)
{
    SOCKET_LOG(("nsSocketTransportService::DetachSocket [handler=%p]\n", sock->mHandler));
    MOZ_ASSERT((listHead == mActiveList) || (listHead == mIdleList),
               "DetachSocket invalid head");

    // inform the handler that this socket is going away
    sock->mHandler->OnSocketDetached(sock->mFD);
    mSentBytesCount += sock->mHandler->ByteCountSent();
    mReceivedBytesCount += sock->mHandler->ByteCountReceived();

    // cleanup
    sock->mFD = nullptr;
    NS_RELEASE(sock->mHandler);

    if (listHead == mActiveList)
        RemoveFromPollList(sock);
    else
        RemoveFromIdleList(sock);

    // NOTE: sock is now an invalid pointer

    //
    // notify the first element on the pending socket queue...
    //
    nsCOMPtr<nsIRunnable> event;
    {
        MutexAutoLock lock(mEventQueueLock);
        mPendingSocketQ.GetPendingEvent(getter_AddRefs(event), lock);
    }
    if (event) {
        // move event from pending queue to dispatch queue
        return Dispatch(event, NS_DISPATCH_NORMAL);
    }
    return NS_OK;
}

nsresult
nsSocketTransportService::AddToPollList(SocketContext *sock)
{
    MOZ_ASSERT(!(static_cast<uint32_t>(sock - mActiveList) < mActiveListSize),
               "AddToPollList Socket Already Active");

    SOCKET_LOG(("nsSocketTransportService::AddToPollList [handler=%p]\n", sock->mHandler));
    if (mActiveCount == mActiveListSize) {
        SOCKET_LOG(("  Active List size of %d met\n", mActiveCount));
        if (!GrowActiveList()) {
            NS_ERROR("too many active sockets");
            return NS_ERROR_OUT_OF_MEMORY;
        }
    }

    uint32_t newSocketIndex = mActiveCount;
    if (ChaosMode::isActive(ChaosFeature::NetworkScheduling)) {
        newSocketIndex = ChaosMode::randomUint32LessThan(mActiveCount + 1);
        PodMove(mActiveList + newSocketIndex + 1, mActiveList + newSocketIndex,
                mActiveCount - newSocketIndex);
        PodMove(mPollList + newSocketIndex + 2, mPollList + newSocketIndex + 1,
                mActiveCount - newSocketIndex);
    }
    mActiveList[newSocketIndex] = *sock;
    mActiveCount++;

    mPollList[newSocketIndex + 1].fd = sock->mFD;
    mPollList[newSocketIndex + 1].in_flags = sock->mHandler->mPollFlags;
    mPollList[newSocketIndex + 1].out_flags = 0;

    SOCKET_LOG(("  active=%u idle=%u\n", mActiveCount, mIdleCount));
    return NS_OK;
}
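
// Layout note (added commentary, not from the original file): mPollList is
// offset by one relative to mActiveList -- mPollList[0] is reserved for the
// pollable thread event, so mActiveList[i] pairs with mPollList[i+1]:
//
//   mPollList:   [ thread event | socket 0 | socket 1 | ... ]
//   mActiveList: [                socket 0 | socket 1 | ... ]
//
// This is why the ChaosMode shuffle above moves mPollList entries at
// newSocketIndex + 1 / + 2 while mActiveList moves at newSocketIndex / + 1.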

void
nsSocketTransportService::RemoveFromPollList(SocketContext *sock)
{
    SOCKET_LOG(("nsSocketTransportService::RemoveFromPollList [handler=%p]\n", sock->mHandler));

    uint32_t index = sock - mActiveList;
    MOZ_ASSERT(index < mActiveListSize, "invalid index");

    SOCKET_LOG(("  index=%u mActiveCount=%u\n", index, mActiveCount));

    if (index != mActiveCount-1) {
        mActiveList[index] = mActiveList[mActiveCount-1];
        mPollList[index+1] = mPollList[mActiveCount];
    }
    mActiveCount--;

    SOCKET_LOG(("  active=%u idle=%u\n", mActiveCount, mIdleCount));
}

nsresult
nsSocketTransportService::AddToIdleList(SocketContext *sock)
{
    MOZ_ASSERT(!(static_cast<uint32_t>(sock - mIdleList) < mIdleListSize),
               "AddToIdleList Socket Already Idle");

    SOCKET_LOG(("nsSocketTransportService::AddToIdleList [handler=%p]\n", sock->mHandler));
    if (mIdleCount == mIdleListSize) {
        SOCKET_LOG(("  Idle List size of %d met\n", mIdleCount));
        if (!GrowIdleList()) {
            NS_ERROR("too many idle sockets");
            return NS_ERROR_OUT_OF_MEMORY;
        }
    }

    mIdleList[mIdleCount] = *sock;
    mIdleCount++;

    SOCKET_LOG(("  active=%u idle=%u\n", mActiveCount, mIdleCount));
    return NS_OK;
}

void
nsSocketTransportService::RemoveFromIdleList(SocketContext *sock)
{
    SOCKET_LOG(("nsSocketTransportService::RemoveFromIdleList [handler=%p]\n", sock->mHandler));

    uint32_t index = sock - mIdleList;
    NS_ASSERTION(index < mIdleListSize, "invalid index in idle list");

    if (index != mIdleCount-1)
        mIdleList[index] = mIdleList[mIdleCount-1];
    mIdleCount--;

    SOCKET_LOG(("  active=%u idle=%u\n", mActiveCount, mIdleCount));
}

void
nsSocketTransportService::MoveToIdleList(SocketContext *sock)
{
    nsresult rv = AddToIdleList(sock);
    if (NS_FAILED(rv))
        DetachSocket(mActiveList, sock);
    else
        RemoveFromPollList(sock);
}

void
nsSocketTransportService::MoveToPollList(SocketContext *sock)
{
    nsresult rv = AddToPollList(sock);
    if (NS_FAILED(rv))
        DetachSocket(mIdleList, sock);
    else
        RemoveFromIdleList(sock);
}

bool
nsSocketTransportService::GrowActiveList()
{
    int32_t toAdd = gMaxCount - mActiveListSize;
    if (toAdd > 100)
        toAdd = 100;
    if (toAdd < 1)
        return false;

    mActiveListSize += toAdd;
    mActiveList = (SocketContext *)
        moz_xrealloc(mActiveList, sizeof(SocketContext) * mActiveListSize);
    mPollList = (PRPollDesc *)
        moz_xrealloc(mPollList, sizeof(PRPollDesc) * (mActiveListSize + 1));
    return true;
}

bool
nsSocketTransportService::GrowIdleList()
{
    int32_t toAdd = gMaxCount - mIdleListSize;
    if (toAdd > 100)
        toAdd = 100;
    if (toAdd < 1)
        return false;

    mIdleListSize += toAdd;
    mIdleList = (SocketContext *)
        moz_xrealloc(mIdleList, sizeof(SocketContext) * mIdleListSize);
    return true;
}
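
// Growth arithmetic (added commentary, not from the original file): the lists
// start at SOCKET_LIMIT_MIN (50) and grow linearly in steps of at most 100
// entries, capped at gMaxCount. For example, if gMaxCount were 550 a list
// would grow 50 -> 150 -> 250 -> 350 -> 450 -> 550, after which Grow*()
// computes toAdd < 1 and returns false, which callers surface as
// NS_ERROR_OUT_OF_MEMORY.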

PRIntervalTime
nsSocketTransportService::PollTimeout()
{
    if (mActiveCount == 0)
        return NS_SOCKET_POLL_TIMEOUT;

    // compute minimum time before any socket timeout expires.
    uint32_t minR = UINT16_MAX;
    for (uint32_t i=0; i<mActiveCount; ++i) {
        const SocketContext &s = mActiveList[i];
        // mPollTimeout could be less than mElapsedTime if setTimeout
        // was called with a value smaller than mElapsedTime.
        uint32_t r = (s.mElapsedTime < s.mHandler->mPollTimeout)
          ? s.mHandler->mPollTimeout - s.mElapsedTime
          : 0;
        if (r < minR)
            minR = r;
    }
    // nsASocketHandler defines UINT16_MAX as do not timeout
    if (minR == UINT16_MAX) {
        SOCKET_LOG(("poll timeout: none\n"));
        return NS_SOCKET_POLL_TIMEOUT;
    }
    SOCKET_LOG(("poll timeout: %lu\n", minR));
    return PR_SecondsToInterval(minR);
}
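
// Worked example (added commentary, not from the original file): with two
// active sockets, one at mPollTimeout=30s / mElapsedTime=12s (r = 18) and one
// at mPollTimeout=10s / mElapsedTime=12s (r = 0, already overdue), minR is 0
// and PR_Poll is asked to return immediately so the overdue socket's timeout
// can be serviced.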

int32_t
nsSocketTransportService::Poll(bool wait, uint32_t *interval,
                               TimeDuration *pollDuration)
{
    PRPollDesc *pollList;
    uint32_t pollCount;
    PRIntervalTime pollTimeout;
    *pollDuration = 0;

    if (mPollList[0].fd) {
        mPollList[0].out_flags = 0;
        pollList = mPollList;
        pollCount = mActiveCount + 1;
        pollTimeout = PollTimeout();
    }
    else {
        // no pollable event, so busy wait...
        pollCount = mActiveCount;
        if (pollCount)
            pollList = &mPollList[1];
        else
            pollList = nullptr;
        pollTimeout = PR_MillisecondsToInterval(25);
    }

    if (!wait)
        pollTimeout = PR_INTERVAL_NO_WAIT;

    PRIntervalTime ts = PR_IntervalNow();

    TimeStamp pollStart;
    if (mTelemetryEnabledPref) {
        pollStart = TimeStamp::NowLoRes();
    }

    SOCKET_LOG(("    timeout = %i milliseconds\n",
                PR_IntervalToMilliseconds(pollTimeout)));
    int32_t rv = PR_Poll(pollList, pollCount, pollTimeout);

    PRIntervalTime passedInterval = PR_IntervalNow() - ts;

    if (mTelemetryEnabledPref && !pollStart.IsNull()) {
        *pollDuration = TimeStamp::NowLoRes() - pollStart;
    }

    SOCKET_LOG(("    ...returned after %i milliseconds\n",
                PR_IntervalToMilliseconds(passedInterval)));

    *interval = PR_IntervalToSeconds(passedInterval);
    return rv;
}
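
// Return-value note (added commentary, not from the original file): PR_Poll
// returns the number of descriptors with events, 0 if it timed out, or -1 on
// error, and the caller (DoPollIteration) only consults out_flags when the
// return value is positive. *interval reports whole seconds spent blocked,
// which is the unit mElapsedTime is accumulated in.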

//-----------------------------------------------------------------------------
// xpcom api

NS_IMPL_ISUPPORTS(nsSocketTransportService,
                  nsISocketTransportService,
                  nsIRoutedSocketTransportService,
                  nsIEventTarget,
                  nsIThreadObserver,
                  nsIRunnable,
                  nsPISocketTransportService,
                  nsIObserver)

// called from main thread only
NS_IMETHODIMP
nsSocketTransportService::Init()
{
    if (!NS_IsMainThread()) {
        NS_ERROR("wrong thread");
        return NS_ERROR_UNEXPECTED;
    }

    if (mInitialized)
        return NS_OK;

    if (mShuttingDown)
        return NS_ERROR_UNEXPECTED;

    if (!mThreadEvent) {
        mThreadEvent = PR_NewPollableEvent();
        //
        // NOTE: per bug 190000, this failure could be caused by Zone-Alarm
        // or similar software.
        //
        // NOTE: per bug 191739, this failure could also be caused by lack
        // of a loopback device on Windows and OS/2 platforms (NSPR creates
        // a loopback socket pair on these platforms to implement a pollable
        // event object). if we can't create a pollable event, then we'll
        // have to "busy wait" to implement the socket event queue :-(
        //
        if (!mThreadEvent) {
            NS_WARNING("running socket transport thread without a pollable event");
            SOCKET_LOG(("running socket transport thread without a pollable event"));
        }
    }

    nsCOMPtr<nsIThread> thread;
    nsresult rv = NS_NewThread(getter_AddRefs(thread), this);
    if (NS_FAILED(rv)) return rv;

    {
        DebugMutexAutoLock lock(mLock);
        // Install our mThread, protecting against concurrent readers
        thread.swap(mThread);
    }

    nsCOMPtr<nsIPrefBranch> tmpPrefService = do_GetService(NS_PREFSERVICE_CONTRACTID);
    if (tmpPrefService) {
        tmpPrefService->AddObserver(SEND_BUFFER_PREF, this, false);
        tmpPrefService->AddObserver(KEEPALIVE_ENABLED_PREF, this, false);
        tmpPrefService->AddObserver(KEEPALIVE_IDLE_TIME_PREF, this, false);
        tmpPrefService->AddObserver(KEEPALIVE_RETRY_INTERVAL_PREF, this, false);
        tmpPrefService->AddObserver(KEEPALIVE_PROBE_COUNT_PREF, this, false);
        tmpPrefService->AddObserver(MAX_TIME_BETWEEN_TWO_POLLS, this, false);
        tmpPrefService->AddObserver(TELEMETRY_PREF, this, false);
        tmpPrefService->AddObserver(MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN, this, false);
    }
    UpdatePrefs();

    nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService();
    if (obsSvc) {
        obsSvc->AddObserver(this, "profile-initial-state", false);
        obsSvc->AddObserver(this, "last-pb-context-exited", false);
    }

    mInitialized = true;
    return NS_OK;
}

// called from main thread only
NS_IMETHODIMP
nsSocketTransportService::Shutdown()
{
    SOCKET_LOG(("nsSocketTransportService::Shutdown\n"));

    NS_ENSURE_STATE(NS_IsMainThread());

    if (!mInitialized)
        return NS_OK;

    if (mShuttingDown)
        return NS_ERROR_UNEXPECTED;

    {
        DebugMutexAutoLock lock(mLock);

        // signal the socket thread to shutdown
        mShuttingDown = true;

        if (mThreadEvent)
            PR_SetPollableEvent(mThreadEvent);
        // else wait for Poll timeout
    }

    // join with thread
    mThread->Shutdown();
    {
        DebugMutexAutoLock lock(mLock);
        // Drop our reference to mThread and make sure that any concurrent
        // readers are excluded
        mThread = nullptr;
    }

    nsCOMPtr<nsIPrefBranch> tmpPrefService = do_GetService(NS_PREFSERVICE_CONTRACTID);
    if (tmpPrefService)
        tmpPrefService->RemoveObserver(SEND_BUFFER_PREF, this);

    nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService();
    if (obsSvc) {
        obsSvc->RemoveObserver(this, "profile-initial-state");
        obsSvc->RemoveObserver(this, "last-pb-context-exited");
    }

#if !defined(MOZILLA_XPCOMRT_API)
    mozilla::net::NetworkActivityMonitor::Shutdown();
#endif // !defined(MOZILLA_XPCOMRT_API)

    mInitialized = false;
    mShuttingDown = false;

    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::GetOffline(bool *offline)
{
    *offline = mOffline;
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::SetOffline(bool offline)
{
    DebugMutexAutoLock lock(mLock);
    if (!mOffline && offline) {
        // signal the socket thread to go offline, so it will detach sockets
        mGoingOffline = true;
        mOffline = true;
    }
    else if (mOffline && !offline) {
        mOffline = false;
    }
    if (mThreadEvent)
        PR_SetPollableEvent(mThreadEvent);

    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::GetKeepaliveIdleTime(int32_t *aKeepaliveIdleTimeS)
{
    MOZ_ASSERT(aKeepaliveIdleTimeS);
    if (NS_WARN_IF(!aKeepaliveIdleTimeS)) {
        return NS_ERROR_NULL_POINTER;
    }
    *aKeepaliveIdleTimeS = mKeepaliveIdleTimeS;
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::GetKeepaliveRetryInterval(int32_t *aKeepaliveRetryIntervalS)
{
    MOZ_ASSERT(aKeepaliveRetryIntervalS);
    if (NS_WARN_IF(!aKeepaliveRetryIntervalS)) {
        return NS_ERROR_NULL_POINTER;
    }
    *aKeepaliveRetryIntervalS = mKeepaliveRetryIntervalS;
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::GetKeepaliveProbeCount(int32_t *aKeepaliveProbeCount)
{
    MOZ_ASSERT(aKeepaliveProbeCount);
    if (NS_WARN_IF(!aKeepaliveProbeCount)) {
        return NS_ERROR_NULL_POINTER;
    }
    *aKeepaliveProbeCount = mKeepaliveProbeCount;
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::CreateTransport(const char **types,
                                          uint32_t typeCount,
                                          const nsACString &host,
                                          int32_t port,
                                          nsIProxyInfo *proxyInfo,
                                          nsISocketTransport **result)
{
    return CreateRoutedTransport(types, typeCount, host, port, NS_LITERAL_CSTRING(""), 0,
                                 proxyInfo, result);
}

NS_IMETHODIMP
nsSocketTransportService::CreateRoutedTransport(const char **types,
                                                uint32_t typeCount,
                                                const nsACString &host,
                                                int32_t port,
                                                const nsACString &hostRoute,
                                                int32_t portRoute,
                                                nsIProxyInfo *proxyInfo,
                                                nsISocketTransport **result)
{
#if defined(MOZILLA_XPCOMRT_API)
    NS_WARNING("nsSocketTransportService::CreateTransport not implemented");
    return NS_ERROR_NOT_IMPLEMENTED;
#else
    NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED);
    NS_ENSURE_TRUE(port >= 0 && port <= 0xFFFF, NS_ERROR_ILLEGAL_VALUE);

    RefPtr<nsSocketTransport> trans = new nsSocketTransport();
    nsresult rv = trans->Init(types, typeCount, host, port, hostRoute, portRoute, proxyInfo);
    if (NS_FAILED(rv)) {
        return rv;
    }

    trans.forget(result);
    return NS_OK;
#endif // defined(MOZILLA_XPCOMRT_API)
}
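
// Creation sketch (illustrative; not from the original file): a plain TCP
// transport is commonly requested with an empty socket-type list (nullptr, 0).
// The host and port here are arbitrary, and error handling is omitted:
//
//   nsCOMPtr<nsISocketTransport> transport;
//   gSocketTransportService->CreateTransport(nullptr, 0,
//                                            NS_LITERAL_CSTRING("example.org"),
//                                            80, nullptr,
//                                            getter_AddRefs(transport));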

NS_IMETHODIMP
nsSocketTransportService::CreateUnixDomainTransport(nsIFile *aPath,
                                                    nsISocketTransport **result)
{
#if defined(MOZILLA_XPCOMRT_API)
    NS_WARNING("nsSocketTransportService::CreateUnixDomainTransport not implemented");
    return NS_ERROR_NOT_IMPLEMENTED;
#else
    nsresult rv;

    NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED);

    nsAutoCString path;
    rv = aPath->GetNativePath(path);
    if (NS_FAILED(rv))
        return rv;

    RefPtr<nsSocketTransport> trans = new nsSocketTransport();

    rv = trans->InitWithFilename(path.get());
    if (NS_FAILED(rv))
        return rv;

    trans.forget(result);
    return NS_OK;
#endif // defined(MOZILLA_XPCOMRT_API)
}

NS_IMETHODIMP
nsSocketTransportService::GetAutodialEnabled(bool *value)
{
    *value = mAutodialEnabled;
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::SetAutodialEnabled(bool value)
{
    mAutodialEnabled = value;
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::OnDispatchedEvent(nsIThreadInternal *thread)
{
    DebugMutexAutoLock lock(mLock);
    if (mThreadEvent)
        PR_SetPollableEvent(mThreadEvent);
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::OnProcessNextEvent(nsIThreadInternal *thread,
                                             bool mayWait)
{
    return NS_OK;
}

NS_IMETHODIMP
nsSocketTransportService::AfterProcessNextEvent(nsIThreadInternal* thread,
                                                bool eventWasProcessed)
{
    return NS_OK;
}

void
nsSocketTransportService::MarkTheLastElementOfPendingQueue()
{
    mServingPendingQueue = false;
}
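
// Mechanism note (added commentary, not from the original file): Run() sets
// mServingPendingQueue and dispatches this method to the socket thread's own
// event queue. Because the queue is FIFO, the flag flips back to false only
// after every event that was already queued has been served, which bounds how
// long one poll iteration drains events before returning to PR_Poll.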

#ifdef MOZ_NUWA_PROCESS
#include "ipc/Nuwa.h"
#endif

NS_IMETHODIMP
nsSocketTransportService::Run()
{
    PR_SetCurrentThreadName("Socket Thread");

#ifdef MOZ_NUWA_PROCESS
    if (IsNuwaProcess()) {
        NuwaMarkCurrentThread(nullptr, nullptr);
    }
#endif

    SOCKET_LOG(("STS thread init\n"));

#if !defined(MOZILLA_XPCOMRT_API)
    psm::InitializeSSLServerCertVerificationThreads();
#endif // !defined(MOZILLA_XPCOMRT_API)

    gSocketThread = PR_GetCurrentThread();

    // add thread event to poll list (mThreadEvent may be nullptr)
    mPollList[0].fd = mThreadEvent;
    mPollList[0].in_flags = PR_POLL_READ;
    mPollList[0].out_flags = 0;

    nsIThread *thread = NS_GetCurrentThread();

    // hook ourselves up to observe event processing for this thread
    nsCOMPtr<nsIThreadInternal> threadInt = do_QueryInterface(thread);
    threadInt->SetObserver(this);

    // make sure the pseudo random number generator is seeded on this thread
    srand(static_cast<unsigned>(PR_Now()));

    // For the calculation of the duration of the last cycle (i.e. the last
    // for-loop iteration before shutdown).
    TimeStamp startOfCycleForLastCycleCalc;
    int numberOfPendingEventsLastCycle;

    // For measuring the poll iteration duration without time spent blocked
    // in poll().
    TimeStamp pollCycleStart;
    // Time blocked in poll().
    TimeDuration singlePollDuration;

    // For calculating the time needed for a new element to run.
    TimeStamp startOfIteration;
    TimeStamp startOfNextIteration;
    int numberOfPendingEvents;

    // If too many pending events are queued, we run some poll() calls between
    // them; the following variable accumulates the time spent blocking in
    // poll().
    TimeDuration pollDuration;

    for (;;) {
        bool pendingEvents = false;
        thread->HasPendingEvents(&pendingEvents);

        numberOfPendingEvents = 0;
        numberOfPendingEventsLastCycle = 0;
        if (mTelemetryEnabledPref) {
            startOfCycleForLastCycleCalc = TimeStamp::NowLoRes();
            startOfNextIteration = TimeStamp::NowLoRes();
        }
        pollDuration = 0;

        do {
            if (mTelemetryEnabledPref) {
                pollCycleStart = TimeStamp::NowLoRes();
            }

            // If there are pending events for this thread then
            // DoPollIteration() should service the network without blocking.
            DoPollIteration(!pendingEvents, &singlePollDuration);

            if (mTelemetryEnabledPref && !pollCycleStart.IsNull()) {
                Telemetry::Accumulate(Telemetry::STS_POLL_BLOCK_TIME,
                                      singlePollDuration.ToMilliseconds());
                Telemetry::AccumulateTimeDelta(
                    Telemetry::STS_POLL_CYCLE,
                    pollCycleStart + singlePollDuration,
                    TimeStamp::NowLoRes());
                pollDuration += singlePollDuration;
            }

            // If nothing was pending before the poll, it might be now
            if (!pendingEvents) {
                thread->HasPendingEvents(&pendingEvents);
            }

            if (pendingEvents) {
                if (!mServingPendingQueue) {
                    nsresult rv = Dispatch(NS_NewRunnableMethod(this,
                        &nsSocketTransportService::MarkTheLastElementOfPendingQueue),
                        nsIEventTarget::DISPATCH_NORMAL);
                    if (NS_FAILED(rv)) {
                        NS_WARNING("Could not dispatch a new event on the "
                                   "socket thread.");
                    } else {
                        mServingPendingQueue = true;
                    }

                    if (mTelemetryEnabledPref) {
                        startOfIteration = startOfNextIteration;
                        // Everything that comes after this point will
                        // be served in the next iteration. If no event
                        // arrives, startOfNextIteration will be reset at the
                        // beginning of each for-loop.
                        startOfNextIteration = TimeStamp::NowLoRes();
                    }
                }
                TimeStamp eventQueueStart = TimeStamp::NowLoRes();
                do {
                    NS_ProcessNextEvent(thread);
                    numberOfPendingEvents++;
                    pendingEvents = false;
                    thread->HasPendingEvents(&pendingEvents);
                } while (pendingEvents && mServingPendingQueue &&
                         ((TimeStamp::NowLoRes() -
                           eventQueueStart).ToMilliseconds() <
                          mMaxTimePerPollIter));

                if (mTelemetryEnabledPref && !mServingPendingQueue &&
                    !startOfIteration.IsNull()) {
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENTS_CYCLE,
                        startOfIteration + pollDuration,
                        TimeStamp::NowLoRes());

                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS,
                        numberOfPendingEvents);

                    numberOfPendingEventsLastCycle += numberOfPendingEvents;
                    numberOfPendingEvents = 0;
                    pollDuration = 0;
                }
            }
        } while (pendingEvents);

        bool goingOffline = false;
        // now that our event queue is empty, check to see if we should exit
        {
            DebugMutexAutoLock lock(mLock);
            if (mShuttingDown) {
                if (mTelemetryEnabledPref &&
                    !startOfCycleForLastCycleCalc.IsNull()) {
                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS_IN_THE_LAST_CYCLE,
                        numberOfPendingEventsLastCycle);
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENT_THE_LAST_CYCLE,
                        startOfCycleForLastCycleCalc,
                        TimeStamp::NowLoRes());
                }
                break;
            }
            if (mGoingOffline) {
                mGoingOffline = false;
                goingOffline = true;
            }
        }
        // Avoid potential deadlock
        if (goingOffline)
            Reset(true);
    }

    SOCKET_LOG(("STS shutting down thread\n"));

    // detach all sockets, including locals
    Reset(false);

    // Final pass over the event queue. This makes sure that events posted by
    // socket detach handlers get processed.
    NS_ProcessPendingEvents(thread);

    gSocketThread = nullptr;

#if !defined(MOZILLA_XPCOMRT_API)
    psm::StopSSLServerCertVerificationThreads();
#endif // !defined(MOZILLA_XPCOMRT_API)

    SOCKET_LOG(("STS thread exit\n"));
    return NS_OK;
}

void
nsSocketTransportService::DetachSocketWithGuard(bool aGuardLocals,
                                                SocketContext *socketList,
                                                int32_t index)
{
    bool isGuarded = false;
    if (aGuardLocals) {
        socketList[index].mHandler->IsLocal(&isGuarded);
        if (!isGuarded)
            socketList[index].mHandler->KeepWhenOffline(&isGuarded);
    }
    if (!isGuarded)
        DetachSocket(socketList, &socketList[index]);
}

void
nsSocketTransportService::Reset(bool aGuardLocals)
{
    // detach any sockets
    int32_t i;
    for (i = mActiveCount - 1; i >= 0; --i) {
        DetachSocketWithGuard(aGuardLocals, mActiveList, i);
    }
    for (i = mIdleCount - 1; i >= 0; --i) {
        DetachSocketWithGuard(aGuardLocals, mIdleList, i);
    }
}
|
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
nsresult
|
2016-02-22 01:41:21 +03:00
|
|
|
nsSocketTransportService::DoPollIteration(bool wait, TimeDuration *pollDuration)
|
2006-05-10 21:30:15 +04:00
|
|
|
{
|
2016-02-22 01:41:21 +03:00
|
|
|
SOCKET_LOG(("STS poll iter [%d]\n", wait));
|
2006-05-10 21:30:15 +04:00
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
int32_t i, count;
|
2016-02-22 01:41:21 +03:00
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
//
|
|
|
|
// poll loop
|
2006-05-10 21:30:15 +04:00
|
|
|
//
|
|
|
|
// walk active list backwards to see if any sockets should actually be
|
|
|
|
// idle, then walk the idle list backwards to see if any idle sockets
|
|
|
|
// should become active. take care to check only idle sockets that
|
|
|
|
// were idle to begin with ;-)
|
|
|
|
//
|
|
|
|
count = mIdleCount;
|
|
|
|
for (i=mActiveCount-1; i>=0; --i) {
|
|
|
|
//---
|
2013-07-01 20:40:12 +04:00
|
|
|
SOCKET_LOG((" active [%u] { handler=%p condition=%x pollflags=%hu }\n", i,
|
2006-05-10 21:30:15 +04:00
|
|
|
mActiveList[i].mHandler,
|
|
|
|
mActiveList[i].mHandler->mCondition,
|
|
|
|
mActiveList[i].mHandler->mPollFlags));
|
|
|
|
//---
|
|
|
|
if (NS_FAILED(mActiveList[i].mHandler->mCondition))
|
2011-04-13 16:59:29 +04:00
|
|
|
DetachSocket(mActiveList, &mActiveList[i]);
|
2006-05-10 21:30:15 +04:00
|
|
|
else {
|
2012-08-22 19:56:38 +04:00
|
|
|
uint16_t in_flags = mActiveList[i].mHandler->mPollFlags;
|
2006-05-10 21:30:15 +04:00
|
|
|
if (in_flags == 0)
|
|
|
|
MoveToIdleList(&mActiveList[i]);
|
2003-02-23 08:07:34 +03:00
|
|
|
else {
|
2006-05-10 21:30:15 +04:00
|
|
|
// update poll flags
|
|
|
|
mPollList[i+1].in_flags = in_flags;
|
|
|
|
mPollList[i+1].out_flags = 0;
|
2003-01-18 04:27:53 +03:00
|
|
|
}
|
|
|
|
}
|
2006-05-10 21:30:15 +04:00
|
|
|
}
|
|
|
|
for (i=count-1; i>=0; --i) {
|
|
|
|
//---
|
2013-07-01 20:40:12 +04:00
|
|
|
SOCKET_LOG((" idle [%u] { handler=%p condition=%x pollflags=%hu }\n", i,
|
2006-05-10 21:30:15 +04:00
|
|
|
mIdleList[i].mHandler,
|
|
|
|
mIdleList[i].mHandler->mCondition,
|
|
|
|
mIdleList[i].mHandler->mPollFlags));
|
|
|
|
//---
|
|
|
|
if (NS_FAILED(mIdleList[i].mHandler->mCondition))
|
2011-04-13 16:59:29 +04:00
|
|
|
DetachSocket(mIdleList, &mIdleList[i]);
|
2006-05-10 21:30:15 +04:00
|
|
|
else if (mIdleList[i].mHandler->mPollFlags != 0)
|
|
|
|
MoveToPollList(&mIdleList[i]);
|
|
|
|
}
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2010-10-28 21:09:08 +04:00
|
|
|
SOCKET_LOG((" calling PR_Poll [active=%u idle=%u]\n", mActiveCount, mIdleCount));
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2011-10-25 19:36:49 +04:00
|
|
|
#if defined(XP_WIN)
|
|
|
|
// 30 active connections is the historic limit before firefox 7's 256. A few
|
|
|
|
// windows systems have trouble with the higher limit, so actively probe a
|
|
|
|
// limit the first time we exceed 30.
|
|
|
|
if ((mActiveCount > 30) && !mProbedMaxCount)
|
|
|
|
ProbeMaxCount();
|
|
|
|
#endif
|
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
// Measures seconds spent while blocked on PR_Poll
|
2016-02-24 14:17:39 +03:00
|
|
|
uint32_t pollInterval;
|
|
|
|
|
2016-01-15 10:21:00 +03:00
|
|
|
int32_t n = 0;
|
|
|
|
#if !defined(MOZILLA_XPCOMRT_API)
|
|
|
|
if (!gIOService->IsNetTearingDown()) {
|
|
|
|
// Let's not do polling during shutdown.
|
2016-02-22 01:41:21 +03:00
|
|
|
n = Poll(wait, &pollInterval, pollDuration);
|
2016-01-15 10:21:00 +03:00
|
|
|
}
|
|
|
|
#else
|
2016-02-22 01:41:21 +03:00
|
|
|
n = Poll(wait, &pollInterval, pollDuration);
|
2016-01-15 10:21:00 +03:00
|
|
|
#endif // defined(MOZILLA_XPCOMRT_API)
|
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
if (n < 0) {
|
2015-01-14 19:39:09 +03:00
|
|
|
SOCKET_LOG((" PR_Poll error [%d] os error [%d]\n", PR_GetError(),
|
|
|
|
PR_GetOSError()));
|
2006-05-10 21:30:15 +04:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
//
|
|
|
|
// service "active" sockets...
|
|
|
|
//
|
2015-03-12 12:25:13 +03:00
|
|
|
uint32_t numberOfOnSocketReadyCalls = 0;
|
2012-08-22 19:56:38 +04:00
|
|
|
for (i=0; i<int32_t(mActiveCount); ++i) {
|
2006-05-10 21:30:15 +04:00
|
|
|
PRPollDesc &desc = mPollList[i+1];
|
|
|
|
SocketContext &s = mActiveList[i];
|
|
|
|
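// when n == 0 the poll simply timed out (or was skipped during
// teardown), so only the timeout bookkeeping below applies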
if (n > 0 && desc.out_flags != 0) {
|
|
|
|
s.mElapsedTime = 0;
|
|
|
|
s.mHandler->OnSocketReady(desc.fd, desc.out_flags);
|
2015-03-12 12:25:13 +03:00
|
|
|
numberOfOnSocketReadyCalls++;
|
2006-05-10 21:30:15 +04:00
|
|
|
}
|
|
|
|
// check for timeout errors unless disabled...
|
2012-09-28 10:57:33 +04:00
|
|
|
else if (s.mHandler->mPollTimeout != UINT16_MAX) {
|
2006-05-10 21:30:15 +04:00
|
|
|
// update elapsed time counter
|
2012-10-03 04:18:47 +04:00
|
|
|
// (NOTE: We explicitly cast UINT16_MAX to be an unsigned value
|
|
|
|
// here -- otherwise, some compilers will treat it as signed,
|
|
|
|
// which makes them fire signed/unsigned-comparison build
|
|
|
|
// warnings for the comparison against 'pollInterval'.)
|
2012-10-26 17:32:10 +04:00
|
|
|
if (MOZ_UNLIKELY(pollInterval >
|
2012-10-03 04:18:47 +04:00
|
|
|
static_cast<uint32_t>(UINT16_MAX) -
|
|
|
|
s.mElapsedTime))
|
2012-09-28 10:57:33 +04:00
|
|
|
s.mElapsedTime = UINT16_MAX;
|
2006-05-10 21:30:15 +04:00
|
|
|
else
|
2012-08-22 19:56:38 +04:00
|
|
|
s.mElapsedTime += uint16_t(pollInterval);
|
2006-05-10 21:30:15 +04:00
|
|
|
// check for timeout expiration
|
|
|
|
if (s.mElapsedTime >= s.mHandler->mPollTimeout) {
|
2005-01-26 05:13:14 +03:00
|
|
|
s.mElapsedTime = 0;
|
2006-05-10 21:30:15 +04:00
|
|
|
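// an out_flags value of -1 signals timeout rather than I/O readiness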
s.mHandler->OnSocketReady(desc.fd, -1);
|
2015-03-12 12:25:13 +03:00
|
|
|
numberOfOnSocketReadyCalls++;
|
2003-01-18 04:27:53 +03:00
|
|
|
}
|
|
|
|
}
|
2006-05-10 21:30:15 +04:00
|
|
|
}
|
2015-03-12 12:25:13 +03:00
|
|
|
if (mTelemetryEnabledPref) {
|
|
|
|
Telemetry::Accumulate(
|
|
|
|
Telemetry::STS_NUMBER_OF_ONSOCKETREADY_CALLS,
|
|
|
|
numberOfOnSocketReadyCalls);
|
|
|
|
}
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
//
|
|
|
|
// check for "dead" sockets and remove them (need to do this in
|
|
|
|
// reverse order obviously).
|
|
|
|
//
|
|
|
|
for (i=mActiveCount-1; i>=0; --i) {
|
|
|
|
if (NS_FAILED(mActiveList[i].mHandler->mCondition))
|
2011-04-13 16:59:29 +04:00
|
|
|
DetachSocket(mActiveList, &mActiveList[i]);
|
2006-05-10 21:30:15 +04:00
|
|
|
}
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2016-02-22 01:41:21 +03:00
|
|
|
if (n != 0 && mPollList[0].out_flags == PR_POLL_READ) {
|
|
|
|
// acknowledge pollable event (wait should not block)
|
|
|
|
if (PR_WaitForPollableEvent(mThreadEvent) != PR_SUCCESS) {
|
2007-05-30 21:30:39 +04:00
|
|
|
// On Windows, the TCP loopback connection in the
|
|
|
|
// pollable event may become broken when a laptop
|
|
|
|
// switches between wired and wireless networks or
|
|
|
|
// wakes up from hibernation. We try to create a
|
|
|
|
// new pollable event. If that fails, we fall back
|
|
|
|
// on "busy wait".
|
2016-02-22 01:41:21 +03:00
|
|
|
{
|
|
|
|
DebugMutexAutoLock lock(mLock);
|
|
|
|
PR_DestroyPollableEvent(mThreadEvent);
|
|
|
|
mThreadEvent = PR_NewPollableEvent();
|
|
|
|
}
|
|
|
|
if (!mThreadEvent) {
|
|
|
|
NS_WARNING("running socket transport thread without "
|
|
|
|
"a pollable event");
|
|
|
|
SOCKET_LOG(("running socket transport thread without "
|
|
|
|
"a pollable event"));
|
2007-05-30 21:30:39 +04:00
|
|
|
}
|
2016-02-22 01:41:21 +03:00
|
|
|
mPollList[0].fd = mThreadEvent;
|
|
|
|
// mPollList[0].in_flags was already set to PR_POLL_READ
|
|
|
|
// in Run().
|
2007-05-30 21:30:39 +04:00
|
|
|
mPollList[0].out_flags = 0;
|
|
|
|
}
|
2003-02-07 00:20:26 +03:00
|
|
|
}
|
2003-01-18 04:27:53 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
2009-02-09 20:31:44 +03:00
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsSocketTransportService::UpdatePrefs()
|
|
|
|
{
|
2015-04-03 16:54:00 +03:00
|
|
|
#if defined(MOZILLA_XPCOMRT_API)
|
|
|
|
NS_WARNING("nsSocketTransportService::UpdatePrefs not implemented");
|
|
|
|
return NS_ERROR_NOT_IMPLEMENTED;
|
|
|
|
#else
|
2009-02-09 20:31:44 +03:00
|
|
|
mSendBufferSize = 0;
|
|
|
|
|
2012-01-17 05:48:29 +04:00
|
|
|
nsCOMPtr<nsIPrefBranch> tmpPrefService = do_GetService(NS_PREFSERVICE_CONTRACTID);
|
2009-02-09 20:31:44 +03:00
|
|
|
if (tmpPrefService) {
|
2012-08-22 19:56:38 +04:00
|
|
|
int32_t bufferSize;
|
2009-02-09 20:31:44 +03:00
|
|
|
nsresult rv = tmpPrefService->GetIntPref(SEND_BUFFER_PREF, &bufferSize);
|
|
|
|
if (NS_SUCCEEDED(rv) && bufferSize > 0)
|
|
|
|
mSendBufferSize = bufferSize;
|
2014-02-06 23:51:38 +04:00
|
|
|
|
|
|
|
// Default TCP Keepalive Values.
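// (times are in seconds; each pref is clamped between 1 and its
// compile-time maximum so a bad value cannot misconfigure the stack)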
|
|
|
|
int32_t keepaliveIdleTimeS;
|
|
|
|
rv = tmpPrefService->GetIntPref(KEEPALIVE_IDLE_TIME_PREF,
|
|
|
|
&keepaliveIdleTimeS);
|
|
|
|
if (NS_SUCCEEDED(rv))
|
|
|
|
mKeepaliveIdleTimeS = clamped(keepaliveIdleTimeS,
|
|
|
|
1, kMaxTCPKeepIdle);
|
|
|
|
|
|
|
|
int32_t keepaliveRetryIntervalS;
|
|
|
|
rv = tmpPrefService->GetIntPref(KEEPALIVE_RETRY_INTERVAL_PREF,
|
|
|
|
&keepaliveRetryIntervalS);
|
|
|
|
if (NS_SUCCEEDED(rv))
|
|
|
|
mKeepaliveRetryIntervalS = clamped(keepaliveRetryIntervalS,
|
|
|
|
1, kMaxTCPKeepIntvl);
|
|
|
|
|
|
|
|
int32_t keepaliveProbeCount;
|
|
|
|
rv = tmpPrefService->GetIntPref(KEEPALIVE_PROBE_COUNT_PREF,
|
|
|
|
&keepaliveProbeCount);
|
|
|
|
if (NS_SUCCEEDED(rv))
|
|
|
|
mKeepaliveProbeCount = clamped(keepaliveProbeCount,
|
|
|
|
1, kMaxTCPKeepCount);
|
|
|
|
bool keepaliveEnabled = false;
|
|
|
|
rv = tmpPrefService->GetBoolPref(KEEPALIVE_ENABLED_PREF,
|
|
|
|
&keepaliveEnabled);
|
|
|
|
if (NS_SUCCEEDED(rv) && keepaliveEnabled != mKeepaliveEnabledPref) {
|
|
|
|
mKeepaliveEnabledPref = keepaliveEnabled;
|
|
|
|
OnKeepaliveEnabledPrefChange();
|
|
|
|
}
|
2015-03-04 16:17:00 +03:00
|
|
|
|
|
|
|
int32_t maxTimePref;
|
|
|
|
rv = tmpPrefService->GetIntPref(MAX_TIME_BETWEEN_TWO_POLLS,
|
|
|
|
&maxTimePref);
|
|
|
|
if (NS_SUCCEEDED(rv) && maxTimePref >= 0) {
|
|
|
|
mMaxTimePerPollIter = maxTimePref;
|
|
|
|
}
|
2015-03-12 12:25:13 +03:00
|
|
|
|
|
|
|
bool telemetryPref = false;
|
|
|
|
rv = tmpPrefService->GetBoolPref(TELEMETRY_PREF,
|
|
|
|
&telemetryPref);
|
|
|
|
if (NS_SUCCEEDED(rv)) {
|
|
|
|
mTelemetryEnabledPref = telemetryPref;
|
|
|
|
}
|
2016-01-18 10:20:00 +03:00
|
|
|
|
|
|
|
int32_t maxTimeForPrClosePref;
|
|
|
|
rv = tmpPrefService->GetIntPref(MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN,
|
|
|
|
&maxTimeForPrClosePref);
|
|
|
|
if (NS_SUCCEEDED(rv) && maxTimeForPrClosePref >=0) {
|
|
|
|
mMaxTimeForPrClosePref = PR_MillisecondsToInterval(maxTimeForPrClosePref);
|
|
|
|
}
|
2009-02-09 20:31:44 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
2015-04-03 16:54:00 +03:00
|
|
|
#endif // defined(MOZILLA_XPCOMRT_API)
|
2009-02-09 20:31:44 +03:00
|
|
|
}
|
|
|
|
|
2014-02-06 23:51:38 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::OnKeepaliveEnabledPrefChange()
|
|
|
|
{
|
|
|
|
// Dispatch to socket thread if we're not executing there.
|
|
|
|
if (PR_GetCurrentThread() != gSocketThread) {
|
|
|
|
gSocketTransportService->Dispatch(
|
|
|
|
NS_NewRunnableMethod(
|
|
|
|
this, &nsSocketTransportService::OnKeepaliveEnabledPrefChange),
|
|
|
|
NS_DISPATCH_NORMAL);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
SOCKET_LOG(("nsSocketTransportService::OnKeepaliveEnabledPrefChange %s",
|
|
|
|
mKeepaliveEnabledPref ? "enabled" : "disabled"));
|
|
|
|
|
|
|
|
// Notify each socket that keepalive has been en/disabled globally.
|
|
|
|
for (int32_t i = mActiveCount - 1; i >= 0; --i) {
|
|
|
|
NotifyKeepaliveEnabledPrefChange(&mActiveList[i]);
|
|
|
|
}
|
|
|
|
for (int32_t i = mIdleCount - 1; i >= 0; --i) {
|
|
|
|
NotifyKeepaliveEnabledPrefChange(&mIdleList[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nsSocketTransportService::NotifyKeepaliveEnabledPrefChange(SocketContext *sock)
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(sock, "SocketContext cannot be null!");
|
|
|
|
MOZ_ASSERT(sock->mHandler, "SocketContext does not have a handler!");
|
|
|
|
|
|
|
|
if (!sock || !sock->mHandler) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
sock->mHandler->OnKeepaliveEnabledPrefChange(mKeepaliveEnabledPref);
|
|
|
|
}
|
|
|
|
|
2009-02-09 20:31:44 +03:00
|
|
|
NS_IMETHODIMP
|
|
|
|
nsSocketTransportService::Observe(nsISupports *subject,
|
|
|
|
const char *topic,
|
2014-01-04 19:02:17 +04:00
|
|
|
const char16_t *data)
|
2009-02-09 20:31:44 +03:00
|
|
|
{
|
2015-04-03 16:54:00 +03:00
|
|
|
#if !defined(MOZILLA_XPCOMRT_API)
|
2009-02-09 20:31:44 +03:00
|
|
|
if (!strcmp(topic, NS_PREFBRANCH_PREFCHANGE_TOPIC_ID)) {
|
|
|
|
UpdatePrefs();
|
2012-10-01 13:43:14 +04:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!strcmp(topic, "profile-initial-state")) {
|
2012-10-10 03:46:22 +04:00
|
|
|
int32_t blipInterval = Preferences::GetInt(BLIP_INTERVAL_PREF, 0);
|
2012-10-01 13:43:14 +04:00
|
|
|
if (blipInterval <= 0) {
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
return net::NetworkActivityMonitor::Init(blipInterval);
|
2009-02-09 20:31:44 +03:00
|
|
|
}
|
2015-04-03 16:54:00 +03:00
|
|
|
#endif // !defined(MOZILLA_XPCOMRT_API)
|
2012-12-08 02:50:43 +04:00
|
|
|
|
|
|
|
if (!strcmp(topic, "last-pb-context-exited")) {
|
|
|
|
nsCOMPtr<nsIRunnable> ev =
|
|
|
|
NS_NewRunnableMethod(this,
|
|
|
|
&nsSocketTransportService::ClosePrivateConnections);
|
|
|
|
nsresult rv = Dispatch(ev, nsIEventTarget::DISPATCH_NORMAL);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
}
|
|
|
|
|
2009-02-09 20:31:44 +03:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2012-12-08 02:50:43 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::ClosePrivateConnections()
|
|
|
|
{
|
|
|
|
// Must be called on the socket thread.
|
|
|
|
#ifdef DEBUG
|
|
|
|
bool onSTSThread;
|
|
|
|
IsOnCurrentThread(&onSTSThread);
|
|
|
|
MOZ_ASSERT(onSTSThread);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
for (int32_t i = mActiveCount - 1; i >= 0; --i) {
|
|
|
|
if (mActiveList[i].mHandler->mIsPrivate) {
|
|
|
|
DetachSocket(mActiveList, &mActiveList[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (int32_t i = mIdleCount - 1; i >= 0; --i) {
|
|
|
|
if (mIdleList[i].mHandler->mIsPrivate) {
|
|
|
|
DetachSocket(mIdleList, &mIdleList[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-03 16:54:00 +03:00
|
|
|
#if !defined(MOZILLA_XPCOMRT_API)
|
2012-12-08 02:50:43 +04:00
|
|
|
mozilla::ClearPrivateSSLState();
|
2015-04-03 16:54:00 +03:00
|
|
|
#endif // !defined(MOZILLA_XPCOMRT_API)
|
2012-12-08 02:50:43 +04:00
|
|
|
}
|
|
|
|
|
2009-02-09 20:31:44 +03:00
|
|
|
NS_IMETHODIMP
|
2012-08-22 19:56:38 +04:00
|
|
|
nsSocketTransportService::GetSendBufferSize(int32_t *value)
|
2009-02-09 20:31:44 +03:00
|
|
|
{
|
|
|
|
*value = mSendBufferSize;
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-13 16:59:29 +04:00
|
|
|
/// ugly OS-specific includes are placed at the bottom of the src for clarity
|
|
|
|
|
|
|
|
#if defined(XP_WIN)
|
|
|
|
#include <windows.h>
|
|
|
|
#elif defined(XP_UNIX) && !defined(AIX) && !defined(NEXTSTEP) && !defined(QNX)
|
|
|
|
#include <sys/resource.h>
|
|
|
|
#endif
|
|
|
|
|
2011-10-25 19:36:49 +04:00
|
|
|
// Right now windows is the only platform that needs this.
|
|
|
|
#if defined(XP_WIN)
|
|
|
|
void
|
|
|
|
nsSocketTransportService::ProbeMaxCount()
|
|
|
|
{
|
|
|
|
NS_ASSERTION(PR_GetCurrentThread() == gSocketThread, "wrong thread");
|
|
|
|
|
|
|
|
if (mProbedMaxCount)
|
|
|
|
return;
|
|
|
|
mProbedMaxCount = true;
|
|
|
|
|
|
|
|
// Allocate and test a PR_Poll up to the gMaxCount number of unconnected
|
|
|
|
// sockets. See bug 692260 - windows should be able to handle 1000 sockets
|
|
|
|
// in select() without a problem, but LSPs have been known to balk at lower
|
|
|
|
// numbers. (64 in the bug).
|
|
|
|
|
|
|
|
// Allocate
|
|
|
|
struct PRPollDesc pfd[SOCKET_LIMIT_TARGET];
|
2012-08-22 19:56:38 +04:00
|
|
|
uint32_t numAllocated = 0;
|
2011-10-25 19:36:49 +04:00
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
for (uint32_t index = 0 ; index < gMaxCount; ++index) {
|
2011-10-25 19:36:49 +04:00
|
|
|
pfd[index].in_flags = PR_POLL_READ | PR_POLL_WRITE | PR_POLL_EXCEPT;
|
|
|
|
pfd[index].out_flags = 0;
|
|
|
|
pfd[index].fd = PR_OpenTCPSocket(PR_AF_INET);
|
|
|
|
if (!pfd[index].fd) {
|
|
|
|
SOCKET_LOG(("Socket Limit Test index %d failed\n", index));
|
|
|
|
if (index < SOCKET_LIMIT_MIN)
|
|
|
|
gMaxCount = SOCKET_LIMIT_MIN;
|
|
|
|
else
|
|
|
|
gMaxCount = index;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
++numAllocated;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test
|
|
|
|
PR_STATIC_ASSERT(SOCKET_LIMIT_MIN >= 32U);
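// the assertion backs the 32-socket step-down below: gMaxCount cannot
// sink past SOCKET_LIMIT_MIN without the clamp catching it first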
|
|
|
|
while (gMaxCount <= numAllocated) {
|
2012-08-22 19:56:38 +04:00
|
|
|
int32_t rv = PR_Poll(pfd, gMaxCount, PR_MillisecondsToInterval(0));
|
2011-10-25 19:36:49 +04:00
|
|
|
|
|
|
|
SOCKET_LOG(("Socket Limit Test poll() size=%d rv=%d\n",
|
|
|
|
gMaxCount, rv));
|
|
|
|
|
|
|
|
if (rv >= 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
SOCKET_LOG(("Socket Limit Test poll confirmationSize=%d rv=%d error=%d\n",
|
|
|
|
gMaxCount, rv, PR_GetError()));
|
|
|
|
|
|
|
|
gMaxCount -= 32;
|
|
|
|
if (gMaxCount <= SOCKET_LIMIT_MIN) {
|
|
|
|
gMaxCount = SOCKET_LIMIT_MIN;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Free
|
2012-08-22 19:56:38 +04:00
|
|
|
for (uint32_t index = 0 ; index < numAllocated; ++index)
|
2011-10-25 19:36:49 +04:00
|
|
|
if (pfd[index].fd)
|
|
|
|
PR_Close(pfd[index].fd);
|
|
|
|
|
|
|
|
SOCKET_LOG(("Socket Limit Test max was confirmed at %d\n", gMaxCount));
|
|
|
|
}
|
|
|
|
#endif // windows
|
|
|
|
|
2011-04-13 16:59:29 +04:00
|
|
|
PRStatus
|
|
|
|
nsSocketTransportService::DiscoverMaxCount()
|
|
|
|
{
|
|
|
|
gMaxCount = SOCKET_LIMIT_MIN;
|
|
|
|
|
|
|
|
#if defined(XP_UNIX) && !defined(AIX) && !defined(NEXTSTEP) && !defined(QNX)
|
|
|
|
// On unix and os x, network sockets and file
|
|
|
|
// descriptors share the same limit. OS X defaults to 256,
|
|
|
|
// most linux to 1000. We can reliably use [sg]etrlimit to
|
|
|
|
// query that and raise it. We will try to raise it 250 past
|
|
|
|
// our target of SOCKET_LIMIT_TARGET so that some descriptors
|
|
|
|
// are still available for other things.
|
|
|
|
|
|
|
|
struct rlimit rlimitData;
|
|
|
|
if (getrlimit(RLIMIT_NOFILE, &rlimitData) == -1)
|
|
|
|
return PR_SUCCESS;
|
|
|
|
if (rlimitData.rlim_cur >= SOCKET_LIMIT_TARGET + 250) {
|
|
|
|
gMaxCount = SOCKET_LIMIT_TARGET;
|
|
|
|
return PR_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
int32_t maxallowed = rlimitData.rlim_max;
|
2011-04-13 16:59:29 +04:00
|
|
|
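// on most systems RLIM_INFINITY is (rlim_t)-1, so an unlimited hard
// cap truncates to -1 here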
if (maxallowed == -1) { /* no limit */
|
|
|
|
maxallowed = SOCKET_LIMIT_TARGET + 250;
|
2012-08-22 19:56:38 +04:00
|
|
|
} else if ((uint32_t)maxallowed < SOCKET_LIMIT_MIN + 250) {
|
2011-04-13 16:59:29 +04:00
|
|
|
return PR_SUCCESS;
|
2012-08-22 19:56:38 +04:00
|
|
|
} else if ((uint32_t)maxallowed > SOCKET_LIMIT_TARGET + 250) {
|
2011-04-13 16:59:29 +04:00
|
|
|
maxallowed = SOCKET_LIMIT_TARGET + 250;
|
|
|
|
}
|
|
|
|
|
|
|
|
rlimitData.rlim_cur = maxallowed;
|
|
|
|
setrlimit(RLIMIT_NOFILE, &rlimitData);
|
|
|
|
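// setrlimit's result is deliberately ignored; whatever getrlimit now
// reports is the limit actually in effect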
if (getrlimit(RLIMIT_NOFILE, &rlimitData) != -1)
|
|
|
|
if (rlimitData.rlim_cur > SOCKET_LIMIT_MIN + 250)
|
|
|
|
gMaxCount = rlimitData.rlim_cur - 250;
|
|
|
|
|
|
|
|
#elif defined(XP_WIN) && !defined(WIN_CE)
|
|
|
|
// >= XP is confirmed to have at least 1000
|
2012-02-23 18:53:55 +04:00
|
|
|
gMaxCount = SOCKET_LIMIT_TARGET;
|
2011-04-13 16:59:29 +04:00
|
|
|
#else
|
|
|
|
// other platforms are harder to test, so leave the safe legacy value
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return PR_SUCCESS;
|
|
|
|
}
|
2012-12-10 18:13:55 +04:00
|
|
|
|
2013-08-31 20:06:14 +04:00
|
|
|
|
|
|
|
// Used to return connection info to Dashboard.cpp
|
2012-12-10 18:13:55 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::AnalyzeConnection(nsTArray<SocketInfo> *data,
|
|
|
|
struct SocketContext *context, bool aActive)
|
|
|
|
{
|
2013-08-31 20:06:14 +04:00
|
|
|
if (context->mHandler->mIsPrivate)
|
|
|
|
return;
|
2012-12-10 18:13:55 +04:00
|
|
|
PRFileDesc *aFD = context->mFD;
|
2016-01-25 16:44:59 +03:00
|
|
|
|
|
|
|
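// descend through any pushed layers (e.g. SSL) to the base NSPR
// identity layer, which reports the real transport type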
PRFileDesc *idLayer = PR_GetIdentitiesLayer(aFD, PR_NSPR_IO_LAYER);
|
|
|
|
|
|
|
|
NS_ENSURE_TRUE_VOID(idLayer);
|
|
|
|
|
|
|
|
bool tcp = PR_GetDescType(idLayer) == PR_DESC_SOCKET_TCP;
|
2012-12-10 18:13:55 +04:00
|
|
|
|
|
|
|
PRNetAddr peer_addr;
|
|
|
|
PR_GetPeerName(aFD, &peer_addr);
|
|
|
|
|
|
|
|
char host[64] = {0};
|
|
|
|
PR_NetAddrToString(&peer_addr, host, sizeof(host));
|
|
|
|
|
|
|
|
uint16_t port;
|
|
|
|
if (peer_addr.raw.family == PR_AF_INET)
|
|
|
|
port = peer_addr.inet.port;
|
|
|
|
else
|
|
|
|
port = peer_addr.ipv6.port;
|
|
|
|
port = PR_ntohs(port);
|
|
|
|
uint64_t sent = context->mHandler->ByteCountSent();
|
|
|
|
uint64_t received = context->mHandler->ByteCountReceived();
|
|
|
|
SocketInfo info = { nsCString(host), sent, received, port, aActive, tcp };
|
|
|
|
|
|
|
|
data->AppendElement(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nsSocketTransportService::GetSocketConnections(nsTArray<SocketInfo> *data)
|
|
|
|
{
|
|
|
|
NS_ASSERTION(PR_GetCurrentThread() == gSocketThread, "wrong thread");
|
|
|
|
for (uint32_t i = 0; i < mActiveCount; i++)
|
|
|
|
AnalyzeConnection(data, &mActiveList[i], true);
|
|
|
|
for (uint32_t i = 0; i < mIdleCount; i++)
|
|
|
|
AnalyzeConnection(data, &mIdleList[i], false);
|
|
|
|
}
|