2005-06-09 18:09:23 +04:00
|
|
|
// vim:set sw=4 sts=4 et cin:
|
2012-05-21 15:12:37 +04:00
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
2003-01-18 04:27:53 +03:00
|
|
|
|
|
|
|
#include "nsSocketTransportService2.h"
|
|
|
|
#include "nsSocketTransport2.h"
|
2015-04-03 16:54:00 +03:00
|
|
|
#include "NetworkActivityMonitor.h"
|
2016-12-16 06:16:31 +03:00
|
|
|
#include "mozilla/IntegerPrintfMacros.h"
|
2015-04-03 16:54:00 +03:00
|
|
|
#include "mozilla/Preferences.h"
|
2016-01-15 10:21:00 +03:00
|
|
|
#include "nsIOService.h"
|
2015-04-03 16:54:00 +03:00
|
|
|
#include "nsASocketHandler.h"
|
2012-07-27 18:03:27 +04:00
|
|
|
#include "nsError.h"
|
2003-09-12 00:32:33 +04:00
|
|
|
#include "prnetdb.h"
|
2003-01-18 04:27:53 +03:00
|
|
|
#include "prerror.h"
|
2009-02-09 20:31:44 +03:00
|
|
|
#include "nsIPrefService.h"
|
2012-01-17 05:48:29 +04:00
|
|
|
#include "nsIPrefBranch.h"
|
2009-02-09 20:31:44 +03:00
|
|
|
#include "nsServiceManagerUtils.h"
|
2012-10-01 13:43:14 +04:00
|
|
|
#include "nsIObserverService.h"
|
2015-11-06 20:00:37 +03:00
|
|
|
#include "mozilla/Atomics.h"
|
2012-10-01 13:43:14 +04:00
|
|
|
#include "mozilla/Services.h"
|
2012-10-26 17:32:10 +04:00
|
|
|
#include "mozilla/Likely.h"
|
2012-12-08 02:50:43 +04:00
|
|
|
#include "mozilla/PublicSSL.h"
|
2014-03-03 09:12:32 +04:00
|
|
|
#include "mozilla/ChaosMode.h"
|
|
|
|
#include "mozilla/PodOperations.h"
|
2015-03-12 12:25:13 +03:00
|
|
|
#include "mozilla/Telemetry.h"
|
2013-09-19 17:54:39 +04:00
|
|
|
#include "nsThreadUtils.h"
|
2013-09-23 07:35:05 +04:00
|
|
|
#include "nsIFile.h"
|
2016-03-19 06:25:00 +03:00
|
|
|
#include "nsIWidget.h"
|
2011-12-02 02:37:57 +04:00
|
|
|
|
2016-12-09 22:09:00 +03:00
|
|
|
#ifdef MOZ_TASK_TRACER
|
|
|
|
#include "GeckoTaskTracer.h"
|
|
|
|
#endif
|
|
|
|
|
2016-05-19 05:02:57 +03:00
|
|
|
namespace mozilla {
|
|
|
|
namespace net {
|
Rollup of bug 645263 and bug 646259: Switch to mozilla:: sync primitives. r=cjones,dbaron,doublec,ehsan src=bsmedberg
Bug 645263, part 0: Count sync primitive ctor/dtors. r=dbaron
Bug 645263, part 1: Migrate content/media to mozilla:: sync primitives. r=doublec
Bug 645263, part 2: Migrate modules/plugin to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 3: Migrate nsComponentManagerImpl to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 4: Migrate everything else to mozilla:: sync primitives. r=dbaron
Bug 645263, part 5: Remove nsAutoLock.*. sr=bsmedberg
Bug 645263, part 6: Make editor test be nicer to deadlock detector. r=ehsan
Bug 645263, part 7: Disable tracemalloc backtraces for xpcshell tests. r=dbaron
Bug 646259: Fix nsCacheService to use a CondVar for notifying. r=cjones
2011-04-01 08:29:02 +04:00
|
|
|
|
2015-11-03 07:35:29 +03:00
|
|
|
LazyLogModule gSocketTransportLog("nsSocketTransport");
|
|
|
|
LazyLogModule gUDPSocketLog("UDPSocket");
|
2016-02-11 12:18:46 +03:00
|
|
|
LazyLogModule gTCPSocketLog("TCPSocket");
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2012-07-30 18:20:58 +04:00
|
|
|
nsSocketTransportService *gSocketTransportService = nullptr;
|
2017-04-27 21:34:42 +03:00
|
|
|
static Atomic<PRThread*, Relaxed> gSocketThread;
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2009-02-09 20:31:44 +03:00
|
|
|
#define SEND_BUFFER_PREF "network.tcp.sendbuffer"
|
2014-02-06 23:51:38 +04:00
|
|
|
#define KEEPALIVE_ENABLED_PREF "network.tcp.keepalive.enabled"
|
|
|
|
#define KEEPALIVE_IDLE_TIME_PREF "network.tcp.keepalive.idle_time"
|
|
|
|
#define KEEPALIVE_RETRY_INTERVAL_PREF "network.tcp.keepalive.retry_interval"
|
|
|
|
#define KEEPALIVE_PROBE_COUNT_PREF "network.tcp.keepalive.probe_count"
|
2016-03-24 03:44:28 +03:00
|
|
|
#define SOCKET_LIMIT_TARGET 1000U
|
|
|
|
#define SOCKET_LIMIT_MIN 50U
|
2017-11-23 11:37:54 +03:00
|
|
|
#define INTERVAL_PREF "network.activity.intervalMilliseconds"
|
2015-03-04 16:17:00 +03:00
|
|
|
#define MAX_TIME_BETWEEN_TWO_POLLS "network.sts.max_time_for_events_between_two_polls"
|
2015-03-12 12:25:13 +03:00
|
|
|
#define TELEMETRY_PREF "toolkit.telemetry.enabled"
|
2016-01-18 10:20:00 +03:00
|
|
|
#define MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN "network.sts.max_time_for_pr_close_during_shutdown"
|
2011-04-13 16:59:29 +04:00
|
|
|
|
2016-09-13 11:43:00 +03:00
|
|
|
#define REPAIR_POLLABLE_EVENT_TIME 10
|
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
uint32_t nsSocketTransportService::gMaxCount;
|
2011-04-13 16:59:29 +04:00
|
|
|
PRCallOnceType nsSocketTransportService::gMaxCountInitOnce;
|
2009-02-09 20:31:44 +03:00
|
|
|
|
2017-04-27 21:34:42 +03:00
|
|
|
// Utility functions
|
|
|
|
bool
|
|
|
|
OnSocketThread()
|
|
|
|
{
|
|
|
|
return PR_GetCurrentThread() == gSocketThread;
|
|
|
|
}
|
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
//-----------------------------------------------------------------------------
|
|
|
|
// ctor/dtor (called on the main/UI thread by the service manager)
|
|
|
|
|
|
|
|
nsSocketTransportService::nsSocketTransportService()
|
2012-07-30 18:20:58 +04:00
|
|
|
: mThread(nullptr)
|
Rollup of bug 645263 and bug 646259: Switch to mozilla:: sync primitives. r=cjones,dbaron,doublec,ehsan src=bsmedberg
Bug 645263, part 0: Count sync primitive ctor/dtors. r=dbaron
Bug 645263, part 1: Migrate content/media to mozilla:: sync primitives. r=doublec
Bug 645263, part 2: Migrate modules/plugin to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 3: Migrate nsComponentManagerImpl to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 4: Migrate everything else to mozilla:: sync primitives. r=dbaron
Bug 645263, part 5: Remove nsAutoLock.*. sr=bsmedberg
Bug 645263, part 6: Make editor test be nicer to deadlock detector. r=ehsan
Bug 645263, part 7: Disable tracemalloc backtraces for xpcshell tests. r=dbaron
Bug 646259: Fix nsCacheService to use a CondVar for notifying. r=cjones
2011-04-01 08:29:02 +04:00
|
|
|
, mLock("nsSocketTransportService::mLock")
|
2011-10-17 18:59:28 +04:00
|
|
|
, mInitialized(false)
|
|
|
|
, mShuttingDown(false)
|
2012-09-18 03:45:10 +04:00
|
|
|
, mOffline(false)
|
|
|
|
, mGoingOffline(false)
|
2016-03-22 18:02:39 +03:00
|
|
|
, mRawThread(nullptr)
|
2011-04-13 16:59:29 +04:00
|
|
|
, mActiveListSize(SOCKET_LIMIT_MIN)
|
|
|
|
, mIdleListSize(SOCKET_LIMIT_MIN)
|
2003-01-18 04:27:53 +03:00
|
|
|
, mActiveCount(0)
|
|
|
|
, mIdleCount(0)
|
2013-07-29 20:08:03 +04:00
|
|
|
, mSentBytesCount(0)
|
|
|
|
, mReceivedBytesCount(0)
|
2009-02-09 20:31:44 +03:00
|
|
|
, mSendBufferSize(0)
|
2014-02-06 23:51:38 +04:00
|
|
|
, mKeepaliveIdleTimeS(600)
|
|
|
|
, mKeepaliveRetryIntervalS(1)
|
|
|
|
, mKeepaliveProbeCount(kDefaultTCPKeepCount)
|
|
|
|
, mKeepaliveEnabledPref(false)
|
2015-03-04 16:17:00 +03:00
|
|
|
, mServingPendingQueue(false)
|
|
|
|
, mMaxTimePerPollIter(100)
|
2015-03-12 12:25:13 +03:00
|
|
|
, mTelemetryEnabledPref(false)
|
2016-01-18 10:20:00 +03:00
|
|
|
, mMaxTimeForPrClosePref(PR_SecondsToInterval(5))
|
2016-03-19 06:25:00 +03:00
|
|
|
, mSleepPhase(false)
|
2011-10-25 19:36:49 +04:00
|
|
|
, mProbedMaxCount(false)
|
2016-09-13 11:43:00 +03:00
|
|
|
#if defined(XP_WIN)
|
|
|
|
, mPolling(false)
|
|
|
|
#endif
|
2003-01-18 04:27:53 +03:00
|
|
|
{
|
2006-05-10 21:30:15 +04:00
|
|
|
NS_ASSERTION(NS_IsMainThread(), "wrong thread");
|
2003-10-06 05:46:31 +04:00
|
|
|
|
2011-04-13 16:59:29 +04:00
|
|
|
PR_CallOnce(&gMaxCountInitOnce, DiscoverMaxCount);
|
|
|
|
mActiveList = (SocketContext *)
|
|
|
|
moz_xmalloc(sizeof(SocketContext) * mActiveListSize);
|
|
|
|
mIdleList = (SocketContext *)
|
|
|
|
moz_xmalloc(sizeof(SocketContext) * mIdleListSize);
|
|
|
|
mPollList = (PRPollDesc *)
|
|
|
|
moz_xmalloc(sizeof(PRPollDesc) * (mActiveListSize + 1));
|
|
|
|
|
2003-10-09 05:54:07 +04:00
|
|
|
NS_ASSERTION(!gSocketTransportService, "must not instantiate twice");
|
2003-01-18 04:27:53 +03:00
|
|
|
gSocketTransportService = this;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsSocketTransportService::~nsSocketTransportService()
{
    NS_ASSERTION(NS_IsMainThread(), "wrong thread");
    NS_ASSERTION(!mInitialized, "not shutdown properly");

    // Release the list storage allocated in the ctor.
    free(mActiveList);
    free(mIdleList);
    free(mPollList);

    gSocketTransportService = nullptr;
}
|
|
|
|
//-----------------------------------------------------------------------------
|
|
|
|
// event queue (any thread)
|
|
|
|
|
2007-12-05 05:18:58 +03:00
|
|
|
already_AddRefed<nsIThread>
|
|
|
|
nsSocketTransportService::GetThreadSafely()
|
|
|
|
{
|
2016-06-03 22:56:26 +03:00
|
|
|
MutexAutoLock lock(mLock);
|
2013-04-22 15:15:59 +04:00
|
|
|
nsCOMPtr<nsIThread> result = mThread;
|
|
|
|
return result.forget();
|
2007-12-05 05:18:58 +03:00
|
|
|
}
|
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
NS_IMETHODIMP
nsSocketTransportService::DispatchFromScript(nsIRunnable *event, uint32_t flags)
{
    // Take a strong reference so we can hand ownership to Dispatch().
    nsCOMPtr<nsIRunnable> strongEvent(event);
    return Dispatch(strongEvent.forget(), flags);
}
|
|
|
|
NS_IMETHODIMP
nsSocketTransportService::Dispatch(already_AddRefed<nsIRunnable> event, uint32_t flags)
{
    nsCOMPtr<nsIRunnable> event_ref(event);
    SOCKET_LOG(("STS dispatch [%p]\n", event_ref.get()));

    nsCOMPtr<nsIThread> thread = GetThreadSafely();
    if (!thread) {
        return NS_ERROR_NOT_INITIALIZED;
    }

    nsresult rv = thread->Dispatch(event_ref.forget(), flags);
    if (rv == NS_ERROR_UNEXPECTED) {
        // Thread is no longer accepting events. We must have just shut it
        // down on the main thread. Pretend we never saw it.
        rv = NS_ERROR_NOT_INITIALIZED;
    }
    return rv;
}
|
2016-05-13 01:15:43 +03:00
|
|
|
// Delayed dispatch is not supported on the socket transport service.
NS_IMETHODIMP
nsSocketTransportService::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t)
{
    return NS_ERROR_NOT_IMPLEMENTED;
}
|
2003-10-06 05:46:31 +04:00
|
|
|
NS_IMETHODIMP
nsSocketTransportService::IsOnCurrentThread(bool *result)
{
    // Delegate to the underlying socket thread, if it exists.
    nsCOMPtr<nsIThread> thread = GetThreadSafely();
    NS_ENSURE_TRUE(thread, NS_ERROR_NOT_INITIALIZED);
    return thread->IsOnCurrentThread(result);
}
|
2017-05-22 21:26:39 +03:00
|
|
|
NS_IMETHODIMP_(bool)
|
|
|
|
nsSocketTransportService::IsOnCurrentThreadInfallible()
|
|
|
|
{
|
|
|
|
nsCOMPtr<nsIThread> thread = GetThreadSafely();
|
|
|
|
NS_ENSURE_TRUE(thread, false);
|
|
|
|
return thread->IsOnCurrentThread();
|
|
|
|
}
|
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
//-----------------------------------------------------------------------------
|
|
|
|
// socket api (socket thread only)
|
|
|
|
|
2008-02-26 23:39:50 +03:00
|
|
|
NS_IMETHODIMP
|
2006-05-10 21:30:15 +04:00
|
|
|
nsSocketTransportService::NotifyWhenCanAttachSocket(nsIRunnable *event)
|
2003-04-09 02:18:10 +04:00
|
|
|
{
|
2010-10-28 21:09:08 +04:00
|
|
|
SOCKET_LOG(("nsSocketTransportService::NotifyWhenCanAttachSocket\n"));
|
2003-04-09 02:18:10 +04:00
|
|
|
|
2017-04-27 21:34:42 +03:00
|
|
|
MOZ_ASSERT(OnSocketThread(), "not on socket thread");
|
2003-10-09 05:54:07 +04:00
|
|
|
|
2003-04-09 02:18:10 +04:00
|
|
|
if (CanAttachSocket()) {
|
2006-05-10 21:30:15 +04:00
|
|
|
return Dispatch(event, NS_DISPATCH_NORMAL);
|
2003-04-09 02:18:10 +04:00
|
|
|
}
|
|
|
|
|
2016-11-01 15:44:09 +03:00
|
|
|
auto *runnable = new LinkedRunnableEvent(event);
|
2016-05-23 19:20:58 +03:00
|
|
|
mPendingSocketQueue.insertBack(runnable);
|
2003-04-09 02:18:10 +04:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2008-02-26 23:39:50 +03:00
|
|
|
/**
 * Attach |fd| with its |handler| to the service. The socket starts on
 * the idle list; the handler is AddRef'd only on success.
 */
NS_IMETHODIMP
nsSocketTransportService::AttachSocket(PRFileDesc *fd, nsASocketHandler *handler)
{
    SOCKET_LOG(("nsSocketTransportService::AttachSocket [handler=%p]\n", handler));

    MOZ_ASSERT(OnSocketThread(), "not on socket thread");

    if (!CanAttachSocket()) {
        return NS_ERROR_NOT_AVAILABLE;
    }

    SocketContext sock;
    sock.mFD = fd;
    sock.mHandler = handler;
    sock.mElapsedTime = 0;

    nsresult rv = AddToIdleList(&sock);
    if (NS_SUCCEEDED(rv))
        NS_ADDREF(handler);
    return rv;
}
|
2016-03-24 03:44:28 +03:00
|
|
|
// the number of sockets that can be attached at any given time is
|
|
|
|
// limited. this is done because some operating systems (e.g., Win9x)
|
|
|
|
// limit the number of sockets that can be created by an application.
|
|
|
|
// AttachSocket will fail if the limit is exceeded. consumers should
|
|
|
|
// call CanAttachSocket and check the result before creating a socket.
|
|
|
|
|
|
|
|
bool
|
|
|
|
nsSocketTransportService::CanAttachSocket()
|
|
|
|
{
|
|
|
|
static bool reported900FDLimit = false;
|
|
|
|
|
|
|
|
uint32_t total = mActiveCount + mIdleCount;
|
|
|
|
bool rv = total < gMaxCount;
|
|
|
|
|
2016-05-23 20:48:18 +03:00
|
|
|
if (mTelemetryEnabledPref &&
|
|
|
|
(((total >= 900) || !rv) && !reported900FDLimit)) {
|
|
|
|
reported900FDLimit = true;
|
|
|
|
Telemetry::Accumulate(Telemetry::NETWORK_SESSION_AT_900FD, true);
|
2016-03-24 03:44:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
/**
 * Detach |sock| from whichever list (|listHead|) it currently lives on.
 * Notifies the handler, accumulates its byte counters, releases it, and
 * removes the entry. After removal |sock| is dangling — do not touch it.
 * Finally, wakes the first waiter queued via NotifyWhenCanAttachSocket.
 */
nsresult
nsSocketTransportService::DetachSocket(SocketContext *listHead, SocketContext *sock)
{
    SOCKET_LOG(("nsSocketTransportService::DetachSocket [handler=%p]\n", sock->mHandler));
    MOZ_ASSERT((listHead == mActiveList) || (listHead == mIdleList),
               "DetachSocket invalid head");

    {
#ifdef MOZ_TASK_TRACER
        tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO);
#endif
        // inform the handler that this socket is going away
        sock->mHandler->OnSocketDetached(sock->mFD);
    }
    mSentBytesCount += sock->mHandler->ByteCountSent();
    mReceivedBytesCount += sock->mHandler->ByteCountReceived();

    // cleanup
    sock->mFD = nullptr;
    NS_RELEASE(sock->mHandler);

    if (listHead == mActiveList)
        RemoveFromPollList(sock);
    else
        RemoveFromIdleList(sock);

    // NOTE: sock is now an invalid pointer

    //
    // notify the first element on the pending socket queue...
    //
    nsCOMPtr<nsIRunnable> event;
    LinkedRunnableEvent *runnable = mPendingSocketQueue.getFirst();
    if (runnable) {
        event = runnable->TakeEvent();
        runnable->remove();
        delete runnable;
    }
    if (event) {
        // move event from pending queue to dispatch queue
        return Dispatch(event, NS_DISPATCH_NORMAL);
    }
    return NS_OK;
}
|
|
|
|
/**
 * Insert |sock| into the active list and create its PRPollDesc entry.
 * mPollList[0] is reserved, so poll entries live at index+1.
 */
nsresult
nsSocketTransportService::AddToPollList(SocketContext *sock)
{
    MOZ_ASSERT(!(static_cast<uint32_t>(sock - mActiveList) < mActiveListSize),
               "AddToPollList Socket Already Active");

    SOCKET_LOG(("nsSocketTransportService::AddToPollList [handler=%p]\n", sock->mHandler));
    if (mActiveCount == mActiveListSize) {
        SOCKET_LOG(("  Active List size of %d met\n", mActiveCount));
        if (!GrowActiveList()) {
            NS_ERROR("too many active sockets");
            return NS_ERROR_OUT_OF_MEMORY;
        }
    }

    uint32_t newSocketIndex = mActiveCount;
    if (ChaosMode::isActive(ChaosFeature::NetworkScheduling)) {
        // Chaos mode: insert at a random position (shifting the tail of
        // both arrays) to shake out ordering assumptions.
        newSocketIndex = ChaosMode::randomUint32LessThan(mActiveCount + 1);
        PodMove(mActiveList + newSocketIndex + 1, mActiveList + newSocketIndex,
                mActiveCount - newSocketIndex);
        PodMove(mPollList + newSocketIndex + 2, mPollList + newSocketIndex + 1,
                mActiveCount - newSocketIndex);
    }
    mActiveList[newSocketIndex] = *sock;
    mActiveCount++;

    mPollList[newSocketIndex + 1].fd = sock->mFD;
    mPollList[newSocketIndex + 1].in_flags = sock->mHandler->mPollFlags;
    mPollList[newSocketIndex + 1].out_flags = 0;

    SOCKET_LOG(("  active=%u idle=%u\n", mActiveCount, mIdleCount));
    return NS_OK;
}
|
|
|
|
void
|
|
|
|
nsSocketTransportService::RemoveFromPollList(SocketContext *sock)
|
|
|
|
{
|
2013-07-01 20:40:12 +04:00
|
|
|
SOCKET_LOG(("nsSocketTransportService::RemoveFromPollList [handler=%p]\n", sock->mHandler));
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
uint32_t index = sock - mActiveList;
|
2015-02-10 01:34:50 +03:00
|
|
|
MOZ_ASSERT(index < mActiveListSize, "invalid index");
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2010-10-28 21:09:08 +04:00
|
|
|
SOCKET_LOG((" index=%u mActiveCount=%u\n", index, mActiveCount));
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2003-02-23 08:07:34 +03:00
|
|
|
if (index != mActiveCount-1) {
|
|
|
|
mActiveList[index] = mActiveList[mActiveCount-1];
|
|
|
|
mPollList[index+1] = mPollList[mActiveCount];
|
2003-01-18 04:27:53 +03:00
|
|
|
}
|
|
|
|
mActiveCount--;
|
|
|
|
|
2010-10-28 21:09:08 +04:00
|
|
|
SOCKET_LOG((" active=%u idle=%u\n", mActiveCount, mIdleCount));
|
2003-01-18 04:27:53 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Append |sock| to the idle list, growing the list if needed.
 * Returns NS_ERROR_OUT_OF_MEMORY when the list cannot grow further.
 */
nsresult
nsSocketTransportService::AddToIdleList(SocketContext *sock)
{
    // Fixed typo in the assertion message ("AddToIdlelList").
    MOZ_ASSERT(!(static_cast<uint32_t>(sock - mIdleList) < mIdleListSize),
               "AddToIdleList Socket Already Idle");

    SOCKET_LOG(("nsSocketTransportService::AddToIdleList [handler=%p]\n", sock->mHandler));
    if (mIdleCount == mIdleListSize) {
        SOCKET_LOG(("  Idle List size of %d met\n", mIdleCount));
        if (!GrowIdleList()) {
            NS_ERROR("too many idle sockets");
            return NS_ERROR_OUT_OF_MEMORY;
        }
    }

    mIdleList[mIdleCount] = *sock;
    mIdleCount++;

    SOCKET_LOG(("  active=%u idle=%u\n", mActiveCount, mIdleCount));
    return NS_OK;
}
|
|
|
|
void
|
|
|
|
nsSocketTransportService::RemoveFromIdleList(SocketContext *sock)
|
|
|
|
{
|
2013-07-01 20:40:12 +04:00
|
|
|
SOCKET_LOG(("nsSocketTransportService::RemoveFromIdleList [handler=%p]\n", sock->mHandler));
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2012-08-22 19:56:38 +04:00
|
|
|
uint32_t index = sock - mIdleList;
|
2011-04-13 16:59:29 +04:00
|
|
|
NS_ASSERTION(index < mIdleListSize, "invalid index in idle list");
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2003-02-23 08:07:34 +03:00
|
|
|
if (index != mIdleCount-1)
|
|
|
|
mIdleList[index] = mIdleList[mIdleCount-1];
|
2003-01-18 04:27:53 +03:00
|
|
|
mIdleCount--;
|
|
|
|
|
2010-10-28 21:09:08 +04:00
|
|
|
SOCKET_LOG((" active=%u idle=%u\n", mActiveCount, mIdleCount));
|
2003-01-18 04:27:53 +03:00
|
|
|
}
|
|
|
|
|
2003-02-23 08:07:34 +03:00
|
|
|
void
|
|
|
|
nsSocketTransportService::MoveToIdleList(SocketContext *sock)
|
|
|
|
{
|
|
|
|
nsresult rv = AddToIdleList(sock);
|
|
|
|
if (NS_FAILED(rv))
|
2011-04-13 16:59:29 +04:00
|
|
|
DetachSocket(mActiveList, sock);
|
2003-02-23 08:07:34 +03:00
|
|
|
else
|
|
|
|
RemoveFromPollList(sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nsSocketTransportService::MoveToPollList(SocketContext *sock)
|
|
|
|
{
|
|
|
|
nsresult rv = AddToPollList(sock);
|
|
|
|
if (NS_FAILED(rv))
|
2011-04-13 16:59:29 +04:00
|
|
|
DetachSocket(mIdleList, sock);
|
2003-02-23 08:07:34 +03:00
|
|
|
else
|
|
|
|
RemoveFromIdleList(sock);
|
|
|
|
}
|
|
|
|
|
2011-09-29 10:19:26 +04:00
|
|
|
bool
|
2011-04-13 16:59:29 +04:00
|
|
|
nsSocketTransportService::GrowActiveList()
|
|
|
|
{
|
2012-08-22 19:56:38 +04:00
|
|
|
int32_t toAdd = gMaxCount - mActiveListSize;
|
2016-03-24 03:44:28 +03:00
|
|
|
if (toAdd > 100) {
|
2011-04-13 16:59:29 +04:00
|
|
|
toAdd = 100;
|
2016-03-24 03:44:28 +03:00
|
|
|
} else if (toAdd < 1) {
|
|
|
|
MOZ_ASSERT(false, "CanAttachSocket() should prevent this");
|
2011-10-17 18:59:28 +04:00
|
|
|
return false;
|
2016-03-24 03:44:28 +03:00
|
|
|
}
|
|
|
|
|
2011-04-13 16:59:29 +04:00
|
|
|
mActiveListSize += toAdd;
|
|
|
|
mActiveList = (SocketContext *)
|
|
|
|
moz_xrealloc(mActiveList, sizeof(SocketContext) * mActiveListSize);
|
|
|
|
mPollList = (PRPollDesc *)
|
|
|
|
moz_xrealloc(mPollList, sizeof(PRPollDesc) * (mActiveListSize + 1));
|
2011-10-17 18:59:28 +04:00
|
|
|
return true;
|
2011-04-13 16:59:29 +04:00
|
|
|
}
|
|
|
|
|
2011-09-29 10:19:26 +04:00
|
|
|
bool
|
2011-04-13 16:59:29 +04:00
|
|
|
nsSocketTransportService::GrowIdleList()
|
|
|
|
{
|
2012-08-22 19:56:38 +04:00
|
|
|
int32_t toAdd = gMaxCount - mIdleListSize;
|
2016-03-24 03:44:28 +03:00
|
|
|
if (toAdd > 100) {
|
2011-04-13 16:59:29 +04:00
|
|
|
toAdd = 100;
|
2016-03-24 03:44:28 +03:00
|
|
|
} else if (toAdd < 1) {
|
|
|
|
MOZ_ASSERT(false, "CanAttachSocket() should prevent this");
|
2011-10-17 18:59:28 +04:00
|
|
|
return false;
|
2016-03-24 03:44:28 +03:00
|
|
|
}
|
2011-04-13 16:59:29 +04:00
|
|
|
|
|
|
|
mIdleListSize += toAdd;
|
|
|
|
mIdleList = (SocketContext *)
|
|
|
|
moz_xrealloc(mIdleList, sizeof(SocketContext) * mIdleListSize);
|
2011-10-17 18:59:28 +04:00
|
|
|
return true;
|
2011-04-13 16:59:29 +04:00
|
|
|
}
|
|
|
|
|
2005-01-26 05:13:14 +03:00
|
|
|
/**
 * Compute the PR_Poll timeout: the smallest remaining per-socket
 * timeout across the active list, or NS_SOCKET_POLL_TIMEOUT when no
 * socket wants a timeout.
 */
PRIntervalTime
nsSocketTransportService::PollTimeout()
{
    if (mActiveCount == 0)
        return NS_SOCKET_POLL_TIMEOUT;

    // compute minimum time before any socket timeout expires.
    uint32_t minR = UINT16_MAX;
    for (uint32_t i=0; i<mActiveCount; ++i) {
        const SocketContext &s = mActiveList[i];
        // mPollTimeout could be less than mElapsedTime if setTimeout
        // was called with a value smaller than mElapsedTime.
        uint32_t r = (s.mElapsedTime < s.mHandler->mPollTimeout)
            ? s.mHandler->mPollTimeout - s.mElapsedTime
            : 0;
        if (r < minR)
            minR = r;
    }
    // nsASocketHandler defines UINT16_MAX as do not timeout
    if (minR == UINT16_MAX) {
        SOCKET_LOG(("poll timeout: none\n"));
        return NS_SOCKET_POLL_TIMEOUT;
    }
    SOCKET_LOG(("poll timeout: %" PRIu32 "\n", minR));
    return PR_SecondsToInterval(minR);
}
|
2012-08-22 19:56:38 +04:00
|
|
|
/**
 * Run one PR_Poll over the active sockets (plus the pollable event in
 * slot 0, when present). Writes the elapsed wall time in seconds to
 * |interval| and, when telemetry is enabled, the poll duration to
 * |pollDuration|. Returns PR_Poll's result.
 */
int32_t
nsSocketTransportService::Poll(uint32_t *interval,
                               TimeDuration *pollDuration)
{
    PRPollDesc *pollList;
    uint32_t pollCount;
    PRIntervalTime pollTimeout;
    *pollDuration = 0;

    // If there are pending events for this thread then
    // DoPollIteration() should service the network without blocking.
    bool pendingEvents = false;
    mRawThread->HasPendingEvents(&pendingEvents);

    if (mPollList[0].fd) {
        // Pollable event present: poll the whole list including slot 0.
        mPollList[0].out_flags = 0;
        pollList = mPollList;
        pollCount = mActiveCount + 1;
        pollTimeout = pendingEvents ? PR_INTERVAL_NO_WAIT : PollTimeout();
    }
    else {
        // no pollable event, so busy wait...
        pollCount = mActiveCount;
        if (pollCount)
            pollList = &mPollList[1];
        else
            pollList = nullptr;
        pollTimeout =
            pendingEvents ? PR_INTERVAL_NO_WAIT : PR_MillisecondsToInterval(25);
    }

    PRIntervalTime ts = PR_IntervalNow();

    TimeStamp pollStart;
    if (mTelemetryEnabledPref) {
        pollStart = TimeStamp::NowLoRes();
    }

    SOCKET_LOG(("  timeout = %i milliseconds\n",
                PR_IntervalToMilliseconds(pollTimeout)));
    int32_t rv = PR_Poll(pollList, pollCount, pollTimeout);

    PRIntervalTime passedInterval = PR_IntervalNow() - ts;

    if (mTelemetryEnabledPref && !pollStart.IsNull()) {
        *pollDuration = TimeStamp::NowLoRes() - pollStart;
    }

    SOCKET_LOG(("  ...returned after %i milliseconds\n",
                PR_IntervalToMilliseconds(passedInterval)));

    *interval = PR_IntervalToSeconds(passedInterval);
    return rv;
}
|
2008-02-21 23:39:20 +03:00
|
|
|
//-----------------------------------------------------------------------------
|
|
|
|
// xpcom api
|
|
|
|
|
2014-04-27 11:06:00 +04:00
|
|
|
NS_IMPL_ISUPPORTS(nsSocketTransportService,
|
|
|
|
nsISocketTransportService,
|
2015-04-09 18:31:59 +03:00
|
|
|
nsIRoutedSocketTransportService,
|
2014-04-27 11:06:00 +04:00
|
|
|
nsIEventTarget,
|
|
|
|
nsIThreadObserver,
|
|
|
|
nsIRunnable,
|
|
|
|
nsPISocketTransportService,
|
|
|
|
nsIObserver)
|
2008-02-21 23:39:20 +03:00
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
// called from main thread only
|
|
|
|
NS_IMETHODIMP
|
|
|
|
nsSocketTransportService::Init()
|
|
|
|
{
|
2006-05-10 21:30:15 +04:00
|
|
|
if (!NS_IsMainThread()) {
|
|
|
|
NS_ERROR("wrong thread");
|
|
|
|
return NS_ERROR_UNEXPECTED;
|
|
|
|
}
|
2003-01-18 04:27:53 +03:00
|
|
|
|
|
|
|
if (mInitialized)
|
|
|
|
return NS_OK;
|
|
|
|
|
2007-07-04 23:33:57 +04:00
|
|
|
if (mShuttingDown)
|
|
|
|
return NS_ERROR_UNEXPECTED;
|
|
|
|
|
2007-12-05 05:18:58 +03:00
|
|
|
nsCOMPtr<nsIThread> thread;
|
2016-05-11 07:11:40 +03:00
|
|
|
nsresult rv = NS_NewNamedThread("Socket Thread", getter_AddRefs(thread), this);
|
2003-01-18 04:27:53 +03:00
|
|
|
if (NS_FAILED(rv)) return rv;
|
2017-07-06 15:00:35 +03:00
|
|
|
|
2007-12-05 05:18:58 +03:00
|
|
|
{
|
2016-06-03 22:56:26 +03:00
|
|
|
MutexAutoLock lock(mLock);
|
2007-12-05 05:18:58 +03:00
|
|
|
// Install our mThread, protecting against concurrent readers
|
|
|
|
thread.swap(mThread);
|
|
|
|
}
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2012-01-17 05:48:29 +04:00
|
|
|
nsCOMPtr<nsIPrefBranch> tmpPrefService = do_GetService(NS_PREFSERVICE_CONTRACTID);
|
2012-09-29 02:39:20 +04:00
|
|
|
if (tmpPrefService) {
|
2011-10-17 18:59:28 +04:00
|
|
|
tmpPrefService->AddObserver(SEND_BUFFER_PREF, this, false);
|
2014-02-06 23:51:38 +04:00
|
|
|
tmpPrefService->AddObserver(KEEPALIVE_ENABLED_PREF, this, false);
|
|
|
|
tmpPrefService->AddObserver(KEEPALIVE_IDLE_TIME_PREF, this, false);
|
|
|
|
tmpPrefService->AddObserver(KEEPALIVE_RETRY_INTERVAL_PREF, this, false);
|
|
|
|
tmpPrefService->AddObserver(KEEPALIVE_PROBE_COUNT_PREF, this, false);
|
2015-03-04 16:17:00 +03:00
|
|
|
tmpPrefService->AddObserver(MAX_TIME_BETWEEN_TWO_POLLS, this, false);
|
2015-03-12 12:25:13 +03:00
|
|
|
tmpPrefService->AddObserver(TELEMETRY_PREF, this, false);
|
2016-01-18 10:20:00 +03:00
|
|
|
tmpPrefService->AddObserver(MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN, this, false);
|
2012-09-29 02:39:20 +04:00
|
|
|
}
|
2009-02-09 20:31:44 +03:00
|
|
|
UpdatePrefs();
|
|
|
|
|
2012-10-01 13:43:14 +04:00
|
|
|
nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService();
|
|
|
|
if (obsSvc) {
|
|
|
|
obsSvc->AddObserver(this, "profile-initial-state", false);
|
2012-12-08 02:50:43 +04:00
|
|
|
obsSvc->AddObserver(this, "last-pb-context-exited", false);
|
2016-03-19 06:25:00 +03:00
|
|
|
obsSvc->AddObserver(this, NS_WIDGET_SLEEP_OBSERVER_TOPIC, true);
|
|
|
|
obsSvc->AddObserver(this, NS_WIDGET_WAKE_OBSERVER_TOPIC, true);
|
2016-09-19 20:29:59 +03:00
|
|
|
obsSvc->AddObserver(this, "xpcom-shutdown-threads", false);
|
2012-10-01 13:43:14 +04:00
|
|
|
}
|
|
|
|
|
2011-10-17 18:59:28 +04:00
|
|
|
mInitialized = true;
|
2003-01-18 04:27:53 +03:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
// called from main thread only
|
|
|
|
NS_IMETHODIMP
|
2016-09-19 20:29:59 +03:00
|
|
|
nsSocketTransportService::Shutdown(bool aXpcomShutdown)
|
2003-01-18 04:27:53 +03:00
|
|
|
{
|
2010-10-28 21:09:08 +04:00
|
|
|
SOCKET_LOG(("nsSocketTransportService::Shutdown\n"));
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
NS_ENSURE_STATE(NS_IsMainThread());
|
2003-01-18 04:27:53 +03:00
|
|
|
|
|
|
|
if (!mInitialized)
|
|
|
|
return NS_OK;
|
|
|
|
|
2007-07-04 23:33:57 +04:00
|
|
|
if (mShuttingDown)
|
|
|
|
return NS_ERROR_UNEXPECTED;
|
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
{
|
2016-06-03 22:56:26 +03:00
|
|
|
MutexAutoLock lock(mLock);
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
// signal the socket thread to shutdown
|
2011-10-17 18:59:28 +04:00
|
|
|
mShuttingDown = true;
|
2003-01-18 04:27:53 +03:00
|
|
|
|
2016-03-22 18:02:39 +03:00
|
|
|
if (mPollableEvent) {
|
|
|
|
mPollableEvent->Signal();
|
|
|
|
}
|
2003-01-18 04:27:53 +03:00
|
|
|
}
|
|
|
|
|
2016-09-19 20:29:59 +03:00
|
|
|
if (!aXpcomShutdown) {
|
|
|
|
return ShutdownThread();
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Tear down the socket thread and unregister every listener that Init()
 * registered. Safe to call only after Shutdown() set mShuttingDown.
 */
nsresult
nsSocketTransportService::ShutdownThread()
{
    SOCKET_LOG(("nsSocketTransportService::ShutdownThread\n"));

    NS_ENSURE_STATE(NS_IsMainThread());

    if (!mInitialized || !mShuttingDown)
        return NS_OK;

    // join with thread
    mThread->Shutdown();
    {
        MutexAutoLock lock(mLock);
        // Drop our reference to mThread and make sure that any concurrent
        // readers are excluded
        mThread = nullptr;
    }

    nsCOMPtr<nsIPrefBranch> tmpPrefService = do_GetService(NS_PREFSERVICE_CONTRACTID);
    if (tmpPrefService) {
        // Remove every pref observer registered in Init(). Previously only
        // SEND_BUFFER_PREF was removed, leaving the other registrations
        // dangling after shutdown.
        tmpPrefService->RemoveObserver(SEND_BUFFER_PREF, this);
        tmpPrefService->RemoveObserver(KEEPALIVE_ENABLED_PREF, this);
        tmpPrefService->RemoveObserver(KEEPALIVE_IDLE_TIME_PREF, this);
        tmpPrefService->RemoveObserver(KEEPALIVE_RETRY_INTERVAL_PREF, this);
        tmpPrefService->RemoveObserver(KEEPALIVE_PROBE_COUNT_PREF, this);
        tmpPrefService->RemoveObserver(MAX_TIME_BETWEEN_TWO_POLLS, this);
        tmpPrefService->RemoveObserver(TELEMETRY_PREF, this);
        tmpPrefService->RemoveObserver(MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN, this);
    }

    nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService();
    if (obsSvc) {
        obsSvc->RemoveObserver(this, "profile-initial-state");
        obsSvc->RemoveObserver(this, "last-pb-context-exited");
        obsSvc->RemoveObserver(this, NS_WIDGET_SLEEP_OBSERVER_TOPIC);
        obsSvc->RemoveObserver(this, NS_WIDGET_WAKE_OBSERVER_TOPIC);
        obsSvc->RemoveObserver(this, "xpcom-shutdown-threads");
    }

    if (mAfterWakeUpTimer) {
        mAfterWakeUpTimer->Cancel();
        mAfterWakeUpTimer = nullptr;
    }

    NetworkActivityMonitor::Shutdown();

    mInitialized = false;
    mShuttingDown = false;

    return NS_OK;
}
|
2012-09-18 03:45:10 +04:00
|
|
|
// Report whether the service is currently in offline mode.
NS_IMETHODIMP
nsSocketTransportService::GetOffline(bool *offline)
{
    *offline = mOffline;
    return NS_OK;
}
|
|
|
|
|
|
|
|
// Transition the service between online and offline mode. Going offline
// also sets mGoingOffline so the socket thread detaches its sockets; in
// either direction the pollable event is signaled to wake the poll loop.
NS_IMETHODIMP
nsSocketTransportService::SetOffline(bool offline)
{
    MutexAutoLock lock(mLock);
    const bool wasOffline = mOffline;
    if (offline && !wasOffline) {
        // signal the socket thread to go offline, so it will detach sockets
        mGoingOffline = true;
        mOffline = true;
    } else if (wasOffline && !offline) {
        mOffline = false;
    }
    // Wake the socket thread so it notices the state change.
    if (mPollableEvent) {
        mPollableEvent->Signal();
    }
    return NS_OK;
}
|
|
|
|
|
2014-02-06 23:51:38 +04:00
|
|
|
// Return the configured TCP keepalive idle time (seconds) cached from prefs.
NS_IMETHODIMP
nsSocketTransportService::GetKeepaliveIdleTime(int32_t *aKeepaliveIdleTimeS)
{
    MOZ_ASSERT(aKeepaliveIdleTimeS);
    if (NS_WARN_IF(!aKeepaliveIdleTimeS))
        return NS_ERROR_NULL_POINTER;

    *aKeepaliveIdleTimeS = mKeepaliveIdleTimeS;
    return NS_OK;
}
|
|
|
|
|
|
|
|
// Return the configured TCP keepalive retry interval (seconds) cached from
// prefs.
NS_IMETHODIMP
nsSocketTransportService::GetKeepaliveRetryInterval(int32_t *aKeepaliveRetryIntervalS)
{
    MOZ_ASSERT(aKeepaliveRetryIntervalS);
    if (NS_WARN_IF(!aKeepaliveRetryIntervalS))
        return NS_ERROR_NULL_POINTER;

    *aKeepaliveRetryIntervalS = mKeepaliveRetryIntervalS;
    return NS_OK;
}
|
|
|
|
|
|
|
|
// Return the configured TCP keepalive probe count cached from prefs.
NS_IMETHODIMP
nsSocketTransportService::GetKeepaliveProbeCount(int32_t *aKeepaliveProbeCount)
{
    MOZ_ASSERT(aKeepaliveProbeCount);
    if (NS_WARN_IF(!aKeepaliveProbeCount))
        return NS_ERROR_NULL_POINTER;

    *aKeepaliveProbeCount = mKeepaliveProbeCount;
    return NS_OK;
}
|
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
// Convenience wrapper: create an un-routed transport by delegating to
// CreateRoutedTransport with an empty route host and port 0.
NS_IMETHODIMP
nsSocketTransportService::CreateTransport(const char **types,
                                          uint32_t typeCount,
                                          const nsACString &host,
                                          int32_t port,
                                          nsIProxyInfo *proxyInfo,
                                          nsISocketTransport **result)
{
    return CreateRoutedTransport(types, typeCount, host, port,
                                 NS_LITERAL_CSTRING(""), 0,
                                 proxyInfo, result);
}
|
|
|
|
|
|
|
|
// Construct and initialize a new nsSocketTransport for |host:port|,
// optionally routed via |hostRoute:portRoute| and/or a proxy. On success the
// caller receives an owning reference through |result|.
NS_IMETHODIMP
nsSocketTransportService::CreateRoutedTransport(const char **types,
                                                uint32_t typeCount,
                                                const nsACString &host,
                                                int32_t port,
                                                const nsACString &hostRoute,
                                                int32_t portRoute,
                                                nsIProxyInfo *proxyInfo,
                                                nsISocketTransport **result)
{
    NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED);
    // Ports must fit in the valid 16-bit range.
    NS_ENSURE_TRUE(port >= 0 && port <= 0xFFFF, NS_ERROR_ILLEGAL_VALUE);

    RefPtr<nsSocketTransport> trans = new nsSocketTransport();
    nsresult initRv = trans->Init(types, typeCount, host, port,
                                  hostRoute, portRoute, proxyInfo);
    if (NS_FAILED(initRv)) {
        return initRv;
    }

    trans.forget(result);
    return NS_OK;
}
|
|
|
|
|
2013-09-06 19:06:23 +04:00
|
|
|
// Create a transport bound to a unix-domain socket at |aPath|. Only
// available on XP_UNIX builds; everywhere else it reports
// NS_ERROR_SOCKET_ADDRESS_NOT_SUPPORTED.
NS_IMETHODIMP
nsSocketTransportService::CreateUnixDomainTransport(nsIFile *aPath,
                                                    nsISocketTransport **result)
{
#ifdef XP_UNIX
    NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED);

    // Resolve the filesystem path for the socket file.
    nsAutoCString path;
    nsresult rv = aPath->GetNativePath(path);
    if (NS_FAILED(rv))
        return rv;

    RefPtr<nsSocketTransport> trans = new nsSocketTransport();
    rv = trans->InitWithFilename(path.get());
    if (NS_FAILED(rv))
        return rv;

    trans.forget(result);
    return NS_OK;
#else
    return NS_ERROR_SOCKET_ADDRESS_NOT_SUPPORTED;
#endif
}
|
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
// nsIThreadObserver callback fired whenever an event is dispatched to the
// socket thread's queue: signals the pollable event so a blocked poll()
// returns and the new event can be serviced.
NS_IMETHODIMP
nsSocketTransportService::OnDispatchedEvent()
{
#ifndef XP_WIN
    // On windows poll can hang and this became worse when we introduced the
    // patch for bug 698882 (see also bug 1292181), therefore we reverted the
    // behavior on windows to be as before bug 698882, e.g. write to the socket
    // also if an event dispatch is on the socket thread and writing to the
    // socket for each event.
    if (OnSocketThread()) {
        // this check is redundant to one done inside ::Signal(), but
        // we can do it here and skip obtaining the lock - given that
        // this is a relatively common occurance its worth the
        // redundant code
        SOCKET_LOG(("OnDispatchedEvent Same Thread Skip Signal\n"));
        return NS_OK;
    }
#else
    if (gIOService->IsNetTearingDown()) {
        // Poll can hang sometimes. If we are in shutdown, we are going to
        // start a watchdog. If we do not exit poll within
        // REPAIR_POLLABLE_EVENT_TIME signal a pollable event again.
        StartPollWatchdog();
    }
#endif

    // Signal under the lock; mPollableEvent may be null when creation failed
    // and the thread is busy-waiting instead.
    MutexAutoLock lock(mLock);
    if (mPollableEvent) {
        mPollableEvent->Signal();
    }
    return NS_OK;
}
|
|
|
|
|
|
|
|
// nsIThreadObserver hook invoked before the socket thread processes an
// event; intentionally a no-op for this service.
NS_IMETHODIMP
nsSocketTransportService::OnProcessNextEvent(nsIThreadInternal *thread,
                                             bool mayWait)
{
    return NS_OK;
}
|
|
|
|
|
2006-06-07 04:06:11 +04:00
|
|
|
// nsIThreadObserver hook invoked after the socket thread processes an
// event; intentionally a no-op for this service.
NS_IMETHODIMP
nsSocketTransportService::AfterProcessNextEvent(nsIThreadInternal* thread,
                                                bool eventWasProcessed)
{
    return NS_OK;
}
|
|
|
|
|
2015-03-04 16:17:00 +03:00
|
|
|
// Runnable target dispatched from Run(): by the time this executes, every
// event that was queued ahead of it has been serviced, so clearing
// mServingPendingQueue tells the drain loop in Run() it may stop.
void
nsSocketTransportService::MarkTheLastElementOfPendingQueue()
{
    mServingPendingQueue = false;
}
|
|
|
|
|
2003-01-18 04:27:53 +03:00
|
|
|
// Entry point of the socket transport thread. Sets up the pollable event and
// thread observer, then loops: poll sockets (DoPollIteration), drain the
// thread's event queue in bounded slices, and exit once mShuttingDown is
// observed. Telemetry bookkeeping is active only when mTelemetryEnabledPref.
NS_IMETHODIMP
nsSocketTransportService::Run()
{
    SOCKET_LOG(("STS thread init %d sockets\n", gMaxCount));

#if defined(XP_WIN)
    // see bug 1361495, gethostname() triggers winsock initialization.
    // so do it here (on parent and child) to protect against it being done first
    // accidentally on the main thread.. especially via PR_GetSystemInfo(). This
    // will also improve latency of first real winsock operation
    // ..
    // If STS-thread is no longer needed this should still be run before exiting

    char ignoredStackBuffer[255];
    Unused << gethostname(ignoredStackBuffer, 255);
#endif

    psm::InitializeSSLServerCertVerificationThreads();

    gSocketThread = PR_GetCurrentThread();

    {
        MutexAutoLock lock(mLock);
        mPollableEvent.reset(new PollableEvent());
        //
        // NOTE: per bug 190000, this failure could be caused by Zone-Alarm
        // or similar software.
        //
        // NOTE: per bug 191739, this failure could also be caused by lack
        // of a loopback device on Windows and OS/2 platforms (it creates
        // a loopback socket pair on these platforms to implement a pollable
        // event object). if we can't create a pollable event, then we'll
        // have to "busy wait" to implement the socket event queue :-(
        //
        if (!mPollableEvent->Valid()) {
            mPollableEvent = nullptr;
            NS_WARNING("running socket transport thread without a pollable event");
            SOCKET_LOG(("running socket transport thread without a pollable event"));
        }

        // Slot 0 of the poll list is reserved for the pollable event.
        mPollList[0].fd = mPollableEvent ? mPollableEvent->PollableFD() : nullptr;
        mPollList[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT;
        mPollList[0].out_flags = 0;
    }

    mRawThread = NS_GetCurrentThread();

    // hook ourselves up to observe event processing for this thread
    nsCOMPtr<nsIThreadInternal> threadInt = do_QueryInterface(mRawThread);
    threadInt->SetObserver(this);

    // make sure the pseudo random number generator is seeded on this thread
    srand(static_cast<unsigned>(PR_Now()));

    // For the calculation of the duration of the last cycle (i.e. the last for-loop
    // iteration before shutdown).
    TimeStamp startOfCycleForLastCycleCalc;
    int numberOfPendingEventsLastCycle;

    // For measuring of the poll iteration duration without time spent blocked
    // in poll().
    TimeStamp pollCycleStart;
    // Time blocked in poll().
    TimeDuration singlePollDuration;

    // For calculating the time needed for a new element to run.
    TimeStamp startOfIteration;
    TimeStamp startOfNextIteration;
    int numberOfPendingEvents;

    // If there is too many pending events queued, we will run some poll()
    // between them and the following variable is cumulative time spent
    // blocking in poll().
    TimeDuration pollDuration;

    for (;;) {
        bool pendingEvents = false;

        numberOfPendingEvents = 0;
        numberOfPendingEventsLastCycle = 0;
        if (mTelemetryEnabledPref) {
            startOfCycleForLastCycleCalc = TimeStamp::NowLoRes();
            startOfNextIteration = TimeStamp::NowLoRes();
        }
        pollDuration = 0;

        do {
            if (mTelemetryEnabledPref) {
                pollCycleStart = TimeStamp::NowLoRes();
            }

            // One poll pass over all sockets; fills singlePollDuration with
            // the time spent blocked in poll().
            DoPollIteration(&singlePollDuration);

            if (mTelemetryEnabledPref && !pollCycleStart.IsNull()) {
                Telemetry::Accumulate(Telemetry::STS_POLL_BLOCK_TIME,
                                      singlePollDuration.ToMilliseconds());
                Telemetry::AccumulateTimeDelta(
                    Telemetry::STS_POLL_CYCLE,
                    pollCycleStart + singlePollDuration,
                    TimeStamp::NowLoRes());
                pollDuration += singlePollDuration;
            }

            mRawThread->HasPendingEvents(&pendingEvents);
            if (pendingEvents) {
                if (!mServingPendingQueue) {
                    // Queue a marker runnable; when it runs (and clears
                    // mServingPendingQueue) everything queued before it has
                    // been served, bounding how long we drain before polling.
                    nsresult rv = Dispatch(
                        NewRunnableMethod("net::nsSocketTransportService::"
                                          "MarkTheLastElementOfPendingQueue",
                                          this,
                                          &nsSocketTransportService::
                                              MarkTheLastElementOfPendingQueue),
                        nsIEventTarget::DISPATCH_NORMAL);
                    if (NS_FAILED(rv)) {
                        NS_WARNING("Could not dispatch a new event on the "
                                   "socket thread.");
                    } else {
                        mServingPendingQueue = true;
                    }

                    if (mTelemetryEnabledPref) {
                        startOfIteration = startOfNextIteration;
                        // Everything that comes after this point will
                        // be served in the next iteration. If no even
                        // arrives, startOfNextIteration will be reset at the
                        // beginning of each for-loop.
                        startOfNextIteration = TimeStamp::NowLoRes();
                    }
                }
                // Drain events, but stop after mMaxTimePerPollIter ms or
                // once the marker runnable has cleared mServingPendingQueue.
                TimeStamp eventQueueStart = TimeStamp::NowLoRes();
                do {
                    NS_ProcessNextEvent(mRawThread);
                    numberOfPendingEvents++;
                    pendingEvents = false;
                    mRawThread->HasPendingEvents(&pendingEvents);
                } while (pendingEvents && mServingPendingQueue &&
                         ((TimeStamp::NowLoRes() -
                           eventQueueStart).ToMilliseconds() <
                          mMaxTimePerPollIter));

                if (mTelemetryEnabledPref && !mServingPendingQueue &&
                    !startOfIteration.IsNull()) {
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENTS_CYCLE,
                        startOfIteration + pollDuration,
                        TimeStamp::NowLoRes());

                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS,
                        numberOfPendingEvents);

                    numberOfPendingEventsLastCycle += numberOfPendingEvents;
                    numberOfPendingEvents = 0;
                    pollDuration = 0;
                }
            }
        } while (pendingEvents);

        bool goingOffline = false;
        // now that our event queue is empty, check to see if we should exit
        {
            MutexAutoLock lock(mLock);
            if (mShuttingDown) {
                if (mTelemetryEnabledPref &&
                    !startOfCycleForLastCycleCalc.IsNull()) {
                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS_IN_THE_LAST_CYCLE,
                        numberOfPendingEventsLastCycle);
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENT_THE_LAST_CYCLE,
                        startOfCycleForLastCycleCalc,
                        TimeStamp::NowLoRes());
                }
                break;
            }
            if (mGoingOffline) {
                mGoingOffline = false;
                goingOffline = true;
            }
        }
        // Avoid potential deadlock
        if (goingOffline)
            Reset(true);
    }

    SOCKET_LOG(("STS shutting down thread\n"));

    // detach all sockets, including locals
    Reset(false);

    // Final pass over the event queue. This makes sure that events posted by
    // socket detach handlers get processed.
    NS_ProcessPendingEvents(mRawThread);

    gSocketThread = nullptr;

    psm::StopSSLServerCertVerificationThreads();

    SOCKET_LOG(("STS thread exit\n"));

    return NS_OK;
}
|
|
|
|
|
2013-04-10 04:46:25 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::DetachSocketWithGuard(bool aGuardLocals,
|
|
|
|
SocketContext *socketList,
|
|
|
|
int32_t index)
|
|
|
|
{
|
|
|
|
bool isGuarded = false;
|
|
|
|
if (aGuardLocals) {
|
|
|
|
socketList[index].mHandler->IsLocal(&isGuarded);
|
|
|
|
if (!isGuarded)
|
|
|
|
socketList[index].mHandler->KeepWhenOffline(&isGuarded);
|
|
|
|
}
|
|
|
|
if (!isGuarded)
|
|
|
|
DetachSocket(socketList, &socketList[index]);
|
|
|
|
}
|
|
|
|
|
2012-09-18 03:45:10 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::Reset(bool aGuardLocals)
|
|
|
|
{
|
|
|
|
// detach any sockets
|
|
|
|
int32_t i;
|
|
|
|
for (i = mActiveCount - 1; i >= 0; --i) {
|
2013-04-10 04:46:25 +04:00
|
|
|
DetachSocketWithGuard(aGuardLocals, mActiveList, i);
|
2012-09-18 03:45:10 +04:00
|
|
|
}
|
|
|
|
for (i = mIdleCount - 1; i >= 0; --i) {
|
2013-04-10 04:46:25 +04:00
|
|
|
DetachSocketWithGuard(aGuardLocals, mIdleList, i);
|
2012-09-18 03:45:10 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-05-10 21:30:15 +04:00
|
|
|
// One pass of the socket thread's poll loop: rebalance the active/idle
// lists, poll all sockets (plus the pollable event in slot 0), dispatch
// OnSocketReady for ready or timed-out sockets, sweep dead sockets, and
// acknowledge/repair the pollable event. |pollDuration| receives the time
// spent blocked in poll().
nsresult
nsSocketTransportService::DoPollIteration(TimeDuration *pollDuration)
{
    SOCKET_LOG(("STS poll iter\n"));

    int32_t i, count;
    //
    // poll loop
    //
    // walk active list backwards to see if any sockets should actually be
    // idle, then walk the idle list backwards to see if any idle sockets
    // should become active. take care to check only idle sockets that
    // were idle to begin with ;-)
    //
    count = mIdleCount;
    for (i=mActiveCount-1; i>=0; --i) {
        //---
        SOCKET_LOG(("  active [%u] { handler=%p condition=%" PRIx32 " pollflags=%hu }\n", i,
                    mActiveList[i].mHandler,
                    static_cast<uint32_t>(mActiveList[i].mHandler->mCondition),
                    mActiveList[i].mHandler->mPollFlags));
        //---
        if (NS_FAILED(mActiveList[i].mHandler->mCondition))
            DetachSocket(mActiveList, &mActiveList[i]);
        else {
            uint16_t in_flags = mActiveList[i].mHandler->mPollFlags;
            if (in_flags == 0)
                MoveToIdleList(&mActiveList[i]);
            else {
                // update poll flags
                mPollList[i+1].in_flags = in_flags;
                mPollList[i+1].out_flags = 0;
            }
        }
    }
    for (i=count-1; i>=0; --i) {
        //---
        SOCKET_LOG(("  idle [%u] { handler=%p condition=%" PRIx32 " pollflags=%hu }\n", i,
                    mIdleList[i].mHandler,
                    static_cast<uint32_t>(mIdleList[i].mHandler->mCondition),
                    mIdleList[i].mHandler->mPollFlags));
        //---
        if (NS_FAILED(mIdleList[i].mHandler->mCondition))
            DetachSocket(mIdleList, &mIdleList[i]);
        else if (mIdleList[i].mHandler->mPollFlags != 0)
            MoveToPollList(&mIdleList[i]);
    }

    SOCKET_LOG(("  calling PR_Poll [active=%u idle=%u]\n", mActiveCount, mIdleCount));

#if defined(XP_WIN)
    // 30 active connections is the historic limit before firefox 7's 256. A few
    // windows systems have troubles with the higher limit, so actively probe a
    // limit the first time we exceed 30.
    if ((mActiveCount > 30) && !mProbedMaxCount)
        ProbeMaxCount();
#endif

    // Measures seconds spent while blocked on PR_Poll
    uint32_t pollInterval = 0;
    int32_t n = 0;
    *pollDuration = 0;
    if (!gIOService->IsNetTearingDown()) {
        // Let's not do polling during shutdown.
#if defined(XP_WIN)
        StartPolling();
#endif
        n = Poll(&pollInterval, pollDuration);
#if defined(XP_WIN)
        EndPolling();
#endif
    }

    if (n < 0) {
        SOCKET_LOG(("  PR_Poll error [%d] os error [%d]\n", PR_GetError(),
                    PR_GetOSError()));
    }
    else {
        //
        // service "active" sockets...
        //
        uint32_t numberOfOnSocketReadyCalls = 0;
        for (i=0; i<int32_t(mActiveCount); ++i) {
            // mPollList[0] is the pollable event, so socket i maps to i+1.
            PRPollDesc &desc = mPollList[i+1];
            SocketContext &s = mActiveList[i];
            if (n > 0 && desc.out_flags != 0) {
#ifdef MOZ_TASK_TRACER
                tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO);
#endif
                s.mElapsedTime = 0;
                s.mHandler->OnSocketReady(desc.fd, desc.out_flags);
                numberOfOnSocketReadyCalls++;
            }
            // check for timeout errors unless disabled...
            else if (s.mHandler->mPollTimeout != UINT16_MAX) {
                // update elapsed time counter
                // (NOTE: We explicitly cast UINT16_MAX to be an unsigned value
                // here -- otherwise, some compilers will treat it as signed,
                // which makes them fire signed/unsigned-comparison build
                // warnings for the comparison against 'pollInterval'.)
                if (MOZ_UNLIKELY(pollInterval >
                                static_cast<uint32_t>(UINT16_MAX) -
                                s.mElapsedTime))
                    s.mElapsedTime = UINT16_MAX;
                else
                    s.mElapsedTime += uint16_t(pollInterval);
                // check for timeout expiration
                if (s.mElapsedTime >= s.mHandler->mPollTimeout) {
#ifdef MOZ_TASK_TRACER
                    tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO);
#endif
                    s.mElapsedTime = 0;
                    // out_flags of -1 signals a timeout to the handler.
                    s.mHandler->OnSocketReady(desc.fd, -1);
                    numberOfOnSocketReadyCalls++;
                }
            }
        }
        if (mTelemetryEnabledPref) {
            Telemetry::Accumulate(
                Telemetry::STS_NUMBER_OF_ONSOCKETREADY_CALLS,
                numberOfOnSocketReadyCalls);
        }

        //
        // check for "dead" sockets and remove them (need to do this in
        // reverse order obviously).
        //
        for (i=mActiveCount-1; i>=0; --i) {
            if (NS_FAILED(mActiveList[i].mHandler->mCondition))
                DetachSocket(mActiveList, &mActiveList[i]);
        }

        if (n != 0 && (mPollList[0].out_flags & (PR_POLL_READ | PR_POLL_EXCEPT))) {
            MutexAutoLock lock(mLock);

            // acknowledge pollable event (should not block)
            if (mPollableEvent &&
                ((mPollList[0].out_flags & PR_POLL_EXCEPT) ||
                 !mPollableEvent->Clear())) {
                // On Windows, the TCP loopback connection in the
                // pollable event may become broken when a laptop
                // switches between wired and wireless networks or
                // wakes up from hibernation. We try to create a
                // new pollable event. If that fails, we fall back
                // on "busy wait".
                NS_WARNING("Trying to repair mPollableEvent");
                mPollableEvent.reset(new PollableEvent());
                if (!mPollableEvent->Valid()) {
                    mPollableEvent = nullptr;
                }
                SOCKET_LOG(("running socket transport thread without "
                            "a pollable event now valid=%d", !!mPollableEvent));
                mPollList[0].fd = mPollableEvent ? mPollableEvent->PollableFD() : nullptr;
                mPollList[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT;
                mPollList[0].out_flags = 0;
            }
        }
    }

    return NS_OK;
}
|
2009-02-09 20:31:44 +03:00
|
|
|
|
2016-05-11 12:31:01 +03:00
|
|
|
void
|
|
|
|
nsSocketTransportService::UpdateSendBufferPref(nsIPrefBranch *pref)
|
|
|
|
{
|
|
|
|
int32_t bufferSize;
|
|
|
|
|
|
|
|
// If the pref is set, honor it. 0 means use OS defaults.
|
|
|
|
nsresult rv = pref->GetIntPref(SEND_BUFFER_PREF, &bufferSize);
|
|
|
|
if (NS_SUCCEEDED(rv)) {
|
|
|
|
mSendBufferSize = bufferSize;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(XP_WIN)
|
2016-12-17 11:05:34 +03:00
|
|
|
mSendBufferSize = 131072 * 4;
|
2016-05-11 12:31:01 +03:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2009-02-09 20:31:44 +03:00
|
|
|
// Re-read all socket-service prefs (send buffer size, TCP keepalive
// parameters, poll-loop and telemetry tunables) and cache them in member
// variables. Returns NS_OK even when the pref service is unavailable.
nsresult
nsSocketTransportService::UpdatePrefs()
{
    mSendBufferSize = 0;

    nsCOMPtr<nsIPrefBranch> tmpPrefService = do_GetService(NS_PREFSERVICE_CONTRACTID);
    if (tmpPrefService) {
        UpdateSendBufferPref(tmpPrefService);

        // Default TCP Keepalive Values.
        int32_t keepaliveIdleTimeS;
        nsresult rv = tmpPrefService->GetIntPref(KEEPALIVE_IDLE_TIME_PREF,
                                                 &keepaliveIdleTimeS);
        // Clamp each keepalive value into its kernel-supported range.
        if (NS_SUCCEEDED(rv))
            mKeepaliveIdleTimeS = clamped(keepaliveIdleTimeS,
                                          1, kMaxTCPKeepIdle);

        int32_t keepaliveRetryIntervalS;
        rv = tmpPrefService->GetIntPref(KEEPALIVE_RETRY_INTERVAL_PREF,
                                        &keepaliveRetryIntervalS);
        if (NS_SUCCEEDED(rv))
            mKeepaliveRetryIntervalS = clamped(keepaliveRetryIntervalS,
                                               1, kMaxTCPKeepIntvl);

        int32_t keepaliveProbeCount;
        rv = tmpPrefService->GetIntPref(KEEPALIVE_PROBE_COUNT_PREF,
                                        &keepaliveProbeCount);
        if (NS_SUCCEEDED(rv))
            mKeepaliveProbeCount = clamped(keepaliveProbeCount,
                                           1, kMaxTCPKeepCount);
        bool keepaliveEnabled = false;
        rv = tmpPrefService->GetBoolPref(KEEPALIVE_ENABLED_PREF,
                                         &keepaliveEnabled);
        if (NS_SUCCEEDED(rv) && keepaliveEnabled != mKeepaliveEnabledPref) {
            mKeepaliveEnabledPref = keepaliveEnabled;
            // Only notify sockets when the enabled state actually changed.
            OnKeepaliveEnabledPrefChange();
        }

        int32_t maxTimePref;
        rv = tmpPrefService->GetIntPref(MAX_TIME_BETWEEN_TWO_POLLS,
                                        &maxTimePref);
        if (NS_SUCCEEDED(rv) && maxTimePref >= 0) {
            mMaxTimePerPollIter = maxTimePref;
        }

        bool telemetryPref = false;
        rv = tmpPrefService->GetBoolPref(TELEMETRY_PREF,
                                         &telemetryPref);
        if (NS_SUCCEEDED(rv)) {
            mTelemetryEnabledPref = telemetryPref;
        }

        int32_t maxTimeForPrClosePref;
        rv = tmpPrefService->GetIntPref(MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN,
                                        &maxTimeForPrClosePref);
        if (NS_SUCCEEDED(rv) && maxTimeForPrClosePref >=0) {
            mMaxTimeForPrClosePref = PR_MillisecondsToInterval(maxTimeForPrClosePref);
        }
    }

    return NS_OK;
}
|
|
|
|
|
2014-02-06 23:51:38 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::OnKeepaliveEnabledPrefChange()
|
|
|
|
{
|
|
|
|
// Dispatch to socket thread if we're not executing there.
|
2017-04-27 21:34:42 +03:00
|
|
|
if (!OnSocketThread()) {
|
2017-06-12 22:34:10 +03:00
|
|
|
gSocketTransportService->Dispatch(
|
|
|
|
NewRunnableMethod(
|
|
|
|
"net::nsSocketTransportService::OnKeepaliveEnabledPrefChange",
|
|
|
|
this,
|
|
|
|
&nsSocketTransportService::OnKeepaliveEnabledPrefChange),
|
|
|
|
NS_DISPATCH_NORMAL);
|
|
|
|
return;
|
2014-02-06 23:51:38 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
SOCKET_LOG(("nsSocketTransportService::OnKeepaliveEnabledPrefChange %s",
|
|
|
|
mKeepaliveEnabledPref ? "enabled" : "disabled"));
|
|
|
|
|
|
|
|
// Notify each socket that keepalive has been en/disabled globally.
|
|
|
|
for (int32_t i = mActiveCount - 1; i >= 0; --i) {
|
|
|
|
NotifyKeepaliveEnabledPrefChange(&mActiveList[i]);
|
|
|
|
}
|
|
|
|
for (int32_t i = mIdleCount - 1; i >= 0; --i) {
|
|
|
|
NotifyKeepaliveEnabledPrefChange(&mIdleList[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Forward the cached keepalive-enabled pref to one socket's handler.
// Asserts in debug builds that the context and handler are non-null, and
// bails out defensively in release builds.
void
nsSocketTransportService::NotifyKeepaliveEnabledPrefChange(SocketContext *sock)
{
    MOZ_ASSERT(sock, "SocketContext cannot be null!");
    MOZ_ASSERT(sock->mHandler, "SocketContext does not have a handler!");

    // Release-build guard: the asserts above compile out, so avoid a null
    // dereference here rather than crash.
    if (!sock || !sock->mHandler) {
        return;
    }

#ifdef MOZ_TASK_TRACER
    // Attribute the handler callback to a SocketIO source event for tracing.
    tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO);
#endif
    sock->mHandler->OnKeepaliveEnabledPrefChange(mKeepaliveEnabledPref);
}
|
|
|
|
|
2009-02-09 20:31:44 +03:00
|
|
|
// nsIObserver entry point. Handles pref changes, profile startup (network
// activity monitor), private-browsing teardown, timer callbacks, and
// sleep/wake/shutdown notifications. Note the first three topics return (or
// fall through) independently, while the timer/sleep/wake/shutdown topics
// form a single else-if chain.
NS_IMETHODIMP
nsSocketTransportService::Observe(nsISupports *subject,
                                  const char *topic,
                                  const char16_t *data)
{
    if (!strcmp(topic, NS_PREFBRANCH_PREFCHANGE_TOPIC_ID)) {
        UpdatePrefs();
        return NS_OK;
    }

    if (!strcmp(topic, "profile-initial-state")) {
        // Start the network activity monitor only when the pref enables it
        // with a positive blip interval.
        int32_t interval = Preferences::GetInt(INTERVAL_PREF, 0);
        if (interval <= 0) {
            return NS_OK;
        }

        return net::NetworkActivityMonitor::Init(interval);
    }

    if (!strcmp(topic, "last-pb-context-exited")) {
        // Close private-browsing sockets on the socket thread; the actual
        // work happens asynchronously in ClosePrivateConnections().
        nsCOMPtr<nsIRunnable> ev = NewRunnableMethod(
            "net::nsSocketTransportService::ClosePrivateConnections",
            this,
            &nsSocketTransportService::ClosePrivateConnections);
        nsresult rv = Dispatch(ev, nsIEventTarget::DISPATCH_NORMAL);
        NS_ENSURE_SUCCESS(rv, rv);
    }

    if (!strcmp(topic, NS_TIMER_CALLBACK_TOPIC)) {
        nsCOMPtr<nsITimer> timer = do_QueryInterface(subject);
        // The wake-up debounce timer fired: we are fully awake now.
        if (timer == mAfterWakeUpTimer) {
            mAfterWakeUpTimer = nullptr;
            mSleepPhase = false;
        }

#if defined(XP_WIN)
        // Windows-only watchdog for a hung poll during shutdown.
        if (timer == mPollRepairTimer) {
            DoPollRepair();
        }
#endif

    } else if (!strcmp(topic, NS_WIDGET_SLEEP_OBSERVER_TOPIC)) {
        // System is going to sleep; cancel any pending wake-up debounce.
        mSleepPhase = true;
        if (mAfterWakeUpTimer) {
            mAfterWakeUpTimer->Cancel();
            mAfterWakeUpTimer = nullptr;
        }
    } else if (!strcmp(topic, NS_WIDGET_WAKE_OBSERVER_TOPIC)) {
        // System woke up; leave mSleepPhase set until the 2s one-shot timer
        // fires (handled in the NS_TIMER_CALLBACK_TOPIC branch above).
        if (mSleepPhase && !mAfterWakeUpTimer) {
            NS_NewTimerWithObserver(getter_AddRefs(mAfterWakeUpTimer),
                                    this, 2000, nsITimer::TYPE_ONE_SHOT);
        }
    } else if (!strcmp(topic, "xpcom-shutdown-threads")) {
        ShutdownThread();
    }

    return NS_OK;
}
|
|
|
|
|
2012-12-08 02:50:43 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::ClosePrivateConnections()
|
|
|
|
{
|
|
|
|
// Must be called on the socket thread.
|
|
|
|
#ifdef DEBUG
|
|
|
|
bool onSTSThread;
|
|
|
|
IsOnCurrentThread(&onSTSThread);
|
|
|
|
MOZ_ASSERT(onSTSThread);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
for (int32_t i = mActiveCount - 1; i >= 0; --i) {
|
|
|
|
if (mActiveList[i].mHandler->mIsPrivate) {
|
|
|
|
DetachSocket(mActiveList, &mActiveList[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (int32_t i = mIdleCount - 1; i >= 0; --i) {
|
|
|
|
if (mIdleList[i].mHandler->mIsPrivate) {
|
|
|
|
DetachSocket(mIdleList, &mIdleList[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-19 05:02:57 +03:00
|
|
|
ClearPrivateSSLState();
|
2012-12-08 02:50:43 +04:00
|
|
|
}
|
|
|
|
|
2009-02-09 20:31:44 +03:00
|
|
|
// Return the cached send-buffer-size pref (set by UpdateSendBufferPref via
// UpdatePrefs; 0 means "use the platform default").
NS_IMETHODIMP
nsSocketTransportService::GetSendBufferSize(int32_t *value)
{
    *value = mSendBufferSize;
    return NS_OK;
}
|
|
|
|
|
|
|
|
|
2011-04-13 16:59:29 +04:00
|
|
|
/// ugly OS specific includes are placed at the bottom of the src for clarity
|
|
|
|
|
|
|
|
#if defined(XP_WIN)
|
|
|
|
#include <windows.h>
|
|
|
|
#elif defined(XP_UNIX) && !defined(AIX) && !defined(NEXTSTEP) && !defined(QNX)
|
|
|
|
#include <sys/resource.h>
|
|
|
|
#endif
|
|
|
|
|
2011-10-25 19:36:49 +04:00
|
|
|
// Right now the only need to do this is on windows.
#if defined(XP_WIN)
// Empirically verify (once) how many sockets PR_Poll can actually handle on
// this system, shrinking gMaxCount if a poll over the full set fails.
void
nsSocketTransportService::ProbeMaxCount()
{
    MOZ_ASSERT(OnSocketThread(), "not on socket thread");

    // Run the probe at most once per process.
    if (mProbedMaxCount)
        return;
    mProbedMaxCount = true;

    // Allocate and test a PR_Poll up to the gMaxCount number of unconnected
    // sockets. See bug 692260 - windows should be able to handle 1000 sockets
    // in select() without a problem, but LSPs have been known to balk at lower
    // numbers. (64 in the bug).

    // Allocate
    struct PRPollDesc pfd[SOCKET_LIMIT_TARGET];
    uint32_t numAllocated = 0;

    for (uint32_t index = 0 ; index < gMaxCount; ++index) {
        pfd[index].in_flags = PR_POLL_READ | PR_POLL_WRITE | PR_POLL_EXCEPT;
        pfd[index].out_flags = 0;
        pfd[index].fd = PR_OpenTCPSocket(PR_AF_INET);
        if (!pfd[index].fd) {
            // Ran out of sockets before reaching gMaxCount; cap at what we
            // managed to open (never below the floor).
            SOCKET_LOG(("Socket Limit Test index %d failed\n", index));
            if (index < SOCKET_LIMIT_MIN)
                gMaxCount = SOCKET_LIMIT_MIN;
            else
                gMaxCount = index;
            break;
        }
        ++numAllocated;
    }

    // Test
    // Repeatedly try polling gMaxCount sockets, shrinking by 32 on each
    // failure until the poll succeeds or the floor is reached.
    static_assert(SOCKET_LIMIT_MIN >= 32U, "Minimum Socket Limit is >= 32");
    while (gMaxCount <= numAllocated) {
        int32_t rv = PR_Poll(pfd, gMaxCount, PR_MillisecondsToInterval(0));

        SOCKET_LOG(("Socket Limit Test poll() size=%d rv=%d\n",
                    gMaxCount, rv));

        if (rv >= 0)
            break;

        SOCKET_LOG(("Socket Limit Test poll confirmationSize=%d rv=%d error=%d\n",
                    gMaxCount, rv, PR_GetError()));

        gMaxCount -= 32;
        if (gMaxCount <= SOCKET_LIMIT_MIN) {
            gMaxCount = SOCKET_LIMIT_MIN;
            break;
        }
    }

    // Free
    for (uint32_t index = 0 ; index < numAllocated; ++index)
        if (pfd[index].fd)
            PR_Close(pfd[index].fd);

    Telemetry::Accumulate(Telemetry::NETWORK_PROBE_MAXCOUNT, gMaxCount);
    SOCKET_LOG(("Socket Limit Test max was confirmed at %d\n", gMaxCount));
}
#endif // windows
|
|
|
|
|
2011-04-13 16:59:29 +04:00
|
|
|
// Determine how many sockets this process may poll at once and store the
// result in gMaxCount. Starts from the conservative SOCKET_LIMIT_MIN floor
// and raises it per-platform (via rlimit on Unix, a known constant on
// Windows). Always returns PR_SUCCESS.
PRStatus
nsSocketTransportService::DiscoverMaxCount()
{
    gMaxCount = SOCKET_LIMIT_MIN;

#if defined(XP_UNIX) && !defined(AIX) && !defined(NEXTSTEP) && !defined(QNX)
    // On unix and os x network sockets and file
    // descriptors are the same. OS X comes defaulted at 256,
    // most linux at 1000. We can reliably use [sg]rlimit to
    // query that and raise it if needed.

    struct rlimit rlimitData;
    if (getrlimit(RLIMIT_NOFILE, &rlimitData) == -1) // rlimit broken - use min
        return PR_SUCCESS;

    if (rlimitData.rlim_cur >= SOCKET_LIMIT_TARGET) { // larger than target!
        gMaxCount = SOCKET_LIMIT_TARGET;
        return PR_SUCCESS;
    }

    // NOTE(review): rlim_max is truncated into an int32_t here; the -1 test
    // below presumably stands in for RLIM_INFINITY — confirm on platforms
    // where rlim_t is 64-bit.
    int32_t maxallowed = rlimitData.rlim_max;
    if ((uint32_t)maxallowed <= SOCKET_LIMIT_MIN) {
        return PR_SUCCESS; // so small treat as if rlimit is broken
    }

    if ((maxallowed == -1) || // no hard cap - ok to set target
        ((uint32_t)maxallowed >= SOCKET_LIMIT_TARGET)) {
        maxallowed = SOCKET_LIMIT_TARGET;
    }

    // Try raising the soft limit to maxallowed, then re-read to see what the
    // OS actually granted.
    rlimitData.rlim_cur = maxallowed;
    setrlimit(RLIMIT_NOFILE, &rlimitData);
    if ((getrlimit(RLIMIT_NOFILE, &rlimitData) != -1) &&
        (rlimitData.rlim_cur > SOCKET_LIMIT_MIN)) {
        gMaxCount = rlimitData.rlim_cur;
    }

#elif defined(XP_WIN) && !defined(WIN_CE)
    // >= XP is confirmed to have at least 1000
    static_assert(SOCKET_LIMIT_TARGET <= 1000, "SOCKET_LIMIT_TARGET max value is 1000");
    gMaxCount = SOCKET_LIMIT_TARGET;
#else
    // other platforms are harder to test - so leave at safe legacy value
#endif

    return PR_SUCCESS;
}
|
2012-12-10 18:13:55 +04:00
|
|
|
|
2013-08-31 20:06:14 +04:00
|
|
|
|
|
|
|
// Used to return connection info to Dashboard.cpp
|
2012-12-10 18:13:55 +04:00
|
|
|
void
|
|
|
|
nsSocketTransportService::AnalyzeConnection(nsTArray<SocketInfo> *data,
|
|
|
|
struct SocketContext *context, bool aActive)
|
|
|
|
{
|
2013-08-31 20:06:14 +04:00
|
|
|
if (context->mHandler->mIsPrivate)
|
|
|
|
return;
|
2012-12-10 18:13:55 +04:00
|
|
|
PRFileDesc *aFD = context->mFD;
|
2016-01-25 16:44:59 +03:00
|
|
|
|
|
|
|
PRFileDesc *idLayer = PR_GetIdentitiesLayer(aFD, PR_NSPR_IO_LAYER);
|
|
|
|
|
|
|
|
NS_ENSURE_TRUE_VOID(idLayer);
|
|
|
|
|
|
|
|
bool tcp = PR_GetDescType(idLayer) == PR_DESC_SOCKET_TCP;
|
2012-12-10 18:13:55 +04:00
|
|
|
|
|
|
|
PRNetAddr peer_addr;
|
2016-09-10 09:16:14 +03:00
|
|
|
PodZero(&peer_addr);
|
|
|
|
PRStatus rv = PR_GetPeerName(aFD, &peer_addr);
|
|
|
|
if (rv != PR_SUCCESS)
|
|
|
|
return;
|
2012-12-10 18:13:55 +04:00
|
|
|
|
|
|
|
char host[64] = {0};
|
2016-09-10 09:16:14 +03:00
|
|
|
rv = PR_NetAddrToString(&peer_addr, host, sizeof(host));
|
|
|
|
if (rv != PR_SUCCESS)
|
|
|
|
return;
|
2012-12-10 18:13:55 +04:00
|
|
|
|
|
|
|
uint16_t port;
|
|
|
|
if (peer_addr.raw.family == PR_AF_INET)
|
|
|
|
port = peer_addr.inet.port;
|
|
|
|
else
|
|
|
|
port = peer_addr.ipv6.port;
|
|
|
|
port = PR_ntohs(port);
|
|
|
|
uint64_t sent = context->mHandler->ByteCountSent();
|
|
|
|
uint64_t received = context->mHandler->ByteCountReceived();
|
|
|
|
SocketInfo info = { nsCString(host), sent, received, port, aActive, tcp };
|
|
|
|
|
|
|
|
data->AppendElement(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nsSocketTransportService::GetSocketConnections(nsTArray<SocketInfo> *data)
|
|
|
|
{
|
2017-04-27 21:34:42 +03:00
|
|
|
MOZ_ASSERT(OnSocketThread(), "not on socket thread");
|
2012-12-10 18:13:55 +04:00
|
|
|
for (uint32_t i = 0; i < mActiveCount; i++)
|
|
|
|
AnalyzeConnection(data, &mActiveList[i], true);
|
|
|
|
for (uint32_t i = 0; i < mIdleCount; i++)
|
|
|
|
AnalyzeConnection(data, &mIdleList[i], false);
|
|
|
|
}
|
2016-05-19 05:02:57 +03:00
|
|
|
|
2016-09-13 11:43:00 +03:00
|
|
|
#if defined(XP_WIN)
// Arm the shutdown poll watchdog: a repeating timer that re-signals the
// pollable event if PR_Poll fails to return during network teardown.
void
nsSocketTransportService::StartPollWatchdog()
{
    // Start off the timer from a runnable off of the main thread in order to
    // avoid a deadlock, see bug 1370448.
    RefPtr<nsSocketTransportService> self(this);
    NS_DispatchToMainThread(NS_NewRunnableFunction("nsSocketTransportService::StartPollWatchdog",
                                                   [self] {
        // mLock guards mPolling / mPollRepairTimer, which are also touched
        // from the socket thread.
        MutexAutoLock lock(self->mLock);

        // Poll can hang sometimes. If we are in shutdown, we are going to start a
        // watchdog. If we do not exit poll within REPAIR_POLLABLE_EVENT_TIME
        // signal a pollable event again.
        MOZ_ASSERT(gIOService->IsNetTearingDown());
        if (self->mPolling && !self->mPollRepairTimer) {
            NS_NewTimerWithObserver(getter_AddRefs(self->mPollRepairTimer),
                                    self, REPAIR_POLLABLE_EVENT_TIME,
                                    nsITimer::TYPE_REPEATING_SLACK);
        }
    }));
}
|
|
|
|
|
|
|
|
// Watchdog timer callback (see StartPollWatchdog): if a poll is still in
// progress, poke the pollable event to wake it; otherwise the repair timer
// is no longer needed and is cancelled.
void
nsSocketTransportService::DoPollRepair()
{
    MutexAutoLock lock(mLock);
    if (mPolling && mPollableEvent) {
        mPollableEvent->Signal();
    } else if (mPollRepairTimer) {
        mPollRepairTimer->Cancel();
    }
}
|
|
|
|
|
|
|
|
// Mark that the socket thread is entering PR_Poll (under mLock), so the
// watchdog knows a repair signal may be needed.
void
nsSocketTransportService::StartPolling()
{
    MutexAutoLock lock(mLock);
    mPolling = true;
}
|
|
|
|
|
|
|
|
// Mark that PR_Poll has returned (under mLock) and stop the repair timer,
// if one was armed by the watchdog.
void
nsSocketTransportService::EndPolling()
{
    MutexAutoLock lock(mLock);
    mPolling = false;
    if (mPollRepairTimer) {
        mPollRepairTimer->Cancel();
    }
}
|
|
|
|
#endif
|
|
|
|
|
2016-05-19 05:02:57 +03:00
|
|
|
} // namespace net
|
|
|
|
} // namespace mozilla
|