Mirror of https://github.com/mozilla/gecko-dev.git

Backed out changeset f16199c7d45e (bug 1648898) for causing Bug 1649333. CLOSED TREE

Parent: 3700aab557
Commit: b151157eee

@@ -119,12 +119,12 @@ void nsSocketTransportService::SocketContext::MaybeResetEpoch() {
 // ctor/dtor (called on the main/UI thread by the service manager)
 
 nsSocketTransportService::nsSocketTransportService()
-    : mRawThread(nullptr),
+    : mLock("nsSocketTransportService::mLock"),
       mInitialized(false),
       mShuttingDown(false),
-      mLock("nsSocketTransportService::mLock"),
       mOffline(false),
       mGoingOffline(false),
+      mRawThread(nullptr),
       mActiveListSize(SOCKET_LIMIT_MIN),
       mIdleListSize(SOCKET_LIMIT_MIN),
       mActiveCount(0),
@@ -264,17 +264,12 @@ bool nsSocketTransportService::UpdatePortRemapPreference(
 
   bool rv = consumePreference();
 
-  if (!IsOnCurrentThread()) {
-    nsCOMPtr<nsIThread> thread = GetThreadSafely();
-    if (!thread) {
-      // Init hasn't been called yet. Could probably just assert.
-      // If shutdown, the dispatch below will just silently fail.
-      NS_ASSERTION(false, "ApplyPortRemapPreference before STS::Init");
-      return false;
-    }
-    thread->Dispatch(NewRunnableMethod<TPortRemapping>(
-        "net::ApplyPortRemapping", this,
-        &nsSocketTransportService::ApplyPortRemapPreference, portRemapping));
+  if (!IsOnCurrentThreadInfallible() && mThread) {
+    mThread->Dispatch(
+        NewRunnableMethod<TPortRemapping>(
+            "net::ApplyPortRemapping", this,
+            &nsSocketTransportService::ApplyPortRemapPreference, portRemapping),
+        NS_DISPATCH_NORMAL);
   } else {
     ApplyPortRemapPreference(portRemapping);
   }
@@ -296,10 +291,9 @@ nsSocketTransportService::~nsSocketTransportService() {
 // event queue (any thread)
 
 already_AddRefed<nsIThread> nsSocketTransportService::GetThreadSafely() {
-  if (!mRawThread) {  // this is an atomic operation.
-    return nullptr;
-  }
-  return do_AddRef(static_cast<nsIThread*>(mRawThread));
+  MutexAutoLock lock(mLock);
+  nsCOMPtr<nsIThread> result = mThread;
+  return result.forget();
 }
 
 NS_IMETHODIMP
@@ -317,9 +311,8 @@ nsSocketTransportService::Dispatch(already_AddRefed<nsIRunnable> event,
 
   nsCOMPtr<nsIThread> thread = GetThreadSafely();
   nsresult rv;
-  rv = thread && MOZ_LIKELY(mInitialized)
-           ? thread->Dispatch(event_ref.forget(), flags)
-           : NS_ERROR_NOT_INITIALIZED;
+  rv = thread ? thread->Dispatch(event_ref.forget(), flags)
+              : NS_ERROR_NOT_INITIALIZED;
   if (rv == NS_ERROR_UNEXPECTED) {
     // Thread is no longer accepting events. We must have just shut it
     // down on the main thread. Pretend we never saw it.
@@ -353,9 +346,7 @@ nsSocketTransportService::IsOnCurrentThreadInfallible() {
 
 already_AddRefed<nsIDirectTaskDispatcher>
 nsSocketTransportService::GetDirectTaskDispatcherSafely() {
-  if (!mRawThread) {  // this is an atomic operation.
-    return nullptr;
-  }
+  MutexAutoLock lock(mLock);
   nsCOMPtr<nsIDirectTaskDispatcher> result = mDirectTaskDispatcher;
   return result.forget();
 }
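
The GetThreadSafely() and GetDirectTaskDispatcherSafely() hunks above revert from a lock-free check of the atomic mRawThread pointer back to a get-and-addref performed while holding mLock. Below is a minimal, compilable sketch contrasting the two getter patterns; Worker, ServiceSketch, mWorker and mRawWorker are illustrative stand-ins for this note, not the Mozilla types, and the "set once, never cleared" assumption mirrors the comment removed from the header further down.

// Sketch only: contrasts the restored lock-protected getter with the
// backed-out atomic-pointer check, using standard-library stand-ins.
#include <atomic>
#include <memory>
#include <mutex>

struct Worker {};  // stand-in for nsIThread

class ServiceSketch {
 public:
  // Restored pattern: copy the strong reference while holding the lock, so a
  // writer that clears the member cannot race with the copy.
  std::shared_ptr<Worker> GetWorkerLocked() {
    std::lock_guard<std::mutex> lock(mLock);
    return mWorker;
  }

  // Backed-out pattern, roughly: mWorker is set once before mRawWorker is
  // published (release/acquire pairing) and never cleared, so after the
  // atomic check it can be copied without taking the lock.
  std::shared_ptr<Worker> GetWorkerAtomicCheck() {
    if (!mRawWorker.load(std::memory_order_acquire)) {
      return nullptr;
    }
    return mWorker;
  }

  void InitOnce() {
    mWorker = std::make_shared<Worker>();
    mRawWorker.store(mWorker.get(), std::memory_order_release);
  }

 private:
  std::mutex mLock;
  std::shared_ptr<Worker> mWorker;
  std::atomic<Worker*> mRawWorker{nullptr};
};
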
@@ -665,7 +656,6 @@ PRIntervalTime nsSocketTransportService::PollTimeout(PRIntervalTime now) {
 
 int32_t nsSocketTransportService::Poll(TimeDuration* pollDuration,
                                        PRIntervalTime ts) {
-  MOZ_ASSERT(IsOnCurrentThread());
   PRPollDesc* pollList;
   uint32_t pollCount;
   PRIntervalTime pollTimeout;
@@ -674,7 +664,7 @@ int32_t nsSocketTransportService::Poll(TimeDuration* pollDuration,
   // If there are pending events for this thread then
   // DoPollIteration() should service the network without blocking.
   bool pendingEvents = false;
-  mThread->HasPendingEvents(&pendingEvents);
+  mRawThread->HasPendingEvents(&pendingEvents);
 
   if (mPollList[0].fd) {
     mPollList[0].out_flags = 0;
@@ -772,18 +762,17 @@ nsSocketTransportService::Init() {
 
   if (mShuttingDown) return NS_ERROR_UNEXPECTED;
 
-  nsresult rv = NS_NewNamedThread("Socket Thread", getter_AddRefs(mThread));
+  nsCOMPtr<nsIThread> thread;
+  nsresult rv =
+      NS_NewNamedThread("Socket Thread", getter_AddRefs(thread), this);
   if (NS_FAILED(rv)) return rv;
 
-  // Setting mRawThread allows to now query if we are on the socket thread.
-  mRawThread = mThread;
-  // Start the event loop, it must be done after mThread has been set.
-  rv = mThread->Dispatch(do_AddRef(this));
-  NS_ENSURE_SUCCESS(rv, rv);
-  mDirectTaskDispatcher = do_QueryInterface(mThread);
-  MOZ_DIAGNOSTIC_ASSERT(
-      mDirectTaskDispatcher,
-      "Underlying thread must support direct task dispatching");
+  {
+    MutexAutoLock lock(mLock);
+    // Install our mThread, protecting against concurrent readers
+    thread.swap(mThread);
+    mDirectTaskDispatcher = do_QueryInterface(mThread);
+  }
 
   Preferences::RegisterCallbacks(UpdatePrefs, gCallbackPrefs, this);
   UpdatePrefs();
@@ -801,7 +790,6 @@ nsSocketTransportService::Init() {
     obsSvc->AddObserver(this, NS_NETWORK_LINK_TOPIC, false);
   }
 
-  // We can now dispatch tasks to the socket thread.
   mInitialized = true;
   return NS_OK;
 }
@@ -817,12 +805,12 @@ nsSocketTransportService::Shutdown(bool aXpcomShutdown) {
 
   if (mShuttingDown) return NS_ERROR_UNEXPECTED;
 
-  // signal the socket thread to shutdown
-  mShuttingDown = true;
-
   {
     MutexAutoLock lock(mLock);
 
+    // signal the socket thread to shutdown
+    mShuttingDown = true;
+
     if (mPollableEvent) {
       mPollableEvent->Signal();
     }
@@ -844,6 +832,13 @@ nsresult nsSocketTransportService::ShutdownThread() {
 
   // join with thread
   mThread->Shutdown();
+  {
+    MutexAutoLock lock(mLock);
+    // Drop our reference to mThread and make sure that any concurrent
+    // readers are excluded
+    mThread = nullptr;
+    mDirectTaskDispatcher = nullptr;
+  }
 
   Preferences::UnregisterCallbacks(UpdatePrefs, gCallbackPrefs, this);
 
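
Together with the Init() hunk above, this ShutdownThread() hunk restores a publish/retire protocol for mThread: the newly created thread is swapped in under mLock, and the reference is dropped under the same lock only after the thread has been shut down, so a concurrent GetThreadSafely() caller sees either a usable thread or null. A hypothetical standard-library sketch of that lifecycle (illustrative names, not the Mozilla API):

// Sketch only: publish the worker under the lock, join outside the lock,
// then retire the reference under the lock.
#include <memory>
#include <mutex>
#include <thread>

class LifecycleSketch {
 public:
  void Init() {
    auto worker = std::make_shared<std::thread>([] { /* event loop */ });
    std::lock_guard<std::mutex> lock(mLock);
    mWorker.swap(worker);  // publish under the lock, like thread.swap(mThread)
  }

  void ShutdownThread() {
    // Join outside the lock; concurrent readers may still hold a reference.
    if (auto worker = GetWorkerSafely()) {
      worker->join();
    }
    std::lock_guard<std::mutex> lock(mLock);
    mWorker = nullptr;  // retire under the lock, excluding concurrent readers
  }

  std::shared_ptr<std::thread> GetWorkerSafely() {
    std::lock_guard<std::mutex> lock(mLock);
    return mWorker;
  }

 private:
  std::mutex mLock;
  std::shared_ptr<std::thread> mWorker;
};
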
@@ -1049,10 +1044,6 @@ void nsSocketTransportService::MarkTheLastElementOfPendingQueue() {
 NS_IMETHODIMP
 nsSocketTransportService::Run() {
   SOCKET_LOG(("STS thread init %d sockets\n", gMaxCount));
-  // It is safe to access mThread directly from the socket thread as this is
-  // the first task ever dispatched and mThread has already been set.
-  // It must be non-null.
-  MOZ_DIAGNOSTIC_ASSERT(mThread);
 
 #if defined(XP_WIN)
   // see bug 1361495, gethostname() triggers winsock initialization.
|
@ -1094,11 +1085,13 @@ nsSocketTransportService::Run() {
|
||||||
mPollList[0].out_flags = 0;
|
mPollList[0].out_flags = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mRawThread = NS_GetCurrentThread();
|
||||||
|
|
||||||
// Ensure a call to GetCurrentSerialEventTarget() returns this event target.
|
// Ensure a call to GetCurrentSerialEventTarget() returns this event target.
|
||||||
SerialEventTargetGuard guard(this);
|
SerialEventTargetGuard guard(this);
|
||||||
|
|
||||||
// hook ourselves up to observe event processing for this thread
|
// hook ourselves up to observe event processing for this thread
|
||||||
nsCOMPtr<nsIThreadInternal> threadInt = do_QueryInterface(mThread);
|
nsCOMPtr<nsIThreadInternal> threadInt = do_QueryInterface(mRawThread);
|
||||||
threadInt->SetObserver(this);
|
threadInt->SetObserver(this);
|
||||||
|
|
||||||
// make sure the pseudo random number generator is seeded on this thread
|
// make sure the pseudo random number generator is seeded on this thread
|
||||||
|
@@ -1134,7 +1127,7 @@ nsSocketTransportService::Run() {
     // If we don't reset these, we may not re-enter ProcessNextEvent()
     // until we have events to process, and it may seem like we have
     // an event running for a very long time.
-    mThread->SetRunningEventDelay(TimeDuration(), TimeStamp());
+    mRawThread->SetRunningEventDelay(TimeDuration(), TimeStamp());
 
     do {
       if (Telemetry::CanRecordPrereleaseData()) {
@@ -1152,7 +1145,7 @@ nsSocketTransportService::Run() {
         pollDuration += singlePollDuration;
       }
 
-      mThread->HasPendingEvents(&pendingEvents);
+      mRawThread->HasPendingEvents(&pendingEvents);
       if (pendingEvents) {
         if (!mServingPendingQueue) {
           nsresult rv = Dispatch(
@@ -1181,9 +1174,9 @@ nsSocketTransportService::Run() {
         }
         TimeStamp eventQueueStart = TimeStamp::NowLoRes();
         do {
-          NS_ProcessNextEvent(mThread);
+          NS_ProcessNextEvent(mRawThread);
           pendingEvents = false;
-          mThread->HasPendingEvents(&pendingEvents);
+          mRawThread->HasPendingEvents(&pendingEvents);
         } while (pendingEvents && mServingPendingQueue &&
                  ((TimeStamp::NowLoRes() - eventQueueStart).ToMilliseconds() <
                   mMaxTimePerPollIter));
@@ -1200,17 +1193,17 @@ nsSocketTransportService::Run() {
 
       bool goingOffline = false;
       // now that our event queue is empty, check to see if we should exit
-      if (mShuttingDown) {
-        if (Telemetry::CanRecordPrereleaseData() &&
-            !startOfCycleForLastCycleCalc.IsNull()) {
-          Telemetry::AccumulateTimeDelta(
-              Telemetry::STS_POLL_AND_EVENT_THE_LAST_CYCLE,
-              startOfCycleForLastCycleCalc, TimeStamp::NowLoRes());
-        }
-        break;
-      }
       {
         MutexAutoLock lock(mLock);
+        if (mShuttingDown) {
+          if (Telemetry::CanRecordPrereleaseData() &&
+              !startOfCycleForLastCycleCalc.IsNull()) {
+            Telemetry::AccumulateTimeDelta(
+                Telemetry::STS_POLL_AND_EVENT_THE_LAST_CYCLE,
+                startOfCycleForLastCycleCalc, TimeStamp::NowLoRes());
+          }
+          break;
+        }
         if (mGoingOffline) {
          mGoingOffline = false;
           goingOffline = true;
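
This Run() hunk and the Shutdown() hunk earlier follow from the member change further down: mShuttingDown reverts from Atomic<bool> to a plain bool, so the write in Shutdown() and the exit check at the top of each poll cycle both move inside mLock. A minimal sketch of that flag-under-mutex arrangement, using standard-library stand-ins and hypothetical names:

// Sketch only: a plain bool shutdown flag, written and read under one mutex.
#include <mutex>

class ShutdownFlagSketch {
 public:
  // Any thread: request shutdown; the flag is only touched under the lock.
  void RequestShutdown() {
    std::lock_guard<std::mutex> lock(mLock);
    mShuttingDown = true;
  }

  // Socket-thread loop: re-check the flag under the lock on every iteration.
  void RunLoop() {
    for (;;) {
      // ... poll sockets and drain the event queue ...
      std::lock_guard<std::mutex> lock(mLock);
      if (mShuttingDown) {
        break;
      }
    }
  }

 private:
  std::mutex mLock;
  bool mShuttingDown = false;
};
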
@@ -1231,7 +1224,7 @@ nsSocketTransportService::Run() {
 
   // Final pass over the event queue. This makes sure that events posted by
   // socket detach handlers get processed.
-  NS_ProcessPendingEvents(mThread);
+  NS_ProcessPendingEvents(mRawThread);
 
   SOCKET_LOG(("STS thread exit\n"));
 
@@ -1617,7 +1610,12 @@ nsSocketTransportService::Observe(nsISupports* subject, const char* topic,
 }
 
 void nsSocketTransportService::ClosePrivateConnections() {
-  MOZ_ASSERT(IsOnCurrentThread(), "Must be called on the socket thread");
+  // Must be called on the socket thread.
+#ifdef DEBUG
+  bool onSTSThread;
+  IsOnCurrentThread(&onSTSThread);
+  MOZ_ASSERT(onSTSThread);
+#endif
 
   for (int32_t i = mActiveCount - 1; i >= 0; --i) {
     if (mActiveList[i].mHandler->mIsPrivate) {
@@ -145,15 +145,14 @@ class nsSocketTransportService final : public nsPISocketTransportService,
   // misc (any thread)
   //-------------------------------------------------------------------------
 
-  // Only ever modified once on the main thread. They are never cleared until
-  // the nsSocketTransportService gets destructed.
-  nsCOMPtr<nsIThread> mThread;
-  Atomic<nsIThread*> mRawThread;
+  nsCOMPtr<nsIThread> mThread;  // protected by mLock
   // We store a pointer to mThread as a direct task dispatcher to avoid having
   // to do do_QueryInterface whenever we need to access the interface.
-  nsCOMPtr<nsIDirectTaskDispatcher> mDirectTaskDispatcher;
+  nsCOMPtr<nsIDirectTaskDispatcher>
+      mDirectTaskDispatcher;  // protected by mLock
+  UniquePtr<PollableEvent> mPollableEvent;
 
-  // Returns mThread in a thread-safe manner.
+  // Returns mThread, protecting the get-and-addref with mLock
   already_AddRefed<nsIThread> GetThreadSafely();
   // Same as above, but return mThread as a nsIDirectTaskDispatcher
   already_AddRefed<nsIDirectTaskDispatcher> GetDirectTaskDispatcherSafely();
@@ -162,11 +161,9 @@ class nsSocketTransportService final : public nsPISocketTransportService,
   // initialization and shutdown (any thread)
   //-------------------------------------------------------------------------
 
-  Atomic<bool> mInitialized;
-  Atomic<bool> mShuttingDown;
   Mutex mLock;
-  // Variables in the next section protected by mLock
-  UniquePtr<PollableEvent> mPollableEvent;
+  bool mInitialized;
+  bool mShuttingDown;
   // indicates whether we are currently in the
   // process of shutting down
   bool mOffline;
@@ -216,6 +213,7 @@ class nsSocketTransportService final : public nsPISocketTransportService,
 
   SocketContext* mActiveList; /* mListSize entries */
   SocketContext* mIdleList;   /* mListSize entries */
+  nsIThread* mRawThread;
 
   uint32_t mActiveListSize;
   uint32_t mIdleListSize;