/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "CacheIOThread.h"

#include "CacheFileIOManager.h"
#include "CacheLog.h"
#include "CacheObserver.h"
#include "nsIRunnable.h"
#include "nsISupportsImpl.h"
#include "nsPrintfCString.h"
#include "nsThread.h"
#include "nsThreadManager.h"
#include "nsThreadUtils.h"
#include "mozilla/EventQueue.h"
#include "mozilla/IOInterposer.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/ThreadEventQueue.h"
#include "mozilla/Telemetry.h"
#include "mozilla/TelemetryHistogramEnums.h"

#ifdef XP_WIN
#  include <windows.h>
#endif

namespace mozilla::net {

namespace { // anon

class CacheIOTelemetry {
 public:
  using size_type = CacheIOThread::EventQueue::size_type;
  static size_type mMinLengthToReport[CacheIOThread::LAST_LEVEL];
  static void Report(uint32_t aLevel, size_type aLength);
};

static CacheIOTelemetry::size_type const kGranularity = 30;

CacheIOTelemetry::size_type
    CacheIOTelemetry::mMinLengthToReport[CacheIOThread::LAST_LEVEL] = {
        kGranularity, kGranularity, kGranularity, kGranularity,
        kGranularity, kGranularity, kGranularity, kGranularity};

// static
void CacheIOTelemetry::Report(uint32_t aLevel,
                              CacheIOTelemetry::size_type aLength) {
  if (mMinLengthToReport[aLevel] > aLength) {
    return;
  }

  static Telemetry::HistogramID telemetryID[] = {
      Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN_PRIORITY,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_READ_PRIORITY,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_MANAGEMENT,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_READ,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE_PRIORITY,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_INDEX,
      Telemetry::HTTP_CACHE_IO_QUEUE_2_EVICT};

  // Each bucket is a multiple of kGranularity (30, 60, 90..., 300+)
  aLength = (aLength / kGranularity);
  // Next time report only when over the current length + kGranularity
  mMinLengthToReport[aLevel] = (aLength + 1) * kGranularity;

  // 10 is the number of buckets we have in each probe
  aLength = std::min<size_type>(aLength, 10);

  Telemetry::Accumulate(telemetryID[aLevel], aLength - 1); // counted from 0
}
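
// Worked example of the bucketing above (hypothetical numbers, not from the
// source): with kGranularity == 30 and a queue length of 95, 95 / 30 == 3,
// so bucket min(3, 10) - 1 == 2 is accumulated, and the next report for this
// level fires only once the queue length reaches (3 + 1) * 30 == 120.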

} // namespace

namespace detail {

/**
 * Helper class encapsulating platform-specific code to cancel
 * any pending IO operation taking too long. Solely used during
 * shutdown to prevent any IO shutdown hangs.
 * Mainly designed around the Win32 CancelSynchronousIo function.
 */
class BlockingIOWatcher {
#ifdef XP_WIN
  // The native handle to the thread
  HANDLE mThread;
  // Event signaling back to the main thread, see NotifyOperationDone.
  HANDLE mEvent;
#endif

 public:
  // Created and destroyed on the main thread only
  BlockingIOWatcher();
  ~BlockingIOWatcher();

  // Called on the IO thread to grab the platform specific
  // reference to it.
  void InitThread();
  // If there is a blocking operation being handled on the IO
  // thread, this is called on the main thread during shutdown.
  // Waits for notification from the IO thread for up to two seconds.
  // If that times out, it attempts to cancel the IO operation.
  void WatchAndCancel(Monitor& aMonitor);
  // Called by the IO thread after each operation has been
  // finished (after each Run() call). This wakes the main
  // thread up and makes WatchAndCancel() early exit and become
  // a no-op.
  void NotifyOperationDone();
};
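
// Typical call sequence (a sketch inferred from the code below, not a
// normative contract): the IO thread calls InitThread() once at startup and
// NotifyOperationDone() after every executed event; during shutdown the main
// thread calls WatchAndCancel(), which waits on mEvent and, on timeout,
// issues CancelSynchronousIo() against the duplicated thread handle.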

#ifdef XP_WIN

BlockingIOWatcher::BlockingIOWatcher() : mThread(NULL), mEvent(NULL) {
  HMODULE kernel32_dll = GetModuleHandleW(L"kernel32.dll");
  if (!kernel32_dll) {
    return;
  }

  mEvent = ::CreateEventW(NULL, TRUE, FALSE, NULL);
}

BlockingIOWatcher::~BlockingIOWatcher() {
  if (mEvent) {
    CloseHandle(mEvent);
  }
  if (mThread) {
    CloseHandle(mThread);
  }
}

void BlockingIOWatcher::InitThread() {
  // GetCurrentThread() only returns a pseudo handle, hence DuplicateHandle
  ::DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                    GetCurrentProcess(), &mThread, 0, FALSE,
                    DUPLICATE_SAME_ACCESS);
}

void BlockingIOWatcher::WatchAndCancel(Monitor& aMonitor) {
  if (!mEvent) {
    return;
  }

  // Reset before we enter the monitor to raise the chance we catch
  // the currently pending IO op completion.
  ::ResetEvent(mEvent);

  HANDLE thread;
  {
    MonitorAutoLock lock(aMonitor);
    thread = mThread;

    if (!thread) {
      return;
    }
  }

  LOG(("Blocking IO operation pending on IO thread, waiting..."));

  // It seems wise to use the I/O lag time as a maximum time to wait
  // for an operation to finish. When that times out and cancelation
  // succeeds, there will be no other IO operation permitted. By default
  // this is two seconds.
  uint32_t maxLag =
      std::min<uint32_t>(5, CacheObserver::MaxShutdownIOLag()) * 1000;

  DWORD result = ::WaitForSingleObject(mEvent, maxLag);
  if (result == WAIT_TIMEOUT) {
    LOG(("CacheIOThread: Attempting to cancel a long blocking IO operation"));
    BOOL result = ::CancelSynchronousIo(thread);
    if (result) {
      LOG((" cancelation signal succeeded"));
    } else {
      DWORD error = GetLastError();
      LOG((" cancelation signal failed with GetLastError=%lu", error));
    }
  }
}

void BlockingIOWatcher::NotifyOperationDone() {
  if (mEvent) {
    ::SetEvent(mEvent);
  }
}

#else // WIN

// Stub code only (we don't implement IO cancelation for this platform)

BlockingIOWatcher::BlockingIOWatcher() = default;
BlockingIOWatcher::~BlockingIOWatcher() = default;
void BlockingIOWatcher::InitThread() {}
void BlockingIOWatcher::WatchAndCancel(Monitor&) {}
void BlockingIOWatcher::NotifyOperationDone() {}

#endif

} // namespace detail

CacheIOThread* CacheIOThread::sSelf = nullptr;

NS_IMPL_ISUPPORTS(CacheIOThread, nsIThreadObserver)

CacheIOThread::CacheIOThread() {
  for (auto& item : mQueueLength) {
    item = 0;
  }

  sSelf = this;
}

CacheIOThread::~CacheIOThread() {
  if (mXPCOMThread) {
    nsIThread* thread = mXPCOMThread;
    thread->Release();
  }

  sSelf = nullptr;
#ifdef DEBUG
  for (auto& event : mEventQueue) {
    MOZ_ASSERT(!event.Length());
  }
#endif
}

nsresult CacheIOThread::Init() {
  {
    MonitorAutoLock lock(mMonitor);
    // There is no thread yet, but we want to make sure
    // the sequencing is correct.
    mBlockingIOWatcher = MakeUnique<detail::BlockingIOWatcher>();
  }

  // Increase the reference count while spawning a new thread.
  // If PR_CreateThread succeeds, we will forget this reference and the thread
  // will be responsible for releasing it when it completes.
  RefPtr<CacheIOThread> self = this;
  mThread =
      PR_CreateThread(PR_USER_THREAD, ThreadFunc, this, PR_PRIORITY_NORMAL,
                      PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 128 * 1024);
  if (!mThread) {
    return NS_ERROR_FAILURE;
  }

  // IMPORTANT: The thread now owns this reference, so it's important that we
  // leak it here, otherwise we'll end up with a bad refcount.
  // See the dont_AddRef in ThreadFunc().
  Unused << self.forget().take();

  return NS_OK;
}

nsresult CacheIOThread::Dispatch(nsIRunnable* aRunnable, uint32_t aLevel) {
  return Dispatch(do_AddRef(aRunnable), aLevel);
}

nsresult CacheIOThread::Dispatch(already_AddRefed<nsIRunnable> aRunnable,
                                 uint32_t aLevel) {
  NS_ENSURE_ARG(aLevel < LAST_LEVEL);

  nsCOMPtr<nsIRunnable> runnable(aRunnable);

  // Runnable is always expected to be non-null, hard null-check below.
  MOZ_ASSERT(runnable);

  MonitorAutoLock lock(mMonitor);

  if (mShutdown && (PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_UNEXPECTED;
  }

  return DispatchInternal(runnable.forget(), aLevel);
}

nsresult CacheIOThread::DispatchAfterPendingOpens(nsIRunnable* aRunnable) {
  // Runnable is always expected to be non-null, hard null-check below.
  MOZ_ASSERT(aRunnable);

  MonitorAutoLock lock(mMonitor);

  if (mShutdown && (PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_UNEXPECTED;
  }

  // Move everything from the later-executed OPEN level to the OPEN_PRIORITY
  // level where we post the (eviction) runnable.
  mQueueLength[OPEN_PRIORITY] += mEventQueue[OPEN].Length();
  mQueueLength[OPEN] -= mEventQueue[OPEN].Length();
  mEventQueue[OPEN_PRIORITY].AppendElements(mEventQueue[OPEN]);
  mEventQueue[OPEN].Clear();

  return DispatchInternal(do_AddRef(aRunnable), OPEN_PRIORITY);
}

nsresult CacheIOThread::DispatchInternal(
    already_AddRefed<nsIRunnable> aRunnable, uint32_t aLevel) {
  nsCOMPtr<nsIRunnable> runnable(aRunnable);

  LogRunnable::LogDispatch(runnable.get());

  if (NS_WARN_IF(!runnable)) return NS_ERROR_NULL_POINTER;

  mMonitor.AssertCurrentThreadOwns();

  ++mQueueLength[aLevel];
  mEventQueue[aLevel].AppendElement(runnable.forget());
  if (mLowestLevelWaiting > aLevel) mLowestLevelWaiting = aLevel;

  mMonitor.NotifyAll();

  return NS_OK;
}
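
// Usage sketch (hypothetical caller; assumes the level constants declared in
// CacheIOThread.h and the NS_NewRunnableFunction helper from
// nsThreadUtils.h):
//
//   nsCOMPtr<nsIRunnable> ev =
//       NS_NewRunnableFunction("MyCacheWrite", [] { /* do file IO */ });
//   nsresult rv = ioThread->Dispatch(ev.forget(), CacheIOThread::WRITE);
//
// Lower level values are served first by the loop in ThreadFunc(), so an
// event dispatched at OPEN_PRIORITY preempts queued WRITE events.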

bool CacheIOThread::IsCurrentThread() {
  return mThread == PR_GetCurrentThread();
}

uint32_t CacheIOThread::QueueSize(bool highPriority) {
  MonitorAutoLock lock(mMonitor);
  if (highPriority) {
    return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY];
  }

  return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY] +
         mQueueLength[MANAGEMENT] + mQueueLength[OPEN] + mQueueLength[READ];
}

bool CacheIOThread::YieldInternal() {
  if (!IsCurrentThread()) {
    NS_WARNING(
        "Trying to yield to priority events on non-cache2 I/O thread? "
        "You are probably doing something wrong.");
    return false;
  }

  if (mCurrentlyExecutingLevel == XPCOM_LEVEL) {
    // Doesn't make any sense, since this handler is the one
    // that would be executed as the next one.
    return false;
  }

  if (!EventsPending(mCurrentlyExecutingLevel)) return false;

  mRerunCurrentEvent = true;
  return true;
}
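
// Usage sketch (hedged; assumes the static YieldAndRerun() helper declared in
// CacheIOThread.h that forwards to YieldInternal()): a long-running event can
// periodically call CacheIOThread::YieldAndRerun() and, when it returns true,
// return from Run() immediately; LoopOneLevel() then keeps the event queued
// and runs it again after the higher-priority events have been served.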

void CacheIOThread::Shutdown() {
  if (!mThread) {
    return;
  }

  {
    MonitorAutoLock lock(mMonitor);
    mShutdown = true;
    mMonitor.NotifyAll();
  }

  PR_JoinThread(mThread);
  mThread = nullptr;
}

void CacheIOThread::CancelBlockingIO() {
  // This is an attempt to cancel any blocking I/O operation taking
  // too long.
  if (!mBlockingIOWatcher) {
    return;
  }

  if (!mIOCancelableEvents) {
    LOG(("CacheIOThread::CancelBlockingIO, no blocking operation to cancel"));
    return;
  }

  // OK, when we get here, we are processing an IO operation on the thread
  // that can be cancelled.
  mBlockingIOWatcher->WatchAndCancel(mMonitor);
}

already_AddRefed<nsIEventTarget> CacheIOThread::Target() {
  nsCOMPtr<nsIEventTarget> target;

  target = mXPCOMThread;
  if (!target && mThread) {
    MonitorAutoLock lock(mMonitor);
    while (!mXPCOMThread) {
      lock.Wait();
    }

    target = mXPCOMThread;
  }

  return target.forget();
}

// static
void CacheIOThread::ThreadFunc(void* aClosure) {
  // XXXmstange We'd like to register this thread with the profiler, but doing
  // so causes leaks, see bug 1323100.
  NS_SetCurrentThreadName("Cache2 I/O");

  mozilla::IOInterposer::RegisterCurrentThread();
  // We hold on to this reference for the duration of the thread.
  RefPtr<CacheIOThread> thread =
      dont_AddRef(static_cast<CacheIOThread*>(aClosure));
  thread->ThreadFunc();
  mozilla::IOInterposer::UnregisterCurrentThread();
}

void CacheIOThread::ThreadFunc() {
  nsCOMPtr<nsIThreadInternal> threadInternal;

  {
    MonitorAutoLock lock(mMonitor);

    MOZ_ASSERT(mBlockingIOWatcher);
    mBlockingIOWatcher->InitThread();

    auto queue =
        MakeRefPtr<ThreadEventQueue>(MakeUnique<mozilla::EventQueue>());
    nsCOMPtr<nsIThread> xpcomThread =
        nsThreadManager::get().CreateCurrentThread(queue,
                                                   nsThread::NOT_MAIN_THREAD);

    threadInternal = do_QueryInterface(xpcomThread);
    if (threadInternal) threadInternal->SetObserver(this);

    mXPCOMThread = xpcomThread.forget().take();

    lock.NotifyAll();

    do {
    loopStart:
      // Reset the lowest level now, so that we can detect that a new event
      // on a lower level (i.e. higher priority) has been scheduled while
      // executing any previously scheduled event.
      mLowestLevelWaiting = LAST_LEVEL;

      // Process xpcom events first
      while (mHasXPCOMEvents) {
        mHasXPCOMEvents = false;
        mCurrentlyExecutingLevel = XPCOM_LEVEL;

        MonitorAutoUnlock unlock(mMonitor);

        bool processedEvent;
        nsresult rv;
        do {
          nsIThread* thread = mXPCOMThread;
          rv = thread->ProcessNextEvent(false, &processedEvent);

          ++mEventCounter;
          MOZ_ASSERT(mBlockingIOWatcher);
          mBlockingIOWatcher->NotifyOperationDone();
        } while (NS_SUCCEEDED(rv) && processedEvent);
      }

      uint32_t level;
      for (level = 0; level < LAST_LEVEL; ++level) {
        if (!mEventQueue[level].Length()) {
          // no events on this level, go to the next level
          continue;
        }

        LoopOneLevel(level);

        // Go to the first (lowest) level again
        goto loopStart;
      }

      if (EventsPending()) {
        continue;
      }

      if (mShutdown) {
        break;
      }

      AUTO_PROFILER_LABEL("CacheIOThread::ThreadFunc::Wait", IDLE);
      lock.Wait();

    } while (true);

    MOZ_ASSERT(!EventsPending());

#ifdef DEBUG
    // This is for correct assertion on XPCOM events dispatch.
    mInsideLoop = false;
#endif
  } // lock

  if (threadInternal) threadInternal->SetObserver(nullptr);
}
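
// Scheduling summary (a reading of the loop above): XPCOM events are drained
// first, then the per-level queues in ascending level order; after draining
// one level the loop jumps back to loopStart, so newly dispatched
// lower-level (higher-priority) events run before the remaining higher
// levels.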

void CacheIOThread::LoopOneLevel(uint32_t aLevel) {
  mMonitor.AssertCurrentThreadOwns();
  EventQueue events = std::move(mEventQueue[aLevel]);
  EventQueue::size_type length = events.Length();

  mCurrentlyExecutingLevel = aLevel;

  bool returnEvents = false;
  bool reportTelemetry = true;

  EventQueue::size_type index;
  {
    MonitorAutoUnlock unlock(mMonitor);

    for (index = 0; index < length; ++index) {
      if (EventsPending(aLevel)) {
        // Somebody scheduled a new event on a lower level, break and hurry
        // to execute it! Don't forget to return what we haven't executed.
        returnEvents = true;
        break;
      }

      if (reportTelemetry) {
        reportTelemetry = false;
        CacheIOTelemetry::Report(aLevel, length);
      }

      // Drop any previous flagging; only an event on the current level may
      // set this flag.
      mRerunCurrentEvent = false;

      LogRunnable::Run log(events[index].get());

      events[index]->Run();

      MOZ_ASSERT(mBlockingIOWatcher);
      mBlockingIOWatcher->NotifyOperationDone();

      if (mRerunCurrentEvent) {
        // The event handler yields to higher priority events and wants to
        // rerun.
        log.WillRunAgain();
        returnEvents = true;
        break;
      }

      ++mEventCounter;
      --mQueueLength[aLevel];

      // Release outside the lock.
      events[index] = nullptr;
    }
  }

  if (returnEvents) {
    // This code must prevent any AddRef/Release calls on the stored COMPtrs
    // as that might be expensive and block the monitor's lock for an
    // excessive amount of time.

    // 'index' points at the event that was interrupted and asked for re-run;
    // all events before it have run, been nullified, and can be removed.
    events.RemoveElementsAt(0, index);
    // Move events that might have been scheduled on this queue to the tail to
    // preserve the expected per-queue FIFO order.
    // XXX(Bug 1631371) Check if this should use a fallible operation as it
    // pretended earlier.
    events.AppendElements(std::move(mEventQueue[aLevel]));
    // And finally move everything back to the main queue.
    mEventQueue[aLevel] = std::move(events);
  }
}

bool CacheIOThread::EventsPending(uint32_t aLastLevel) {
  return mLowestLevelWaiting < aLastLevel || mHasXPCOMEvents;
}

NS_IMETHODIMP CacheIOThread::OnDispatchedEvent() {
  MonitorAutoLock lock(mMonitor);
  mHasXPCOMEvents = true;
  MOZ_ASSERT(mInsideLoop);
  lock.Notify();
  return NS_OK;
}

NS_IMETHODIMP CacheIOThread::OnProcessNextEvent(nsIThreadInternal* thread,
                                                bool mayWait) {
  return NS_OK;
}

NS_IMETHODIMP CacheIOThread::AfterProcessNextEvent(nsIThreadInternal* thread,
                                                   bool eventWasProcessed) {
  return NS_OK;
}

// Memory reporting

size_t CacheIOThread::SizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  MonitorAutoLock lock(const_cast<CacheIOThread*>(this)->mMonitor);

  size_t n = 0;
  for (const auto& event : mEventQueue) {
    n += event.ShallowSizeOfExcludingThis(mallocSizeOf);
    // Events referenced by the queues are arbitrary objects we cannot be sure
    // are reported elsewhere, and they probably don't implement the nsISizeOf
    // interface. Deliberately omitting them from reporting here.
  }

  return n;
}

size_t CacheIOThread::SizeOfIncludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
}

CacheIOThread::Cancelable::Cancelable(bool aCancelable)
    : mCancelable(aCancelable) {
  // This will only ever be used on the I/O thread,
  // which is expected to be alive longer than this class.
  MOZ_ASSERT(CacheIOThread::sSelf);
  MOZ_ASSERT(CacheIOThread::sSelf->IsCurrentThread());

  if (mCancelable) {
    ++CacheIOThread::sSelf->mIOCancelableEvents;
  }
}

CacheIOThread::Cancelable::~Cancelable() {
  MOZ_ASSERT(CacheIOThread::sSelf);

  if (mCancelable) {
    --CacheIOThread::sSelf->mIOCancelableEvents;
  }
}
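
// Usage sketch (illustrative; mirrors how CacheFileIOManager guards blocking
// file IO, with hypothetical handle/buffer names): scope the RAII guard
// around a cancelable blocking call so CancelBlockingIO() has something to
// cancel:
//
//   {
//     CacheIOThread::Cancelable cancelable(true);
//     PR_Read(fd, buf, count);  // blocking IO that may be canceled
//   }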

} // namespace mozilla::net