/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/ThreadEventQueue.h"
#include "mozilla/EventQueue.h"

#include "LeakRefPtr.h"
#include "nsComponentManagerUtils.h"
#include "nsIThreadInternal.h"
#include "nsThreadUtils.h"
#include "PrioritizedEventQueue.h"
#include "ThreadEventTarget.h"

using namespace mozilla;

template <class InnerQueueT>
class ThreadEventQueue<InnerQueueT>::NestedSink : public ThreadTargetSink {
 public:
  NestedSink(EventQueue* aQueue, ThreadEventQueue* aOwner)
      : mQueue(aQueue), mOwner(aOwner) {}

  bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                EventQueuePriority aPriority) final {
    return mOwner->PutEventInternal(std::move(aEvent), aPriority, this);
  }

  void Disconnect(const MutexAutoLock& aProofOfLock) final {
    mQueue = nullptr;
  }

  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
    if (mQueue) {
      return mQueue->SizeOfIncludingThis(aMallocSizeOf);
    }
    return 0;
  }

 private:
  friend class ThreadEventQueue;

  // This is a non-owning reference. It must live at least until Disconnect is
  // called to clear it out.
  EventQueue* mQueue;
  RefPtr<ThreadEventQueue> mOwner;
};

template <class InnerQueueT>
ThreadEventQueue<InnerQueueT>::ThreadEventQueue(UniquePtr<InnerQueueT> aQueue)
    : mBaseQueue(std::move(aQueue)),
      mLock("ThreadEventQueue"),
      mEventsAvailable(mLock, "EventsAvail") {
  static_assert(IsBaseOf<AbstractEventQueue, InnerQueueT>::value,
                "InnerQueueT must be an AbstractEventQueue subclass");
}

template <class InnerQueueT>
ThreadEventQueue<InnerQueueT>::~ThreadEventQueue() {
  MOZ_ASSERT(mNestedQueues.IsEmpty());
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::PutEvent(
    already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aPriority) {
  return PutEventInternal(std::move(aEvent), aPriority, nullptr);
}

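// Shared dispatch path used by PutEvent and by nested sinks: outside the lock
// the runnable may override the requested priority via nsIRunnablePriority;
// under the lock the event is placed into either the nested sink's queue or
// the base queue, and the thread observer is notified only after the lock has
// been dropped.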
template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::PutEventInternal(
    already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aPriority,
    NestedSink* aSink) {
  // We want to leak the reference when we fail to dispatch it, so that
  // we won't release the event on the wrong thread.
  LeakRefPtr<nsIRunnable> event(std::move(aEvent));
  nsCOMPtr<nsIThreadObserver> obs;

  {
    // Check if the runnable wants to override the passed-in priority.
    // Do this outside the lock, so runnables implemented in JS can QI
    // (and possibly GC) outside of the lock.
    if (InnerQueueT::SupportsPrioritization) {
      auto* e = event.get();  // can't do_QueryInterface on LeakRefPtr.
      if (nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(e)) {
        uint32_t prio = nsIRunnablePriority::PRIORITY_NORMAL;
        runnablePrio->GetPriority(&prio);
        if (prio == nsIRunnablePriority::PRIORITY_HIGH) {
          aPriority = EventQueuePriority::High;
        } else if (prio == nsIRunnablePriority::PRIORITY_INPUT) {
          aPriority = EventQueuePriority::Input;
        } else if (prio == nsIRunnablePriority::PRIORITY_MEDIUMHIGH) {
          aPriority = EventQueuePriority::MediumHigh;
        } else if (prio == nsIRunnablePriority::PRIORITY_DEFERRED_TIMERS) {
          aPriority = EventQueuePriority::DeferredTimers;
        } else if (prio == nsIRunnablePriority::PRIORITY_IDLE) {
          aPriority = EventQueuePriority::Idle;
        }
      }
    }

    MutexAutoLock lock(mLock);

    if (mEventsAreDoomed) {
      return false;
    }

    if (aSink) {
      if (!aSink->mQueue) {
        return false;
      }

      aSink->mQueue->PutEvent(event.take(), aPriority, lock);
    } else {
      mBaseQueue->PutEvent(event.take(), aPriority, lock);
    }

    mEventsAvailable.Notify();

    // Make sure to grab the observer before dropping the lock, otherwise the
    // event that we just placed into the queue could run and eventually
    // delete this nsThread before the calling thread is scheduled again. We
    // would then crash while trying to access a dead nsThread.
    obs = mObserver;
  }

  if (obs) {
    obs->OnDispatchedEvent();
  }

  return true;
}

template <class InnerQueueT>
already_AddRefed<nsIRunnable> ThreadEventQueue<InnerQueueT>::GetEvent(
    bool aMayWait, EventQueuePriority* aPriority) {
  MutexAutoLock lock(mLock);

  nsCOMPtr<nsIRunnable> event;
  for (;;) {
    if (mNestedQueues.IsEmpty()) {
      event = mBaseQueue->GetEvent(aPriority, lock);
    } else {
      // We always get events from the topmost queue when there are nested
      // queues.
      event = mNestedQueues.LastElement().mQueue->GetEvent(aPriority, lock);
    }

    if (event || !aMayWait) {
      break;
    }

    AUTO_PROFILER_LABEL("ThreadEventQueue::GetEvent::Wait", IDLE);
    AUTO_PROFILER_THREAD_SLEEP;
    mEventsAvailable.Wait();
  }

  return event.forget();
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::DidRunEvent() {
  MutexAutoLock lock(mLock);
  if (mNestedQueues.IsEmpty()) {
    mBaseQueue->DidRunEvent(lock);
  } else {
    mNestedQueues.LastElement().mQueue->DidRunEvent(lock);
  }
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::HasPendingEvent() {
  MutexAutoLock lock(mLock);

  // We always get events from the topmost queue when there are nested queues.
  if (mNestedQueues.IsEmpty()) {
    return mBaseQueue->HasReadyEvent(lock);
  } else {
    return mNestedQueues.LastElement().mQueue->HasReadyEvent(lock);
  }
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::HasPendingHighPriorityEvents() {
  MutexAutoLock lock(mLock);

  // We always get events from the topmost queue when there are nested queues.
  if (mNestedQueues.IsEmpty()) {
    return mBaseQueue->HasPendingHighPriorityEvents(lock);
  } else {
    return mNestedQueues.LastElement().mQueue->HasPendingHighPriorityEvents(
        lock);
  }
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::ShutdownIfNoPendingEvents() {
  MutexAutoLock lock(mLock);
  if (mNestedQueues.IsEmpty() && mBaseQueue->IsEmpty(lock)) {
    mEventsAreDoomed = true;
    return true;
  }
  return false;
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::EnableInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->EnableInputEventPrioritization(lock);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::FlushInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->FlushInputEventPrioritization(lock);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::SuspendInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->SuspendInputEventPrioritization(lock);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::ResumeInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->ResumeInputEventPrioritization(lock);
}

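// Pushes a temporary queue on top of this one and returns an event target
// that feeds it through a NestedSink. While any nested queue exists, GetEvent
// and HasPendingEvent only consult the topmost queue; PopEventQueue
// disconnects that target again and moves any still-pending events down to
// the queue below it.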
template <class InnerQueueT>
already_AddRefed<nsISerialEventTarget>
ThreadEventQueue<InnerQueueT>::PushEventQueue() {
  auto queue = MakeUnique<EventQueue>();
  RefPtr<NestedSink> sink = new NestedSink(queue.get(), this);
  RefPtr<ThreadEventTarget> eventTarget =
      new ThreadEventTarget(sink, NS_IsMainThread());

  MutexAutoLock lock(mLock);

  mNestedQueues.AppendElement(NestedQueueItem(std::move(queue), eventTarget));
  return eventTarget.forget();
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::PopEventQueue(nsIEventTarget* aTarget) {
  MutexAutoLock lock(mLock);

  MOZ_ASSERT(!mNestedQueues.IsEmpty());

  NestedQueueItem& item = mNestedQueues.LastElement();

  MOZ_ASSERT(aTarget == item.mEventTarget);

  // Disconnect the event target that will be popped.
  item.mEventTarget->Disconnect(lock);

  AbstractEventQueue* prevQueue =
      mNestedQueues.Length() == 1
          ? static_cast<AbstractEventQueue*>(mBaseQueue.get())
          : static_cast<AbstractEventQueue*>(
                mNestedQueues[mNestedQueues.Length() - 2].mQueue.get());

  // Move events from the popped queue to the queue below it.
  nsCOMPtr<nsIRunnable> event;
  EventQueuePriority prio;
  while ((event = item.mQueue->GetEvent(&prio, lock))) {
    prevQueue->PutEvent(event.forget(), prio, lock);
  }

  mNestedQueues.RemoveLastElement();
}

template <class InnerQueueT>
size_t ThreadEventQueue<InnerQueueT>::SizeOfExcludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  size_t n = 0;

  n += mBaseQueue->SizeOfIncludingThis(aMallocSizeOf);

  n += mNestedQueues.ShallowSizeOfExcludingThis(aMallocSizeOf);
  for (auto& queue : mNestedQueues) {
    n += queue.mEventTarget->SizeOfIncludingThis(aMallocSizeOf);
  }

  return SynchronizedEventQueue::SizeOfExcludingThis(aMallocSizeOf) + n;
}

template <class InnerQueueT>
already_AddRefed<nsIThreadObserver>
ThreadEventQueue<InnerQueueT>::GetObserver() {
  MutexAutoLock lock(mLock);
  return do_AddRef(mObserver);
}

template <class InnerQueueT>
already_AddRefed<nsIThreadObserver>
ThreadEventQueue<InnerQueueT>::GetObserverOnThread() {
  return do_AddRef(mObserver);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::SetObserver(nsIThreadObserver* aObserver) {
  MutexAutoLock lock(mLock);
  mObserver = aObserver;
}

namespace mozilla {
template class ThreadEventQueue<EventQueue>;
template class ThreadEventQueue<PrioritizedEventQueue>;
}  // namespace mozilla
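
// Illustrative sketch only; the names below are assumptions, not part of this
// file. A non-main thread would typically wrap a plain EventQueue and
// dispatch through PutEvent; the actual construction sites live in the thread
// manager and may differ.
//
//   RefPtr<ThreadEventQueue<EventQueue>> queue =
//       new ThreadEventQueue<EventQueue>(MakeUnique<EventQueue>());
//   queue->PutEvent(do_AddRef(someRunnable), EventQueuePriority::Normal);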