/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZILLA_MEDIASTREAMGRAPHIMPL_H_
#define MOZILLA_MEDIASTREAMGRAPHIMPL_H_

#include "MediaStreamGraph.h"

#include "AudioMixer.h"
#include "GraphDriver.h"
#include "Latency.h"
#include "mozilla/Atomics.h"
#include "mozilla/Monitor.h"
#include "mozilla/Services.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/WeakPtr.h"
#include "nsDataHashtable.h"
#include "nsIMemoryReporter.h"
#include "nsINamed.h"
#include "nsIRunnable.h"
#include "nsIThread.h"
#include "nsITimer.h"

namespace mozilla {

namespace media {
class ShutdownTicket;
}

template <typename T>
class LinkedList;

#ifdef MOZ_WEBRTC
class AudioOutputObserver;
#endif

/**
 * A per-stream update message passed from the media graph thread to the
 * main thread.
 */
struct StreamUpdate
{
  RefPtr<MediaStream> mStream;
  StreamTime mNextMainThreadCurrentTime;
  bool mNextMainThreadFinished;
};

/**
 * This represents a message run on the graph thread to modify stream or graph
 * state. These are passed from main thread to graph thread through
 * AppendMessage(), or scheduled on the graph thread with
 * RunMessageAfterProcessing(). A ControlMessage always has a weak reference
 * to a particular affected stream.
 */
class ControlMessage
{
public:
  explicit ControlMessage(MediaStream* aStream) : mStream(aStream)
  {
    MOZ_COUNT_CTOR(ControlMessage);
  }
  // All these run on the graph thread
  virtual ~ControlMessage()
  {
    MOZ_COUNT_DTOR(ControlMessage);
  }
  // Do the action of this message on the MediaStreamGraph thread. Any actions
  // affecting graph processing should take effect at mProcessedTime.
  // All stream data for times < mProcessedTime has already been computed.
  virtual void Run() = 0;
  // RunDuringShutdown() is only relevant to messages generated on the main
  // thread (for AppendMessage()).
  // When we're shutting down the application, most messages are ignored but
  // some cleanup messages should still be processed (on the main thread).
  // This must not add new control messages to the graph.
  virtual void RunDuringShutdown() {}
  MediaStream* GetStream() { return mStream; }

protected:
  // We do not hold a reference to mStream. The graph will be holding
  // a reference to the stream until the Destroy message is processed. The
  // last message referencing a stream is the Destroy message for that stream.
  MediaStream* mStream;
};
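
// Illustrative sketch (hypothetical, not an existing class in this file): a
// typical ControlMessage subclass captures its parameters when constructed on
// the main thread and applies them to its stream in Run() on the graph thread.
//
//   class SetTrackEnabledMessage : public ControlMessage
//   {
//   public:
//     SetTrackEnabledMessage(MediaStream* aStream, bool aEnabled)
//       : ControlMessage(aStream), mEnabled(aEnabled) {}
//     void Run() override
//     {
//       // Runs on the graph thread; mutate graph/stream state for mStream here.
//     }
//   private:
//     bool mEnabled;
//   };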

class MessageBlock
{
public:
  nsTArray<UniquePtr<ControlMessage>> mMessages;
};

/**
 * The implementation of a media stream graph. This class is private to this
 * file. It's not in the anonymous namespace because MediaStream needs to
 * be able to friend it.
 *
 * There can be multiple MediaStreamGraphs per process: one per document.
 * Additionally, each OfflineAudioContext object creates its own
 * MediaStreamGraph object too.
 */
class MediaStreamGraphImpl : public MediaStreamGraph,
                             public nsIMemoryReporter,
                             public nsITimerCallback,
                             public nsINamed
{
public:
  NS_DECL_THREADSAFE_ISUPPORTS
  NS_DECL_NSIMEMORYREPORTER
  NS_DECL_NSITIMERCALLBACK
  NS_DECL_NSINAMED

  /**
   * Use aGraphDriverRequested with SYSTEM_THREAD_DRIVER or AUDIO_THREAD_DRIVER
   * to create a MediaStreamGraph which provides support for real-time audio
   * and/or video. Set it to OFFLINE_THREAD_DRIVER in order to create a
   * non-realtime instance which just churns through its inputs and produces
   * output. Those objects currently only support audio, and are used to
   * implement OfflineAudioContext. They do not support MediaStream inputs.
   */
  explicit MediaStreamGraphImpl(GraphDriverType aGraphDriverRequested,
                                TrackRate aSampleRate,
                                AbstractThread* aWindow);

  /**
   * Unregisters memory reporting and deletes this instance. This should be
   * called instead of calling the destructor directly.
   */
  void Destroy();

  // Main thread only.
  /**
   * This runs every time we need to sync state from the media graph thread
   * to the main thread while the main thread is not in the middle
   * of a script. It runs during a "stable state" (per HTML5) or during
   * an event posted to the main thread.
   * aSourceIsMSG indicates whether the caller is the graph thread, and
   * determines which of the booleans controlling runnable dispatch is cleared.
   */
  void RunInStableState(bool aSourceIsMSG);
  /**
   * Ensure a runnable to run RunInStableState is posted to the appshell to
   * run at the next stable state (per HTML5).
   * See EnsureStableStateEventPosted.
   */
  void EnsureRunInStableState();
  /**
   * Called to apply a StreamUpdate to its stream.
   */
  void ApplyStreamUpdate(StreamUpdate* aUpdate);
  /**
   * Append a ControlMessage to the message queue. This queue is drained
   * during RunInStableState; the messages will run on the graph thread.
   */
  void AppendMessage(UniquePtr<ControlMessage> aMessage);
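  // Illustrative usage sketch: a message is typically allocated on the main
  // thread with MakeUnique (from mozilla/UniquePtr.h, included above) and
  // handed to the graph, e.g. with the hypothetical SetTrackEnabledMessage
  // sketched next to ControlMessage:
  //
  //   graphImpl->AppendMessage(MakeUnique<SetTrackEnabledMessage>(stream, true));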

  /**
   * Dispatches a runnable from any thread to the correct main thread for this
   * MediaStreamGraph.
   */
  void Dispatch(already_AddRefed<nsIRunnable>&& aRunnable);

  /**
   * Make this MediaStreamGraph enter forced-shutdown state. This state
   * will be noticed by the media graph thread, which will shut down all
   * streams and other state controlled by the media graph thread.
   * This is called during application shutdown.
   */
  void ForceShutDown(media::ShutdownTicket* aShutdownTicket);

  /**
   * Called before the thread runs.
   */
  void Init();

  /**
   * Respond to CollectReports with sizes collected on the graph thread.
   */
  static void
  FinishCollectReports(nsIHandleReportCallback* aHandleReport,
                       nsISupports* aData,
                       const nsTArray<AudioNodeSizes>& aAudioStreamSizes);

  // The following methods run on the graph thread (or possibly the main thread
  // if mLifecycleState > LIFECYCLE_RUNNING)
  void CollectSizesForMemoryReport(
    already_AddRefed<nsIHandleReportCallback> aHandleReport,
    already_AddRefed<nsISupports> aHandlerData);

  /**
   * Returns true if this MediaStreamGraph should keep running.
   */
  bool UpdateMainThreadState();

  /**
   * Runs one iteration of the graph, up to aStateEnd. Returns true if this
   * MediaStreamGraph should keep running.
   */
  bool OneIteration(GraphTime aStateEnd);

  bool Running() const
  {
    return LifecycleStateRef() == LIFECYCLE_RUNNING;
  }

  /* This is the end of the current iteration, that is, the current time of the
   * graph. */
  GraphTime IterationEnd() const;

  /**
   * Ensure there is an event posted to the main thread to run RunInStableState.
   * mMonitor must be held.
   * See EnsureRunInStableState.
   */
  void EnsureStableStateEventPosted();
  /**
   * Generate messages to the main thread to update it for all state changes.
   * mMonitor must be held.
   */
  void PrepareUpdatesToMainThreadState(bool aFinalUpdate);
  /**
   * Returns false if there is any stream that has finished but not yet finished
   * playing out.
   */
  bool AllFinishedStreamsNotified();
  /**
   * If we are rendering in non-realtime mode, we don't want to send messages to
   * the main thread at each iteration for performance reasons. We instead
   * notify the main thread at the same rate as we would in realtime mode.
   */
  bool ShouldUpdateMainThread();
  // The following methods are the various stages of RunThread processing.
  /**
   * Advance all stream state to mStateComputedTime.
   */
  void UpdateCurrentTimeForStreams(GraphTime aPrevCurrentTime);
  /**
   * Process chunks for all streams and raise events for properties that have
   * changed, such as principalId.
   */
  void ProcessChunkMetadata(GraphTime aPrevCurrentTime);
  /**
   * Process chunks for the given stream and interval, and raise events for
   * properties that have changed, such as principalId.
   */
  template<typename C, typename Chunk>
  void ProcessChunkMetadataForInterval(MediaStream* aStream,
                                       TrackID aTrackID,
                                       C& aSegment,
                                       StreamTime aStart,
                                       StreamTime aEnd);
  /**
   * Process graph messages in mFrontMessageQueue.
   */
  void RunMessagesInQueue();
  /**
   * Update stream processing order and recompute stream blocking until
   * aEndBlockingDecisions.
   */
  void UpdateGraph(GraphTime aEndBlockingDecisions);

  void SwapMessageQueues()
  {
    MOZ_ASSERT(CurrentDriver()->OnThread());
    MOZ_ASSERT(mFrontMessageQueue.IsEmpty());
    mMonitor.AssertCurrentThreadOwns();
    mFrontMessageQueue.SwapElements(mBackMessageQueue);
  }
  /**
   * Do all the processing and play the audio and video, from
   * mProcessedTime to mStateComputedTime.
   */
  void Process();
  /**
   * Extract any state updates pending in aStream, and apply them.
   */
  void ExtractPendingInput(SourceMediaStream* aStream,
                           GraphTime aDesiredUpToTime,
                           bool* aEnsureNextIteration);

  /**
   * For use during ProcessedMediaStream::ProcessInput() or
   * MediaStreamListener callbacks, when graph state cannot be changed.
   * Schedules |aMessage| to run after processing, at a time when graph state
   * can be changed. Graph thread.
   */
  void RunMessageAfterProcessing(UniquePtr<ControlMessage> aMessage);
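  // Illustrative usage sketch: unlike AppendMessage(), which is used from the
  // main thread, this is for code that is already running on the graph thread
  // (e.g. inside ProcessedMediaStream::ProcessInput()), for example:
  //
  //   graphImpl->RunMessageAfterProcessing(
  //     MakeUnique<SetTrackEnabledMessage>(stream, false));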

  /**
   * Called when a suspend/resume/close operation has been completed, on the
   * graph thread.
   */
  void AudioContextOperationCompleted(MediaStream* aStream,
                                      void* aPromise,
                                      dom::AudioContextOperation aOperation);

  /**
   * Apply an AudioContext operation (suspend/resume/close), on the graph
   * thread.
   */
  void ApplyAudioContextOperationImpl(MediaStream* aDestinationStream,
                                      const nsTArray<MediaStream*>& aStreams,
                                      dom::AudioContextOperation aOperation,
                                      void* aPromise);

  /**
   * Increment suspend count on aStream and move it to mSuspendedStreams if
   * necessary.
   */
  void IncrementSuspendCount(MediaStream* aStream);
  /**
   * Decrement suspend count on aStream and move it back to mStreams if
   * necessary.
   */
  void DecrementSuspendCount(MediaStream* aStream);

  /*
   * Move streams from mStreams to mSuspendedStreams when suspending/closing an
   * AudioContext, or the inverse when resuming an AudioContext.
   */
  void SuspendOrResumeStreams(dom::AudioContextOperation aAudioContextOperation,
                              const nsTArray<MediaStream*>& aStreamSet);

  /**
   * Determine if we have any audio tracks, or are about to add any audio
   * tracks. Also checks if we'll need the AEC running (i.e. microphone input
   * tracks).
   */
  bool AudioTrackPresent(bool& aNeedsAEC);

  /**
   * Sort mStreams so that every stream not in a cycle is after any streams
   * it depends on, and every stream in a cycle is marked as being in a cycle.
   * Also sets mIsConsumed on every stream.
   */
  void UpdateStreamOrder();

  /**
   * Returns smallest value of t such that t is a multiple of
   * WEBAUDIO_BLOCK_SIZE and t > aTime.
   */
  GraphTime RoundUpToNextAudioBlock(GraphTime aTime);
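  // Worked examples of the contract above, with WEBAUDIO_BLOCK_SIZE == 128:
  //   RoundUpToNextAudioBlock(127) == 128
  //   RoundUpToNextAudioBlock(128) == 256  (the result is strictly greater)
  //   RoundUpToNextAudioBlock(200) == 256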
  /**
   * Produce data for all streams >= aStreamIndex for the current time interval.
   * Advances block by block, each iteration producing data for all streams
   * for a single block.
   * This is called whenever we have an AudioNodeStream in the graph.
   */
  void ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
                                         TrackRate aSampleRate);
  /**
   * If aStream will underrun between aTime and aEndBlockingDecisions, returns
   * the time at which the underrun will start. Otherwise returns
   * aEndBlockingDecisions.
   */
  GraphTime WillUnderrun(MediaStream* aStream, GraphTime aEndBlockingDecisions);

  /**
   * Given a graph time aTime, convert it to a stream time taking into
   * account the time during which aStream is scheduled to be blocked.
   */
  StreamTime GraphTimeToStreamTimeWithBlocking(MediaStream* aStream, GraphTime aTime);

  /**
   * Call NotifyHaveCurrentData on aStream's listeners.
   */
  void NotifyHasCurrentData(MediaStream* aStream);
  /**
   * If aStream needs an audio stream but doesn't have one, create it.
   * If aStream doesn't need an audio stream but has one, destroy it.
   */
  void CreateOrDestroyAudioStreams(MediaStream* aStream);
  /**
   * Queue audio (mix of stream audio and silence for blocked intervals)
   * to the audio output stream. Returns the number of frames played.
   */
  StreamTime PlayAudio(MediaStream* aStream);

  void OpenAudioInputImpl(int aID,
                          AudioDataListener *aListener);
  virtual nsresult OpenAudioInput(int aID,
                                  AudioDataListener *aListener) override;
  void CloseAudioInputImpl(AudioDataListener *aListener);
  virtual void CloseAudioInput(AudioDataListener *aListener) override;

  /**
   * No more data will be forthcoming for aStream. The stream will end
   * at the current buffer end point. The StreamTracks's tracks must be
   * explicitly set to finished by the caller.
   */
  void FinishStream(MediaStream* aStream);
  /**
   * Compute how much stream data we would like to buffer for aStream.
   */
  StreamTime GetDesiredBufferEnd(MediaStream* aStream);
  /**
   * Returns true when there are no active streams.
   */
  bool IsEmpty() const
  {
    MOZ_ASSERT(OnGraphThreadOrNotRunning() ||
               (NS_IsMainThread() &&
                LifecycleStateRef() >= LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP));
    return mStreams.IsEmpty() && mSuspendedStreams.IsEmpty() && mPortCount == 0;
  }

  /**
   * Add aStream to the graph and initialize its graph-specific state.
   */
  void AddStreamGraphThread(MediaStream* aStream);
  /**
   * Remove aStream from the graph. Ensures that pending messages about the
   * stream back to the main thread are flushed.
   */
  void RemoveStreamGraphThread(MediaStream* aStream);
  /**
   * Remove aPort from the graph and release it.
   */
  void DestroyPort(MediaInputPort* aPort);
  /**
   * Mark the media stream order as dirty.
   */
  void SetStreamOrderDirty()
  {
    MOZ_ASSERT(OnGraphThreadOrNotRunning());
    mStreamOrderDirty = true;
  }

  uint32_t AudioChannelCount() const
  {
    return mOutputChannels;
  }

  double MediaTimeToSeconds(GraphTime aTime) const
  {
    NS_ASSERTION(aTime > -STREAM_TIME_MAX && aTime <= STREAM_TIME_MAX,
                 "Bad time");
    return static_cast<double>(aTime)/GraphRate();
  }

  GraphTime SecondsToMediaTime(double aS) const
  {
    NS_ASSERTION(0 <= aS && aS <= TRACK_TICKS_MAX/TRACK_RATE_MAX,
                 "Bad seconds");
    return GraphRate() * aS;
  }

  GraphTime MillisecondsToMediaTime(int32_t aMS) const
  {
    return RateConvertTicksRoundDown(GraphRate(), 1000, aMS);
  }
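  // Worked examples of the conversions above, assuming GraphRate() == 48000:
  //   MediaTimeToSeconds(48000)   == 1.0
  //   SecondsToMediaTime(0.5)     == 24000
  //   MillisecondsToMediaTime(10) == 480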

  /**
   * Signal to the graph that the thread has paused indefinitely,
   * or resumed.
   */
  void PausedIndefinitly();
  void ResumedFromPaused();

  /**
   * Not safe to call off the MediaStreamGraph thread unless monitor is held!
   */
  GraphDriver* CurrentDriver() const
  {
#ifdef DEBUG
    if (!OnGraphThreadOrNotRunning()) {
      mMonitor.AssertCurrentThreadOwns();
    }
#endif
    return mDriver;
  }

  /**
   * Effectively set the new driver, while we are switching.
   * It is only safe to call this at the very end of an iteration, when there
   * has been a SwitchAtNextIteration call during the iteration. The driver
   * should return and pass the control to the new driver shortly after.
   * We can also switch from Revive() (on MainThread). Monitor must be held.
   */
  void SetCurrentDriver(GraphDriver* aDriver)
  {
#ifdef DEBUG
    mMonitor.AssertCurrentThreadOwns();
#endif
    mDriver = aDriver;
  }

  Monitor& GetMonitor()
  {
    return mMonitor;
  }

  void EnsureNextIteration()
  {
    mNeedAnotherIteration = true; // atomic
    // Note: GraphDriver must ensure that there's no race on setting
    // mNeedAnotherIteration and mGraphDriverAsleep -- see WaitForNextIteration()
    if (mGraphDriverAsleep) { // atomic
      MonitorAutoLock mon(mMonitor);
      CurrentDriver()->WakeUp(); // Might not be the same driver; might have woken already
    }
  }

  void EnsureNextIterationLocked()
  {
    mNeedAnotherIteration = true; // atomic
    if (mGraphDriverAsleep) { // atomic
      CurrentDriver()->WakeUp(); // Might not be the same driver; might have woken already
    }
  }

  // Capture Stream API. This allows getting a mixed-down output for a window.
  void RegisterCaptureStreamForWindow(uint64_t aWindowId,
                                      ProcessedMediaStream* aCaptureStream);
  void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
  already_AddRefed<MediaInputPort>
  ConnectToCaptureStream(uint64_t aWindowId, MediaStream* aMediaStream);

  class StreamSet {
  public:
    class iterator {
    public:
      explicit iterator(MediaStreamGraphImpl& aGraph)
        : mGraph(&aGraph), mArrayNum(-1), mArrayIndex(0)
      {
        ++(*this);
      }
      iterator() : mGraph(nullptr), mArrayNum(2), mArrayIndex(0) {}
      MediaStream* operator*()
      {
        return Array()->ElementAt(mArrayIndex);
      }
      iterator operator++()
      {
        ++mArrayIndex;
        while (mArrayNum < 2 &&
               (mArrayNum < 0 || mArrayIndex >= Array()->Length())) {
          ++mArrayNum;
          mArrayIndex = 0;
        }
        return *this;
      }
      bool operator==(const iterator& aOther) const
      {
        return mArrayNum == aOther.mArrayNum && mArrayIndex == aOther.mArrayIndex;
      }
      bool operator!=(const iterator& aOther) const
      {
        return !(*this == aOther);
      }
    private:
      nsTArray<MediaStream*>* Array()
      {
        return mArrayNum == 0 ? &mGraph->mStreams : &mGraph->mSuspendedStreams;
      }
      MediaStreamGraphImpl* mGraph;
      int mArrayNum;
      uint32_t mArrayIndex;
    };

    explicit StreamSet(MediaStreamGraphImpl& aGraph) : mGraph(aGraph) {}
    iterator begin() { return iterator(mGraph); }
    iterator end() { return iterator(); }
  private:
    MediaStreamGraphImpl& mGraph;
  };
  StreamSet AllStreams() { return StreamSet(*this); }
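  // Illustrative usage sketch: AllStreams() visits mStreams and then
  // mSuspendedStreams, so graph-thread code that must consider every stream,
  // suspended or not, can simply write:
  //
  //   for (MediaStream* stream : AllStreams()) {
  //     // inspect or update |stream|
  //   }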

  // Data members
  //
  /**
   * Graphs own owning references to their driver, until shutdown. When a driver
   * switch occurs, the previous driver is either deleted, or its ownership is
   * passed to an event that will take care of the asynchronous cleanup, as
   * audio streams can take some time to shut down.
   * Accessed on both the main thread and the graph thread; both read and write.
   * Must hold monitor to access it.
   */
  RefPtr<GraphDriver> mDriver;

  // The following state is managed on the graph thread only, unless
  // mLifecycleState > LIFECYCLE_RUNNING in which case the graph thread
  // is not running and this state can be used from the main thread.

  /**
   * The graph keeps a reference to each stream.
   * References are maintained manually to simplify reordering without
   * unnecessary thread-safe refcount changes.
   * Must satisfy OnGraphThreadOrNotRunning().
   */
  nsTArray<MediaStream*> mStreams;

  /**
   * This stores MediaStreams that are part of suspended AudioContexts.
   * mStreams and mSuspendedStreams are disjoint sets: a stream is either
   * suspended or not suspended. Suspended streams are not ordered in
   * UpdateStreamOrder, and therefore do no processing.
   * Must satisfy OnGraphThreadOrNotRunning().
   */
  nsTArray<MediaStream*> mSuspendedStreams;

  /**
   * Streams from mFirstCycleBreaker to the end of mStreams produce output
   * before they receive input. They correspond to DelayNodes that are in
   * cycles.
   */
  uint32_t mFirstCycleBreaker;

  /**
   * Blocking decisions have been computed up to this time.
   * Between each iteration, this is the same as mProcessedTime.
   */
  GraphTime mStateComputedTime = 0;

  /**
   * All stream contents have been computed up to this time.
   * The next batch of updates from the main thread will be processed
   * at this time. This is behind mStateComputedTime during processing.
   */
  GraphTime mProcessedTime = 0;
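
  /* Illustrative invariant for the two times above (an assumption about
   * where such a check would live, not an existing assertion):
   *
   *   MOZ_ASSERT(mProcessedTime <= mStateComputedTime);
   *
   * During an iteration, state is computed ahead of the data actually
   * produced; between iterations the two times are equal again.
   */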

  /**
   * The time at which we last sent an update about the graph state to the
   * main thread.
   */
  TimeStamp mLastMainThreadUpdate;

  /**
   * Number of active MediaInputPorts.
   */
  int32_t mPortCount;

  /**
   * Devices to use for cubeb input & output, and booleans to control
   * whether we want input and/or output at all.
   */
  bool mInputWanted;
  int mInputDeviceID;
  bool mOutputWanted;
  int mOutputDeviceID;

  // Maps AudioDataListeners to a use count of streams using the listener,
  // so we can know when it's no longer in use.
  nsDataHashtable<nsPtrHashKey<AudioDataListener>, uint32_t> mInputDeviceUsers;
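
  /* Illustrative sketch of the use-count bookkeeping described above
   * (assumed calling pattern; only mInputDeviceUsers itself is real):
   *
   *   uint32_t count = 0;
   *   mInputDeviceUsers.Get(aListener, &count);
   *   mInputDeviceUsers.Put(aListener, count + 1);   // one more stream using it
   *   ...
   *   mInputDeviceUsers.Get(aListener, &count);
   *   if (count <= 1) {
   *     mInputDeviceUsers.Remove(aListener);         // last user went away
   *   } else {
   *     mInputDeviceUsers.Put(aListener, count - 1);
   *   }
   */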

  // True if the graph needs another iteration after the current iteration.
  Atomic<bool> mNeedAnotherIteration;
  // GraphDriver may need a WakeUp() if something changes.
  Atomic<bool> mGraphDriverAsleep;

  // mMonitor guards the data below.
  // MediaStreamGraph normally does its work without holding mMonitor, so it is
  // not safe to just grab mMonitor from some thread and start monkeying with
  // the graph. Instead, communicate with the graph thread using provided
  // mechanisms such as the ControlMessage queue.
  Monitor mMonitor;

  // Data guarded by mMonitor (must always be accessed with mMonitor held,
  // regardless of the value of mLifecycleState).

  /**
   * State to copy to the main thread.
   */
  nsTArray<StreamUpdate> mStreamUpdates;
  /**
   * Runnables to run after the next update to main thread state.
   */
  nsTArray<nsCOMPtr<nsIRunnable>> mUpdateRunnables;

  /**
   * A list of batches of messages to process. Each batch is processed
   * as an atomic unit.
   */
  /*
   * Message queue processed by the MSG thread during an iteration.
   * Accessed on graph thread only.
   */
  nsTArray<MessageBlock> mFrontMessageQueue;
  /*
   * Message queue in which the main thread appends messages.
   * Access guarded by mMonitor.
   */
  nsTArray<MessageBlock> mBackMessageQueue;

  /* True if there will be messages to process if we swap the message queues. */
  bool MessagesQueued() const
  {
    mMonitor.AssertCurrentThreadOwns();
    return !mBackMessageQueue.IsEmpty();
  }
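
  /* Illustrative sketch of the double-buffered queue handoff (an assumed
   * caller at the start of a graph-thread iteration; only the two queues and
   * MessagesQueued() are real):
   *
   *   {
   *     MonitorAutoLock lock(mMonitor);
   *     if (MessagesQueued()) {
   *       mFrontMessageQueue.SwapElements(mBackMessageQueue);
   *     }
   *   }
   *   // The graph thread then drains mFrontMessageQueue without holding
   *   // mMonitor, so the main thread can keep appending to the back queue.
   */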

  /**
   * This enum specifies where this graph is in its lifecycle. This is used
   * to control shutdown.
   * Shutdown is tricky because it can happen in two different ways:
   * 1) Shutdown due to inactivity. RunThread() detects that it has no
   * pending messages and no streams, and exits. The next RunInStableState()
   * checks if there are new pending messages from the main thread (true only
   * if new stream creation raced with shutdown); if there are, it revives
   * RunThread(), otherwise it commits to shutting down the graph. New stream
   * creation after this point will create a new graph. An async event is
   * dispatched to Shutdown() the graph's threads and then delete the graph
   * object.
   * 2) Forced shutdown at application shutdown, or completion of a
   * non-realtime graph. A flag is set, RunThread() detects the flag and
   * exits, the next RunInStableState() detects the flag, and dispatches the
   * async event to Shutdown() the graph's threads. However the graph object
   * is not deleted. New messages for the graph are processed synchronously on
   * the main thread if necessary. When the last stream is destroyed, the
   * graph object is deleted.
   *
   * This should be kept in sync with the LifecycleState_str array in
   * MediaStreamGraph.cpp.
   */
  enum LifecycleState
  {
    // The graph thread hasn't started yet.
    LIFECYCLE_THREAD_NOT_STARTED,
    // RunThread() is running normally.
    LIFECYCLE_RUNNING,
    // In the following states, the graph thread is not running so
    // all "graph thread only" state in this class can be used safely
    // on the main thread.
    // RunThread() has exited and we're waiting for the next
    // RunInStableState(), at which point we can clean up the main-thread
    // side of the graph.
    LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP,
    // RunInStableState() posted a ShutdownRunnable, and we're waiting for it
    // to shut down the graph thread(s).
    LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN,
    // Graph threads have shut down but we're waiting for remaining streams
    // to be destroyed. Only happens during application shutdown and on
    // completed non-realtime graphs, since normally we'd only shut down a
    // realtime graph when it has no streams.
    LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION
  };
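
  /* Condensed restatement of the lifecycle described above (no behavior
   * beyond that comment is implied):
   *
   *   THREAD_NOT_STARTED -> RUNNING -> WAITING_FOR_MAIN_THREAD_CLEANUP
   *     -> WAITING_FOR_THREAD_SHUTDOWN
   *     -> WAITING_FOR_STREAM_DESTRUCTION  (forced shutdown / non-realtime only)
   *
   * The one backwards step is WAITING_FOR_MAIN_THREAD_CLEANUP -> RUNNING,
   * when new messages raced with an inactivity shutdown.
   */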

  /**
   * Modified on both the main thread and the graph thread (the latter in
   * UpdateMainThreadState() when we're about to shut down), under mMonitor.
   * mMonitor must be held when accessing it.
   */
  LifecycleState mLifecycleState;

  LifecycleState& LifecycleStateRef()
  {
#ifdef DEBUG
    if (!mDetectedNotRunning) {
      mMonitor.AssertCurrentThreadOwns();
    }
#endif
    return mLifecycleState;
  }
  const LifecycleState& LifecycleStateRef() const
  {
#ifdef DEBUG
    if (!mDetectedNotRunning) {
      mMonitor.AssertCurrentThreadOwns();
    }
#endif
    return mLifecycleState;
  }

  /**
   * The graph should stop processing at or after this time.
   * Only set on the main thread. Read on both the main thread and the MSG
   * thread.
   */
  Atomic<GraphTime> mEndTime;

  /**
   * True when we need to do a forced shutdown during application shutdown.
   * Only set on the main thread.
   * Can be read safely on the main thread; on all other threads mMonitor must
   * be held.
   */
  bool mForceShutDown;

  /**
   * Drop this reference during shutdown to unblock shutdown.
   * Only accessed on the main thread.
   */
  RefPtr<media::ShutdownTicket> mForceShutdownTicket;

  /**
   * True when we have posted an event to the main thread to run
   * RunInStableState() and the event hasn't run yet.
   * Accessed on both the main thread and the MSG thread; mMonitor must be
   * held.
   */
  bool mPostedRunInStableStateEvent;

  // Main thread only

  /**
   * Messages posted by the current event loop task. These are forwarded to
   * the media graph thread during RunInStableState. We can't forward them
   * immediately because we want all messages between stable states to be
   * processed as an atomic batch.
   */
  nsTArray<UniquePtr<ControlMessage>> mCurrentTaskMessageQueue;

  /**
   * True when RunInStableState has determined that mLifecycleState is >
   * LIFECYCLE_RUNNING. Since only the main thread can reset mLifecycleState
   * to LIFECYCLE_RUNNING, this can be relied on to not change unexpectedly.
   */
  Atomic<bool> mDetectedNotRunning;

  /**
   * True when a stable state runner has been posted to the appshell to run
   * RunInStableState at the next stable state.
   * Only accessed on the main thread.
   */
  bool mPostedRunInStableState;

  /**
   * True when processing real-time audio/video. False when processing
   * non-realtime audio.
   */
  const bool mRealtime;

  /**
   * True when a non-realtime MediaStreamGraph has started to process input.
   * This value is only accessed on the main thread.
   */
  bool mNonRealtimeProcessing;

  /**
   * True when a change has happened which requires us to recompute the stream
   * blocking order.
   */
  bool mStreamOrderDirty;

  /**
   * Hold a ref to the Latency logger.
   */
  RefPtr<AsyncLatencyLogger> mLatencyLog;
  AudioMixer mMixer;
  const RefPtr<AbstractThread> mAbstractMainThread;
#ifdef MOZ_WEBRTC
  RefPtr<AudioOutputObserver> mFarendObserverRef;
#endif

  // Used to limit graph shutdown time.
  // Only accessed on the main thread.
  nsCOMPtr<nsITimer> mShutdownTimer;

private:
  virtual ~MediaStreamGraphImpl();

  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  /**
   * This class uses manual memory management, and all pointers to it are raw
   * pointers. However, in order for it to implement nsIMemoryReporter, it
   * needs to implement nsISupports and so be ref-counted. So it maintains a
   * single RefPtr to itself, giving it a ref-count of 1 during its entire
   * lifetime, and Destroy() nulls this self-reference in order to trigger
   * self-deletion.
   */
  RefPtr<MediaStreamGraphImpl> mSelfRef;
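
  /* Illustrative sketch of the self-reference pattern described above (an
   * assumption about what construction and Destroy() roughly do; the real
   * statements live in MediaStreamGraph.cpp):
   *
   *   // at construction:
   *   mSelfRef = this;   // keeps the ref-count at 1 for the whole lifetime
   *   // in Destroy():
   *   RefPtr<MediaStreamGraphImpl> self = mSelfRef.forget();
   *   // `self` going out of scope releases the last reference and deletes
   *   // the graph.
   */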

  struct WindowAndStream
  {
    uint64_t mWindowId;
    RefPtr<ProcessedMediaStream> mCaptureStreamSink;
  };
  /**
   * Streams used for window audio capture.
   */
  nsTArray<WindowAndStream> mWindowCaptureStreams;

  /**
   * Number of channels on output.
   */
  const uint32_t mOutputChannels;

#ifdef DEBUG
  /**
   * Used to assert when AppendMessage() runs ControlMessages synchronously.
   */
  bool mCanRunMessagesSynchronously;
#endif
};

} // namespace mozilla

#endif /* MOZILLA_MEDIASTREAMGRAPHIMPL_H_ */