2012-04-30 07:11:26 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
|
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#ifndef MOZILLA_MEDIASTREAMGRAPH_H_
|
|
|
|
#define MOZILLA_MEDIASTREAMGRAPH_H_
|
|
|
|
|
2013-07-19 18:40:58 +04:00
|
|
|
#include "mozilla/LinkedList.h"
|
2015-07-16 21:52:43 +03:00
|
|
|
#include "mozilla/Mutex.h"
|
|
|
|
#include "mozilla/TaskQueue.h"
|
|
|
|
|
|
|
|
#include "mozilla/dom/AudioChannelBinding.h"
|
|
|
|
|
2015-07-23 02:48:47 +03:00
|
|
|
#include "AudioSegment.h"
|
2012-11-14 23:46:40 +04:00
|
|
|
#include "AudioStream.h"
|
2012-04-30 07:11:26 +04:00
|
|
|
#include "nsTArray.h"
|
|
|
|
#include "nsIRunnable.h"
|
|
|
|
#include "StreamBuffer.h"
|
|
|
|
#include "TimeVarying.h"
|
|
|
|
#include "VideoFrameContainer.h"
|
|
|
|
#include "VideoSegment.h"
|
2013-09-19 17:54:42 +04:00
|
|
|
#include "MainThreadUtils.h"
|
2014-03-24 14:06:05 +04:00
|
|
|
#include "nsAutoRef.h"
|
2014-07-02 06:21:34 +04:00
|
|
|
#include <speex/speex_resampler.h>
|
2014-08-25 17:27:25 +04:00
|
|
|
#include "DOMMediaStream.h"
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
#include "AudioContext.h"
|
2013-09-19 17:54:42 +04:00
|
|
|
|
|
|
|
class nsIRunnable;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2014-03-24 14:06:05 +04:00
|
|
|
// Teach nsAutoRef<SpeexResamplerState> how to dispose of the resampler:
// it is held as a plain pointer (nsPointerRefTraits) and freed with the
// Speex library's own destroy function when ownership ends.
template <>
class nsAutoRefTraits<SpeexResamplerState> : public nsPointerRefTraits<SpeexResamplerState>
{
public:
  // Invoked by nsAutoRef when the owned pointer is released.
  static void Release(SpeexResamplerState* aState) { speex_resampler_destroy(aState); }
};
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
namespace mozilla {
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
extern PRLogModuleInfo* gMediaStreamGraphLog;
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
/*
|
|
|
|
* MediaStreamGraph is a framework for synchronized audio/video processing
|
|
|
|
* and playback. It is designed to be used by other browser components such as
|
|
|
|
* HTML media elements, media capture APIs, real-time media streaming APIs,
|
|
|
|
* multitrack media APIs, and advanced audio APIs.
|
|
|
|
*
|
|
|
|
* The MediaStreamGraph uses a dedicated thread to process media --- the media
|
|
|
|
* graph thread. This ensures that we can process media through the graph
|
|
|
|
* without blocking on main-thread activity. The media graph is only modified
|
|
|
|
* on the media graph thread, to ensure graph changes can be processed without
|
|
|
|
* interfering with media processing. All interaction with the media graph
|
|
|
|
* thread is done with message passing.
|
|
|
|
*
|
|
|
|
* APIs that modify the graph or its properties are described as "control APIs".
|
|
|
|
* These APIs are asynchronous; they queue graph changes internally and
|
|
|
|
* those changes are processed all-at-once by the MediaStreamGraph. The
|
|
|
|
* MediaStreamGraph monitors the main thread event loop via nsIAppShell::RunInStableState
|
|
|
|
* to ensure that graph changes from a single event loop task are always
|
|
|
|
* processed all together. Control APIs should only be used on the main thread,
|
|
|
|
* currently; we may be able to relax that later.
|
|
|
|
*
|
|
|
|
* To allow precise synchronization of times in the control API, the
|
|
|
|
* MediaStreamGraph maintains a "media timeline". Control APIs that take or
|
|
|
|
* return times use that timeline. Those times never advance during
|
|
|
|
* an event loop task. This time is returned by MediaStreamGraph::GetCurrentTime().
|
|
|
|
*
|
|
|
|
* Media decoding, audio processing and media playback use thread-safe APIs to
|
|
|
|
* the media graph to ensure they can continue while the main thread is blocked.
|
|
|
|
*
|
|
|
|
* When the graph is changed, we may need to throw out buffered data and
|
|
|
|
* reprocess it. This is triggered automatically by the MediaStreamGraph.
|
|
|
|
*/
|
|
|
|
|
|
|
|
class MediaStreamGraph;
|
|
|
|
|
|
|
|
/**
|
2012-08-20 08:20:44 +04:00
|
|
|
* This is a base class for media graph thread listener callbacks.
|
|
|
|
* Override methods to be notified of audio or video data or changes in stream
|
|
|
|
* state.
|
2012-04-30 07:11:26 +04:00
|
|
|
*
|
|
|
|
* This can be used by stream recorders or network connections that receive
|
|
|
|
* stream input. It could also be used for debugging.
|
|
|
|
*
|
|
|
|
* All notification methods are called from the media graph thread. Overriders
|
|
|
|
* of these methods are responsible for all synchronization. Beware!
|
|
|
|
* These methods are called without the media graph monitor held, so
|
|
|
|
* reentry into media graph methods is possible, although very much discouraged!
|
|
|
|
* You should do something non-blocking and non-reentrant (e.g. dispatch an
|
|
|
|
* event to some thread) and return.
|
2012-08-20 08:20:44 +04:00
|
|
|
* The listener is not allowed to add/remove any listeners from the stream.
|
2012-05-24 14:37:14 +04:00
|
|
|
*
|
|
|
|
* When a listener is first attached, we guarantee to send a NotifyBlockingChanged
|
|
|
|
* callback to notify of the initial blocking state. Also, if a listener is
|
|
|
|
* attached to a stream that has already finished, we'll call NotifyFinished.
|
2012-04-30 07:11:26 +04:00
|
|
|
*/
|
|
|
|
class MediaStreamListener {
protected:
  // Protected destructor, to discourage deletion outside of Release():
  virtual ~MediaStreamListener() {}

public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStreamListener)

  // Whether anything downstream is actually consuming this stream's data.
  enum Consumption {
    CONSUMED,
    NOT_CONSUMED
  };

  /**
   * Notify that the stream is hooked up and we'd like to start or stop receiving
   * data on it. Only fires on SourceMediaStreams.
   * The initial state is assumed to be NOT_CONSUMED.
   */
  virtual void NotifyConsumptionChanged(MediaStreamGraph* aGraph, Consumption aConsuming) {}

  /**
   * When a SourceMediaStream has pulling enabled, and the MediaStreamGraph
   * control loop is ready to pull, this gets called. A NotifyPull implementation
   * is allowed to call the SourceMediaStream methods that alter track
   * data. It is not allowed to make other MediaStream API calls, including
   * calls to add or remove MediaStreamListeners. It is not allowed to block
   * for any length of time.
   * aDesiredTime is the stream time we would like to get data up to. Data
   * beyond this point will not be played until NotifyPull runs again, so there's
   * not much point in providing it. Note that if the stream is blocked for
   * some reason, then data before aDesiredTime may not be played immediately.
   */
  virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) {}

  enum Blocking {
    BLOCKED,
    UNBLOCKED
  };
  /**
   * Notify that the blocking status of the stream changed. The initial state
   * is assumed to be BLOCKED.
   */
  virtual void NotifyBlockingChanged(MediaStreamGraph* aGraph, Blocking aBlocked) {}

  /**
   * Notify that the stream has data in each track
   * for the stream's current time. Once this state becomes true, it will
   * always be true since we block stream time from progressing to times where
   * there isn't data in each track.
   */
  virtual void NotifyHasCurrentData(MediaStreamGraph* aGraph) {}

  /**
   * Notify that the stream output is advancing. aCurrentTime is the graph's
   * current time. MediaStream::GraphTimeToStreamTime can be used to get the
   * stream time.
   */
  virtual void NotifyOutput(MediaStreamGraph* aGraph, GraphTime aCurrentTime) {}

  // Coarse lifecycle/state events delivered through NotifyEvent().
  enum MediaStreamGraphEvent {
    EVENT_FINISHED,
    EVENT_REMOVED,
    EVENT_HAS_DIRECT_LISTENERS, // transition from no direct listeners
    EVENT_HAS_NO_DIRECT_LISTENERS, // transition to no direct listeners
  };

  /**
   * Notify that an event has occurred on the Stream
   */
  virtual void NotifyEvent(MediaStreamGraph* aGraph, MediaStreamGraphEvent aEvent) {}

  // Bitmask flags for NotifyQueuedTrackChanges()'s aTrackEvents argument.
  enum {
    TRACK_EVENT_CREATED = 0x01,
    TRACK_EVENT_ENDED = 0x02
  };
  /**
   * Notify that changes to one of the stream tracks have been queued.
   * aTrackEvents can be any combination of TRACK_EVENT_CREATED and
   * TRACK_EVENT_ENDED. aQueuedMedia is the data being added to the track
   * at aTrackOffset (relative to the start of the stream).
   */
  virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
                                        StreamTime aTrackOffset,
                                        uint32_t aTrackEvents,
                                        const MediaSegment& aQueuedMedia) {}

  /**
   * Notify that all new tracks this iteration have been created.
   * This is to ensure that tracks added atomically to MediaStreamGraph
   * are also notified of atomically to MediaStreamListeners.
   */
  virtual void NotifyFinishedTrackCreation(MediaStreamGraph* aGraph) {}
};
|
|
|
|
|
2013-08-24 17:53:11 +04:00
|
|
|
/**
|
|
|
|
* This is a base class for media graph thread listener direct callbacks
|
|
|
|
* from within AppendToTrack(). Note that your regular listener will
|
|
|
|
* still get NotifyQueuedTrackChanges() callbacks from the MSG thread, so
|
|
|
|
* you must be careful to ignore them if AddDirectListener was successful.
|
|
|
|
*/
|
|
|
|
class MediaStreamDirectListener : public MediaStreamListener
{
public:
  virtual ~MediaStreamDirectListener() {}

  /*
   * This will be called on any MediaStreamDirectListener added to a
   * SourceMediaStream when AppendToTrack() is called. The MediaSegment
   * will be the RawSegment (unresampled) if available in AppendToTrack().
   * Note that NotifyQueuedTrackChanges() calls will also still occur.
   */
  virtual void NotifyRealtimeData(MediaStreamGraph* aGraph, TrackID aID,
                                  StreamTime aTrackOffset,
                                  uint32_t aTrackEvents,
                                  const MediaSegment& aMedia) {}
};
|
|
|
|
|
2012-08-20 08:20:44 +04:00
|
|
|
/**
|
|
|
|
* This is a base class for main-thread listener callbacks.
|
|
|
|
* This callback is invoked on the main thread when the main-thread-visible
|
|
|
|
* state of a stream has changed.
|
|
|
|
*
|
2013-02-04 14:04:24 +04:00
|
|
|
* These methods are called with the media graph monitor held, so
|
|
|
|
* reentry into general media graph methods is not possible.
|
2012-08-20 08:20:44 +04:00
|
|
|
* You should do something non-blocking and non-reentrant (e.g. dispatch an
|
2013-02-04 14:04:24 +04:00
|
|
|
* event) and return. DispatchFromMainThreadAfterNextStreamStateUpdate
|
|
|
|
* would be a good choice.
|
2012-08-20 08:20:44 +04:00
|
|
|
* The listener is allowed to synchronously remove itself from the stream, but
|
|
|
|
* not add or remove any other listeners.
|
|
|
|
*/
|
|
|
|
class MainThreadMediaStreamListener {
public:
  // Invoked on the main thread, with the media graph monitor held (see the
  // class comment above), when the stream's main-thread-visible state
  // reaches "finished".
  // NOTE(review): this base has no virtual destructor, so implementations
  // must not be deleted through a MainThreadMediaStreamListener pointer.
  virtual void NotifyMainThreadStreamFinished() = 0;
};
|
|
|
|
|
2014-04-13 22:08:10 +04:00
|
|
|
/**
|
|
|
|
* Helper struct used to keep track of memory usage by AudioNodes.
|
|
|
|
*/
|
|
|
|
struct AudioNodeSizes
|
|
|
|
{
|
2014-06-20 21:29:10 +04:00
|
|
|
AudioNodeSizes() : mDomNode(0), mStream(0), mEngine(0), mNodeType() {}
|
2014-04-13 22:08:10 +04:00
|
|
|
size_t mDomNode;
|
|
|
|
size_t mStream;
|
|
|
|
size_t mEngine;
|
|
|
|
nsCString mNodeType;
|
|
|
|
};
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
class MediaStreamGraphImpl;
|
2012-04-30 07:11:40 +04:00
|
|
|
class SourceMediaStream;
|
2012-07-31 16:17:21 +04:00
|
|
|
class ProcessedMediaStream;
|
|
|
|
class MediaInputPort;
|
2013-01-14 02:46:57 +04:00
|
|
|
class AudioNodeEngine;
|
2013-07-24 15:29:39 +04:00
|
|
|
class AudioNodeExternalInputStream;
|
|
|
|
class AudioNodeStream;
|
2015-01-22 11:27:24 +03:00
|
|
|
class CameraPreviewMediaStream;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* A stream of synchronized audio and video data. All (not blocked) streams
|
|
|
|
* progress at the same rate --- "real time". Streams cannot seek. The only
|
|
|
|
* operation readers can perform on a stream is to read the next data.
|
|
|
|
*
|
|
|
|
* Consumers of a stream can be reading from it at different offsets, but that
|
|
|
|
* should only happen due to the order in which consumers are being run.
|
|
|
|
* Those offsets must not diverge in the long term, otherwise we would require
|
|
|
|
* unbounded buffering.
|
|
|
|
*
|
|
|
|
* Streams can be in a "blocked" state. While blocked, a stream does not
|
|
|
|
* produce data. A stream can be explicitly blocked via the control API,
|
|
|
|
* or implicitly blocked by whatever's generating it (e.g. an underrun in the
|
|
|
|
* source resource), or implicitly blocked because something consuming it
|
|
|
|
* blocks, or implicitly because it has finished.
|
|
|
|
*
|
|
|
|
* A stream can be in a "finished" state. "Finished" streams are permanently
|
|
|
|
* blocked.
|
|
|
|
*
|
|
|
|
* Transitions into and out of the "blocked" and "finished" states are managed
|
|
|
|
* by the MediaStreamGraph on the media graph thread.
|
|
|
|
*
|
|
|
|
* We buffer media data ahead of the consumers' reading offsets. It is possible
|
|
|
|
* to have buffered data but still be blocked.
|
|
|
|
*
|
|
|
|
* Any stream can have its audio and video playing when requested. The media
|
|
|
|
* stream graph plays audio by constructing audio output streams as necessary.
|
|
|
|
* Video is played by setting video frames into an VideoFrameContainer at the right
|
|
|
|
* time. To ensure video plays in sync with audio, make sure that the same
|
|
|
|
* stream is playing both the audio and video.
|
|
|
|
*
|
|
|
|
* The data in a stream is managed by StreamBuffer. It consists of a set of
|
|
|
|
* tracks of various types that can start and end over time.
|
|
|
|
*
|
|
|
|
* Streams are explicitly managed. The client creates them via
|
|
|
|
* MediaStreamGraph::CreateInput/ProcessedMediaStream, and releases them by calling
|
|
|
|
* Destroy() when no longer needed (actual destruction will be deferred).
|
|
|
|
* The actual object is owned by the MediaStreamGraph. The basic idea is that
|
|
|
|
* main thread objects will keep Streams alive as long as necessary (using the
|
|
|
|
* cycle collector to clean up whenever needed).
|
|
|
|
*
|
|
|
|
* We make them refcounted only so that stream-related messages with MediaStream*
|
|
|
|
* pointers can be sent to the main thread safely.
|
2012-08-09 15:30:02 +04:00
|
|
|
*
|
|
|
|
* The lifetimes of MediaStreams are controlled from the main thread.
|
|
|
|
* For MediaStreams exposed to the DOM, the lifetime is controlled by the DOM
|
|
|
|
* wrapper; the DOM wrappers own their associated MediaStreams. When a DOM
|
|
|
|
* wrapper is destroyed, it sends a Destroy message for the associated
|
|
|
|
* MediaStream and clears its reference (the last main-thread reference to
|
|
|
|
* the object). When the Destroy message is processed on the graph
|
|
|
|
* manager thread we immediately release the affected objects (disentangling them
|
|
|
|
* from other objects as necessary).
|
|
|
|
*
|
|
|
|
* This could cause problems for media processing if a MediaStream is
|
|
|
|
* destroyed while a downstream MediaStream is still using it. Therefore
|
|
|
|
* the DOM wrappers must keep upstream MediaStreams alive as long as they
|
|
|
|
* could be being used in the media graph.
|
|
|
|
*
|
|
|
|
* At any time, however, a set of MediaStream wrappers could be
|
|
|
|
* collected via cycle collection. Destroy messages will be sent
|
|
|
|
* for those objects in arbitrary order and the MediaStreamGraph has to be able
|
|
|
|
* to handle this.
|
2012-04-30 07:11:26 +04:00
|
|
|
*/
|
2015-05-13 16:34:56 +03:00
|
|
|
class MediaStream : public mozilla::LinkedListElement<MediaStream>
|
|
|
|
{
|
2012-04-30 07:11:26 +04:00
|
|
|
public:
|
|
|
|
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStream)
|
|
|
|
|
2014-09-01 07:50:23 +04:00
|
|
|
explicit MediaStream(DOMMediaStream* aWrapper);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
// Identifier of the AudioContext this stream is associated with. The base
// class returns 0 -- presumably meaning "no AudioContext"; streams owned by
// an AudioContext are expected to override this (TODO confirm against
// AudioNodeStream).
virtual dom::AudioContext::AudioContextId AudioContextId() const { return 0; }
|
2014-04-02 20:21:11 +04:00
|
|
|
|
|
|
|
protected:
|
|
|
|
// Protected destructor, to discourage deletion outside of Release():
|
2013-02-04 14:04:24 +04:00
|
|
|
// The assertions document the destruction protocol: by the time the last
// reference is released, the main-thread destroy step must already have run
// (mMainThreadDestroyed) and every main-thread listener must have been
// removed.
virtual ~MediaStream()
{
  MOZ_COUNT_DTOR(MediaStream);
  NS_ASSERTION(mMainThreadDestroyed, "Should have been destroyed already");
  NS_ASSERTION(mMainThreadListeners.IsEmpty(),
               "All main thread listeners should have been removed");
}
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2014-04-02 20:21:11 +04:00
|
|
|
public:
|
2012-04-30 07:11:26 +04:00
|
|
|
/**
|
|
|
|
* Returns the graph that owns this stream.
|
|
|
|
*/
|
|
|
|
MediaStreamGraphImpl* GraphImpl();
|
2012-07-31 16:17:21 +04:00
|
|
|
MediaStreamGraph* Graph();
|
2013-02-01 23:43:36 +04:00
|
|
|
/**
|
|
|
|
* Sets the graph that owns this stream. Should only be called once.
|
|
|
|
*/
|
|
|
|
void SetGraphImpl(MediaStreamGraphImpl* aGraph);
|
2013-06-10 23:01:19 +04:00
|
|
|
void SetGraphImpl(MediaStreamGraph* aGraph);
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2014-09-18 09:05:04 +04:00
|
|
|
/**
 * Returns sample rate of the graph, as recorded on this stream's
 * StreamBuffer (mBuffer).
 */
TrackRate GraphRate() { return mBuffer.GraphRate(); }
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// Control API.
|
|
|
|
// Since a stream can be played multiple ways, we need to combine independent
|
|
|
|
// volume settings. The aKey parameter is used to keep volume settings
|
|
|
|
// separate. Since the stream is always playing the same contents, only
|
|
|
|
// a single audio output stream is used; the volumes are combined.
|
|
|
|
// Currently only the first enabled audio track is played.
|
|
|
|
// XXX change this so all enabled audio tracks are mixed and played.
|
2013-06-10 23:01:19 +04:00
|
|
|
virtual void AddAudioOutput(void* aKey);
|
|
|
|
virtual void SetAudioOutputVolume(void* aKey, float aVolume);
|
|
|
|
virtual void RemoveAudioOutput(void* aKey);
|
2012-04-30 07:11:26 +04:00
|
|
|
// Since a stream can be played multiple ways, we need to be able to
|
|
|
|
// play to multiple VideoFrameContainers.
|
|
|
|
// Only the first enabled video track is played.
|
2013-06-10 23:01:19 +04:00
|
|
|
virtual void AddVideoOutput(VideoFrameContainer* aContainer);
|
|
|
|
virtual void RemoveVideoOutput(VideoFrameContainer* aContainer);
|
2012-04-30 07:11:26 +04:00
|
|
|
// Explicitly block. Useful for example if a media element is pausing
|
|
|
|
// and we need to stop its stream emitting its buffered data.
|
2013-06-10 23:01:19 +04:00
|
|
|
virtual void ChangeExplicitBlockerCount(int32_t aDelta);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
void BlockStreamIfNeeded();
|
|
|
|
void UnblockStreamIfNeeded();
|
2012-04-30 07:11:26 +04:00
|
|
|
// Events will be dispatched by calling methods of aListener.
|
2013-06-10 23:01:19 +04:00
|
|
|
virtual void AddListener(MediaStreamListener* aListener);
|
|
|
|
virtual void RemoveListener(MediaStreamListener* aListener);
|
2013-05-30 08:44:43 +04:00
|
|
|
// A disabled track has video replaced by black, and audio replaced by
|
|
|
|
// silence.
|
|
|
|
void SetTrackEnabled(TrackID aTrackID, bool aEnabled);
|
2015-05-11 17:07:38 +03:00
|
|
|
|
|
|
|
// Finish event will be notified by calling methods of aListener. It is the
|
2013-02-04 14:04:24 +04:00
|
|
|
// responsibility of the caller to remove aListener before it is destroyed.
|
2015-05-11 17:07:38 +03:00
|
|
|
void AddMainThreadListener(MainThreadMediaStreamListener* aListener);
|
2013-02-04 14:04:24 +04:00
|
|
|
// It's safe to call this even if aListener is not currently a listener;
|
|
|
|
// the call will be ignored.
|
2012-08-20 08:20:44 +04:00
|
|
|
void RemoveMainThreadListener(MainThreadMediaStreamListener* aListener)
|
|
|
|
{
|
2015-05-11 17:07:38 +03:00
|
|
|
MOZ_ASSERT(NS_IsMainThread());
|
|
|
|
MOZ_ASSERT(aListener);
|
2012-08-20 08:20:44 +04:00
|
|
|
mMainThreadListeners.RemoveElement(aListener);
|
|
|
|
}
|
2015-05-11 17:07:38 +03:00
|
|
|
|
2013-10-25 03:07:29 +04:00
|
|
|
/**
|
|
|
|
* Ensure a runnable will run on the main thread after running all pending
|
|
|
|
* updates that were sent from the graph thread or will be sent before the
|
|
|
|
* graph thread receives the next graph update.
|
|
|
|
*
|
2014-07-02 10:04:54 +04:00
|
|
|
* If the graph has been shut down or destroyed, then the runnable will be
|
|
|
|
* dispatched to the event queue immediately. If the graph is non-realtime
|
2013-10-25 03:07:29 +04:00
|
|
|
* and has not started, then the runnable will be run
|
|
|
|
* synchronously/immediately. (There are no pending updates in these
|
|
|
|
* situations.)
|
|
|
|
*
|
|
|
|
* Main thread only.
|
|
|
|
*/
|
2015-03-17 19:29:17 +03:00
|
|
|
void RunAfterPendingUpdates(already_AddRefed<nsIRunnable> aRunnable);
|
2013-10-25 03:07:29 +04:00
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// Signal that the client is done with this MediaStream. It will be deleted later.
|
2013-03-20 18:07:46 +04:00
|
|
|
virtual void Destroy();
|
2012-04-30 07:11:26 +04:00
|
|
|
// Returns the main-thread's view of how much data has been processed by
|
|
|
|
// this stream.
|
2012-08-09 15:30:09 +04:00
|
|
|
StreamTime GetCurrentTime()
|
|
|
|
{
|
|
|
|
NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
|
|
|
|
return mMainThreadCurrentTime;
|
|
|
|
}
|
2012-04-30 07:11:26 +04:00
|
|
|
// Return the main thread's view of whether this stream has finished.
|
2012-08-09 15:30:09 +04:00
|
|
|
bool IsFinished()
|
|
|
|
{
|
|
|
|
NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
|
|
|
|
return mMainThreadFinished;
|
|
|
|
}
|
2015-05-11 17:07:24 +03:00
|
|
|
|
2012-08-09 15:30:09 +04:00
|
|
|
bool IsDestroyed()
|
|
|
|
{
|
|
|
|
NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
|
|
|
|
return mMainThreadDestroyed;
|
|
|
|
}
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
friend class MediaStreamGraphImpl;
|
2012-07-31 16:17:21 +04:00
|
|
|
friend class MediaInputPort;
|
2013-07-24 15:29:39 +04:00
|
|
|
friend class AudioNodeExternalInputStream;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2012-07-30 18:20:58 +04:00
|
|
|
virtual SourceMediaStream* AsSourceStream() { return nullptr; }
|
2012-07-31 16:17:21 +04:00
|
|
|
virtual ProcessedMediaStream* AsProcessedStream() { return nullptr; }
|
2013-01-14 02:46:57 +04:00
|
|
|
virtual AudioNodeStream* AsAudioNodeStream() { return nullptr; }
|
2015-01-22 11:27:24 +03:00
|
|
|
virtual CameraPreviewMediaStream* AsCameraPreviewStream() { return nullptr; }
|
2012-04-30 07:11:40 +04:00
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// These Impl methods perform the core functionality of the control methods
|
|
|
|
// above, on the media graph thread.
|
|
|
|
/**
|
|
|
|
* Stop all stream activity and disconnect it from all inputs and outputs.
|
|
|
|
* This must be idempotent.
|
|
|
|
*/
|
|
|
|
virtual void DestroyImpl();
|
|
|
|
StreamTime GetBufferEnd() { return mBuffer.GetEnd(); }
|
2013-03-07 12:53:45 +04:00
|
|
|
#ifdef DEBUG
|
|
|
|
void DumpTrackInfo() { return mBuffer.DumpTrackInfo(); }
|
|
|
|
#endif
|
2012-04-30 07:11:26 +04:00
|
|
|
void SetAudioOutputVolumeImpl(void* aKey, float aVolume);
|
|
|
|
void AddAudioOutputImpl(void* aKey)
|
|
|
|
{
|
|
|
|
mAudioOutputs.AppendElement(AudioOutput(aKey));
|
|
|
|
}
|
2014-08-25 17:25:49 +04:00
|
|
|
// Returns true if this stream has an audio output.
|
|
|
|
bool HasAudioOutput()
|
|
|
|
{
|
|
|
|
return !mAudioOutputs.IsEmpty();
|
|
|
|
}
|
2012-04-30 07:11:26 +04:00
|
|
|
void RemoveAudioOutputImpl(void* aKey);
|
|
|
|
// Adopts the already-addrefed container as a video output of this stream.
void AddVideoOutputImpl(already_AddRefed<VideoFrameContainer> aContainer)
{
  *mVideoOutputs.AppendElement() = aContainer;
}
|
|
|
|
// Removes aContainer from this stream's video outputs.
void RemoveVideoOutputImpl(VideoFrameContainer* aContainer)
{
  mVideoOutputs.RemoveElement(aContainer);
}
|
2013-02-04 14:04:26 +04:00
|
|
|
// Applies aDelta to the explicit blocker count at aTime and at all later
// times.
void ChangeExplicitBlockerCountImpl(GraphTime aTime, int32_t aDelta)
{
  mExplicitBlockerCount.SetAtAndAfter(aTime, mExplicitBlockerCount.GetAt(aTime) + aDelta);
}
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback starts running, this allows authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operations have to be done on suspended streams, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
// Adds an explicit blocker at aTime, unless the stream is already
// explicitly blocked there (in which case this is a no-op).
void BlockStreamIfNeededImpl(GraphTime aTime)
{
  if (mExplicitBlockerCount.GetAt(aTime) > 0) {
    // Already explicitly blocked; adding another blocker is not needed.
    return;
  }
  ChangeExplicitBlockerCountImpl(aTime, 1);
}
|
|
|
|
// Removes one explicit blocker at aTime, unless the stream is not
// explicitly blocked there (in which case this is a no-op).
void UnblockStreamIfNeededImpl(GraphTime aTime)
{
  if (mExplicitBlockerCount.GetAt(aTime) == 0) {
    // Not explicitly blocked; nothing to remove.
    return;
  }
  ChangeExplicitBlockerCountImpl(aTime, -1);
}
|
2012-05-24 14:37:14 +04:00
|
|
|
void AddListenerImpl(already_AddRefed<MediaStreamListener> aListener);
|
2013-01-07 06:31:30 +04:00
|
|
|
void RemoveListenerImpl(MediaStreamListener* aListener);
|
|
|
|
void RemoveAllListenersImpl();
|
2014-12-09 13:37:01 +03:00
|
|
|
virtual void SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled);
|
2013-07-24 14:11:35 +04:00
|
|
|
/**
|
|
|
|
* Returns true when this stream requires the contents of its inputs even if
|
|
|
|
* its own outputs are not being consumed. This is used to signal inputs to
|
|
|
|
* this stream that they are being consumed; when they're not being consumed,
|
|
|
|
* we make some optimizations.
|
|
|
|
*/
|
|
|
|
virtual bool IsIntrinsicallyConsumed() const
|
|
|
|
{
|
|
|
|
return !mAudioOutputs.IsEmpty() || !mVideoOutputs.IsEmpty();
|
|
|
|
}
|
2013-01-07 06:31:30 +04:00
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
// Records aPort as a consumer (downstream connection) of this stream.
void AddConsumer(MediaInputPort* aPort)
{
  mConsumers.AppendElement(aPort);
}
|
|
|
|
// Removes aPort from this stream's consumers.
void RemoveConsumer(MediaInputPort* aPort)
{
  mConsumers.RemoveElement(aPort);
}
|
2013-12-12 18:31:51 +04:00
|
|
|
// Number of MediaInputPorts currently consuming this stream.
uint32_t ConsumerCount()
{
  return mConsumers.Length();
}
|
2015-05-13 16:35:10 +03:00
|
|
|
StreamBuffer& GetStreamBuffer() { return mBuffer; }
|
2012-07-31 16:17:21 +04:00
|
|
|
GraphTime GetStreamBufferStartTime() { return mBufferStartTime; }
|
2014-06-12 08:44:56 +04:00
|
|
|
|
|
|
|
// Converts a stream time (ticks at the graph rate) to seconds.
double StreamTimeToSeconds(StreamTime aTime)
{
  NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
  return static_cast<double>(aTime) / mBuffer.GraphRate();
}
|
|
|
|
// Converts a stream time (ticks at the graph rate) to whole microseconds;
// the integer division truncates (rounds down, since aTime is non-negative).
int64_t StreamTimeToMicroseconds(StreamTime aTime)
{
  NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
  return (aTime * 1000000) / mBuffer.GraphRate();
}
|
2014-09-18 03:50:01 +04:00
|
|
|
// Converts microseconds to stream time, rounding down to a whole tick.
StreamTime MicrosecondsToStreamTimeRoundDown(int64_t aMicroseconds) {
  return (aMicroseconds * mBuffer.GraphRate()) / 1000000;
}
|
|
|
|
|
2014-06-12 08:44:56 +04:00
|
|
|
// Converts a stream time (graph-rate ticks) to track ticks at aRate,
// rounding up.
TrackTicks TimeToTicksRoundUp(TrackRate aRate, StreamTime aTime)
{
  return RateConvertTicksRoundUp(aRate, mBuffer.GraphRate(), aTime);
}
|
|
|
|
// Converts track ticks at aRate to a stream time (graph-rate ticks),
// rounding down.
StreamTime TicksToTimeRoundDown(TrackRate aRate, TrackTicks aTicks)
{
  return RateConvertTicksRoundDown(mBuffer.GraphRate(), aRate, aTicks);
}
|
2013-02-04 14:04:26 +04:00
|
|
|
/**
|
|
|
|
* Convert graph time to stream time. aTime must be <= mStateComputedTime
|
|
|
|
* to ensure we know exactly how much time this stream will be blocked during
|
|
|
|
* the interval.
|
|
|
|
*/
|
2012-07-31 16:17:21 +04:00
|
|
|
StreamTime GraphTimeToStreamTime(GraphTime aTime);
|
2013-02-04 14:04:26 +04:00
|
|
|
/**
|
|
|
|
* Convert graph time to stream time. aTime can be > mStateComputedTime,
|
|
|
|
* in which case we optimistically assume the stream will not be blocked
|
|
|
|
* after mStateComputedTime.
|
|
|
|
*/
|
|
|
|
StreamTime GraphTimeToStreamTimeOptimistic(GraphTime aTime);
|
|
|
|
/**
|
|
|
|
* Convert stream time to graph time. The result can be > mStateComputedTime,
|
|
|
|
* in which case we did the conversion optimistically assuming the stream
|
|
|
|
* will not be blocked after mStateComputedTime.
|
|
|
|
*/
|
|
|
|
GraphTime StreamTimeToGraphTime(StreamTime aTime);
|
2012-07-31 16:17:21 +04:00
|
|
|
bool IsFinishedOnGraphThread() { return mFinished; }
|
|
|
|
void FinishOnGraphThread();
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2013-03-20 15:19:39 +04:00
|
|
|
bool HasCurrentData() { return mHasCurrentData; }
|
|
|
|
|
2014-09-18 09:13:16 +04:00
|
|
|
StreamBuffer::Track* EnsureTrack(TrackID aTrack);
|
2013-05-21 23:17:47 +04:00
|
|
|
|
2014-12-09 13:37:01 +03:00
|
|
|
virtual void ApplyTrackDisabling(TrackID aTrackID, MediaSegment* aSegment, MediaSegment* aRawSegment = nullptr);
|
2013-05-30 08:44:43 +04:00
|
|
|
|
2013-05-03 09:02:55 +04:00
|
|
|
// Returns the DOMMediaStream wrapping this stream, if any. Main thread only.
DOMMediaStream* GetWrapper()
{
  NS_ASSERTION(NS_IsMainThread(), "Only use DOMMediaStream on main thread");
  return mWrapper;
}
|
|
|
|
|
2013-06-19 07:09:44 +04:00
|
|
|
// Return true if the main thread needs to observe updates from this stream.
|
|
|
|
virtual bool MainThreadNeedsUpdates() const
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-04-13 22:08:10 +04:00
|
|
|
virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
|
|
|
|
virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
|
|
|
|
|
2014-04-18 13:23:36 +04:00
|
|
|
void SetAudioChannelType(dom::AudioChannel aType) { mAudioChannelType = aType; }
|
2014-08-03 17:46:17 +04:00
|
|
|
dom::AudioChannel AudioChannelType() const { return mAudioChannelType; }
|
2014-04-18 13:23:36 +04:00
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
protected:
|
2015-08-03 03:28:19 +03:00
|
|
|
// Advances this stream's time-varying state to aCurrentTime. aBlockedTime
// is added to mBufferStartTime so that time spent blocked does not count
// as played buffer.
void AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime, GraphTime aBlockedTime)
{
  mBufferStartTime += aBlockedTime;
  mExplicitBlockerCount.AdvanceCurrentTime(aCurrentTime);

  // Forget buffered data up to the point that has now been played
  // (mBufferStartTime was already advanced above).
  mBuffer.ForgetUpTo(aCurrentTime - mBufferStartTime);
}
|
|
|
|
|
2015-05-11 17:07:38 +03:00
|
|
|
void NotifyMainThreadListeners()
|
|
|
|
{
|
|
|
|
NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
|
|
|
|
|
|
|
|
for (int32_t i = mMainThreadListeners.Length() - 1; i >= 0; --i) {
|
|
|
|
mMainThreadListeners[i]->NotifyMainThreadStreamFinished();
|
|
|
|
}
|
|
|
|
mMainThreadListeners.Clear();
|
|
|
|
}
|
|
|
|
|
2015-05-11 17:07:24 +03:00
|
|
|
bool ShouldNotifyStreamFinished()
|
|
|
|
{
|
|
|
|
NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
|
|
|
|
if (!mMainThreadFinished || mFinishedNotificationSent) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
mFinishedNotificationSent = true;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// This state is all initialized on the main thread but
|
|
|
|
// otherwise modified only on the media graph thread.
|
|
|
|
|
|
|
|
// Buffered data. The start of the buffer corresponds to mBufferStartTime.
|
|
|
|
// Conceptually the buffer contains everything this stream has ever played,
|
|
|
|
// but we forget some prefix of the buffered data to bound the space usage.
|
|
|
|
StreamBuffer mBuffer;
|
|
|
|
// The time when the buffered data could be considered to have started playing.
|
|
|
|
// This increases over time to account for time the stream was blocked before
|
|
|
|
// mCurrentTime.
|
|
|
|
GraphTime mBufferStartTime;
|
|
|
|
|
|
|
|
// Client-set volume of this stream
|
|
|
|
struct AudioOutput {
|
2014-09-01 07:50:23 +04:00
|
|
|
explicit AudioOutput(void* aKey) : mKey(aKey), mVolume(1.0f) {}
|
2012-04-30 07:11:26 +04:00
|
|
|
void* mKey;
|
|
|
|
float mVolume;
|
|
|
|
};
|
|
|
|
nsTArray<AudioOutput> mAudioOutputs;
|
|
|
|
nsTArray<nsRefPtr<VideoFrameContainer> > mVideoOutputs;
|
2015-08-14 05:16:57 +03:00
|
|
|
// We record the last played video frame to avoid playing the frame again
|
|
|
|
// with a different frame id.
|
2012-04-30 07:11:26 +04:00
|
|
|
VideoFrame mLastPlayedVideoFrame;
|
|
|
|
// The number of times this stream has been explicitly blocked by the control
|
|
|
|
// API, minus the number of times it has been explicitly unblocked.
|
2013-03-18 07:27:14 +04:00
|
|
|
TimeVarying<GraphTime,uint32_t,0> mExplicitBlockerCount;
|
2012-04-30 07:11:26 +04:00
|
|
|
nsTArray<nsRefPtr<MediaStreamListener> > mListeners;
|
2013-02-04 14:04:24 +04:00
|
|
|
nsTArray<MainThreadMediaStreamListener*> mMainThreadListeners;
|
2013-05-30 08:44:43 +04:00
|
|
|
nsTArray<TrackID> mDisabledTrackIDs;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
// Precomputed blocking status (over GraphTime).
|
|
|
|
// This is only valid between the graph's mCurrentTime and
|
2012-07-31 16:17:21 +04:00
|
|
|
// mStateComputedTime. The stream is considered to have
|
2012-04-30 07:11:26 +04:00
|
|
|
// not been blocked before mCurrentTime (its mBufferStartTime is increased
|
|
|
|
// as necessary to account for that time instead) --- this avoids us having to
|
|
|
|
// record the entire history of the stream's blocking-ness in mBlocked.
|
2013-03-18 07:27:14 +04:00
|
|
|
TimeVarying<GraphTime,bool,5> mBlocked;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
// MediaInputPorts to which this is connected
|
|
|
|
nsTArray<MediaInputPort*> mConsumers;
|
|
|
|
|
2012-07-31 16:17:22 +04:00
|
|
|
// Where audio output is going. There is one AudioOutputStream per
|
|
|
|
// audio track.
|
2015-05-13 16:34:56 +03:00
|
|
|
struct AudioOutputStream
|
|
|
|
{
|
2012-07-31 16:17:22 +04:00
|
|
|
// When we started audio playback for this track.
|
|
|
|
// Add mStream->GetPosition() to find the current audio playback position.
|
|
|
|
GraphTime mAudioPlaybackStartTime;
|
|
|
|
// Amount of time that we've wanted to play silence because of the stream
|
|
|
|
// blocking.
|
|
|
|
MediaTime mBlockedAudioTime;
|
2014-03-24 14:06:06 +04:00
|
|
|
// Last tick written to the audio output.
|
2014-09-18 09:20:43 +04:00
|
|
|
StreamTime mLastTickWritten;
|
2012-07-31 16:17:22 +04:00
|
|
|
TrackID mTrackID;
|
|
|
|
};
|
|
|
|
nsTArray<AudioOutputStream> mAudioOutputStreams;
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
/**
|
|
|
|
* When true, this means the stream will be finished once all
|
|
|
|
* buffered data has been consumed.
|
|
|
|
*/
|
|
|
|
bool mFinished;
|
|
|
|
/**
|
|
|
|
* When true, mFinished is true and we've played all the data in this stream
|
|
|
|
* and fired NotifyFinished notifications.
|
|
|
|
*/
|
|
|
|
bool mNotifiedFinished;
|
2013-01-02 17:49:18 +04:00
|
|
|
/**
|
|
|
|
* When true, the last NotifyBlockingChanged delivered to the listeners
|
|
|
|
* indicated that the stream is blocked.
|
|
|
|
*/
|
|
|
|
bool mNotifiedBlocked;
|
2013-03-20 15:19:39 +04:00
|
|
|
/**
|
|
|
|
* True if some data can be present by this stream if/when it's unblocked.
|
|
|
|
* Set by the stream itself on the MediaStreamGraph thread. Only changes
|
|
|
|
* from false to true once a stream has data, since we won't
|
|
|
|
* unblock it until there's more data.
|
|
|
|
*/
|
|
|
|
bool mHasCurrentData;
|
|
|
|
/**
|
|
|
|
* True if mHasCurrentData is true and we've notified listeners.
|
|
|
|
*/
|
|
|
|
bool mNotifiedHasCurrentData;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2012-10-29 08:34:17 +04:00
|
|
|
// True if the stream is being consumed (i.e. has track data being played,
|
|
|
|
// or is feeding into some stream that is being consumed).
|
2012-07-31 16:17:21 +04:00
|
|
|
bool mIsConsumed;
|
|
|
|
// Temporary data for computing blocking status of streams
|
|
|
|
// True if we've added this stream to the set of streams we're computing
|
|
|
|
// blocking for.
|
|
|
|
bool mInBlockingSet;
|
|
|
|
// True if this stream should be blocked in this phase.
|
|
|
|
bool mBlockInThisPhase;
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// This state is only used on the main thread.
|
2013-02-15 12:01:58 +04:00
|
|
|
DOMMediaStream* mWrapper;
|
2012-04-30 07:11:26 +04:00
|
|
|
// Main-thread views of state
|
|
|
|
StreamTime mMainThreadCurrentTime;
|
|
|
|
bool mMainThreadFinished;
|
2015-05-11 17:07:24 +03:00
|
|
|
bool mFinishedNotificationSent;
|
2012-08-09 15:30:09 +04:00
|
|
|
bool mMainThreadDestroyed;
|
2013-02-01 23:43:36 +04:00
|
|
|
|
2014-07-25 01:09:22 +04:00
|
|
|
// Our media stream graph. null if destroyed on the graph thread.
|
2013-02-01 23:43:36 +04:00
|
|
|
MediaStreamGraphImpl* mGraph;
|
2014-04-18 13:23:36 +04:00
|
|
|
|
|
|
|
dom::AudioChannel mAudioChannelType;
|
2012-04-30 07:11:26 +04:00
|
|
|
};
|
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
/**
|
|
|
|
* This is a stream into which a decoder can write audio and video.
|
|
|
|
*
|
|
|
|
* Audio and video can be written on any thread, but you probably want to
|
|
|
|
* always write from the same thread to avoid unexpected interleavings.
|
|
|
|
*/
|
2015-05-13 16:34:56 +03:00
|
|
|
class SourceMediaStream : public MediaStream
|
|
|
|
{
|
2012-04-30 07:11:40 +04:00
|
|
|
public:
|
2014-09-01 07:50:23 +04:00
|
|
|
// Constructs a SourceMediaStream wrapping aWrapper. Pulling starts
// disabled; enable it with SetPullEnabled().
explicit SourceMediaStream(DOMMediaStream* aWrapper) :
  MediaStream(aWrapper),
  mLastConsumptionState(MediaStreamListener::NOT_CONSUMED),
  mMutex("mozilla::media::SourceMediaStream"),
  mUpdateKnownTracksTime(0),
  mPullEnabled(false),
  mUpdateFinished(false),
  mNeedsMixing(false)
{}
|
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual SourceMediaStream* AsSourceStream() override { return this; }
|
2012-04-30 07:11:40 +04:00
|
|
|
|
2012-05-23 10:01:15 +04:00
|
|
|
// Media graph thread only
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual void DestroyImpl() override;
|
2012-05-23 10:01:15 +04:00
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
// Call these on any thread.
|
2012-07-20 23:36:03 +04:00
|
|
|
/**
|
|
|
|
* Enable or disable pulling. When pulling is enabled, NotifyPull
|
|
|
|
* gets called on MediaStreamListeners for this stream during the
|
|
|
|
* MediaStreamGraph control loop. Pulling is initially disabled.
|
|
|
|
* Due to unavoidable race conditions, after a call to SetPullEnabled(false)
|
|
|
|
* it is still possible for a NotifyPull to occur.
|
|
|
|
*/
|
|
|
|
void SetPullEnabled(bool aEnabled);
|
2013-08-24 17:53:11 +04:00
|
|
|
|
2014-08-17 10:09:21 +04:00
|
|
|
/**
|
|
|
|
* These add/remove DirectListeners, which allow bypassing the graph and any
|
|
|
|
* synchronization delays for e.g. PeerConnection, which wants the data ASAP
|
|
|
|
* and lets the far-end handle sync and playout timing.
|
|
|
|
*/
|
|
|
|
void NotifyListenersEventImpl(MediaStreamListener::MediaStreamGraphEvent aEvent);
|
|
|
|
void NotifyListenersEvent(MediaStreamListener::MediaStreamGraphEvent aEvent);
|
2013-08-24 17:53:11 +04:00
|
|
|
void AddDirectListener(MediaStreamDirectListener* aListener);
|
|
|
|
void RemoveDirectListener(MediaStreamDirectListener* aListener);
|
|
|
|
|
2015-02-19 20:04:26 +03:00
|
|
|
enum {
|
|
|
|
ADDTRACK_QUEUED = 0x01 // Queue track add until FinishAddTracks()
|
|
|
|
};
|
2012-04-30 07:11:40 +04:00
|
|
|
/**
|
|
|
|
* Add a new track to the stream starting at the given base time (which
|
|
|
|
* must be greater than or equal to the last time passed to
|
|
|
|
* AdvanceKnownTracksTime). Takes ownership of aSegment. aSegment should
|
|
|
|
* contain data starting after aStart.
|
|
|
|
*/
|
2015-02-19 20:04:26 +03:00
|
|
|
void AddTrack(TrackID aID, StreamTime aStart, MediaSegment* aSegment,
|
|
|
|
uint32_t aFlags = 0)
|
2014-09-18 03:50:02 +04:00
|
|
|
{
|
2015-02-19 20:04:26 +03:00
|
|
|
AddTrackInternal(aID, GraphRate(), aStart, aSegment, aFlags);
|
2014-09-18 03:50:02 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Like AddTrack, but resamples audio from aRate to the graph rate.
|
|
|
|
*/
|
2014-09-18 09:20:43 +04:00
|
|
|
void AddAudioTrack(TrackID aID, TrackRate aRate, StreamTime aStart,
|
2015-02-19 20:04:26 +03:00
|
|
|
AudioSegment* aSegment, uint32_t aFlags = 0)
|
2014-09-18 03:50:02 +04:00
|
|
|
{
|
2015-02-19 20:04:26 +03:00
|
|
|
AddTrackInternal(aID, aRate, aStart, aSegment, aFlags);
|
2014-09-18 03:50:02 +04:00
|
|
|
}
|
2014-03-24 14:06:05 +04:00
|
|
|
|
2015-02-19 20:04:26 +03:00
|
|
|
/**
|
|
|
|
* Call after a series of AddTrack or AddAudioTrack calls to implement
|
|
|
|
* any pending track adds.
|
|
|
|
*/
|
|
|
|
void FinishAddTracks();
|
|
|
|
|
2015-06-15 04:11:00 +03:00
|
|
|
/**
|
|
|
|
* Find track by track id.
|
|
|
|
*/
|
|
|
|
StreamBuffer::Track* FindTrack(TrackID aID);
|
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
/**
|
|
|
|
* Append media data to a track. Ownership of aSegment remains with the caller,
|
|
|
|
* but aSegment is emptied.
|
2013-02-25 13:25:07 +04:00
|
|
|
* Returns false if the data was not appended because no such track exists
|
|
|
|
* or the stream was already finished.
|
2012-04-30 07:11:40 +04:00
|
|
|
*/
|
2013-08-24 17:53:11 +04:00
|
|
|
bool AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment = nullptr);
|
2014-12-30 04:54:01 +03:00
|
|
|
/**
|
|
|
|
* Get the stream time of the end of the data that has been appended so far.
|
|
|
|
* Can be called from any thread but won't be useful if it can race with
|
|
|
|
* an AppendToTrack call, so should probably just be called from the thread
|
|
|
|
* that also calls AppendToTrack.
|
|
|
|
*/
|
|
|
|
StreamTime GetEndOfAppendedData(TrackID aID);
|
2012-04-30 07:11:40 +04:00
|
|
|
/**
|
|
|
|
* Indicate that a track has ended. Do not do any more API calls
|
|
|
|
* affecting this track.
|
2013-02-25 13:25:07 +04:00
|
|
|
* Ignored if the track does not exist.
|
2012-04-30 07:11:40 +04:00
|
|
|
*/
|
|
|
|
void EndTrack(TrackID aID);
|
|
|
|
/**
|
|
|
|
* Indicate that no tracks will be added starting before time aKnownTime.
|
|
|
|
* aKnownTime must be >= its value at the last call to AdvanceKnownTracksTime.
|
|
|
|
*/
|
|
|
|
void AdvanceKnownTracksTime(StreamTime aKnownTime);
|
|
|
|
/**
|
|
|
|
* Indicate that this stream should enter the "finished" state. All tracks
|
|
|
|
* must have been ended via EndTrack. The finish time of the stream is
|
2014-05-19 00:24:01 +04:00
|
|
|
* when all tracks have ended.
|
2012-04-30 07:11:40 +04:00
|
|
|
*/
|
2012-10-25 03:21:32 +04:00
|
|
|
void FinishWithLockHeld();
|
|
|
|
void Finish()
|
2014-12-09 13:37:01 +03:00
|
|
|
{
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
FinishWithLockHeld();
|
|
|
|
}
|
2012-10-25 03:21:32 +04:00
|
|
|
|
2013-08-26 10:07:17 +04:00
|
|
|
// Overriding allows us to hold the mMutex lock while changing the track enable status
|
2014-12-09 13:37:01 +03:00
|
|
|
virtual void
|
2015-03-21 19:28:04 +03:00
|
|
|
SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled) override {
|
2013-08-26 10:07:17 +04:00
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
MediaStream::SetTrackEnabledImpl(aTrackID, aEnabled);
|
|
|
|
}
|
2012-10-25 03:21:32 +04:00
|
|
|
|
2014-12-09 13:37:01 +03:00
|
|
|
// Overriding allows us to ensure mMutex is locked while changing the track enable status
|
|
|
|
virtual void
|
|
|
|
ApplyTrackDisabling(TrackID aTrackID, MediaSegment* aSegment,
|
2015-03-21 19:28:04 +03:00
|
|
|
MediaSegment* aRawSegment = nullptr) override {
|
2014-12-09 13:37:01 +03:00
|
|
|
mMutex.AssertCurrentThreadOwns();
|
|
|
|
MediaStream::ApplyTrackDisabling(aTrackID, aSegment, aRawSegment);
|
|
|
|
}
|
|
|
|
|
2012-10-25 03:21:32 +04:00
|
|
|
/**
|
|
|
|
* End all tracks and Finish() this stream. Used to voluntarily revoke access
|
|
|
|
* to a LocalMediaStream.
|
|
|
|
*/
|
|
|
|
void EndAllTrackAndFinish();
|
2012-04-30 07:11:40 +04:00
|
|
|
|
2013-08-24 17:53:01 +04:00
|
|
|
/**
|
|
|
|
* Note: Only call from Media Graph thread (eg NotifyPull)
|
|
|
|
*
|
|
|
|
* Returns amount of time (data) that is currently buffered in the track,
|
|
|
|
* assuming playout via PlayAudio or via a TrackUnion - note that
|
|
|
|
* NotifyQueuedTrackChanges() on a SourceMediaStream will occur without
|
|
|
|
* any "extra" buffering, but NotifyQueued TrackChanges() on a TrackUnion
|
|
|
|
* will be buffered.
|
|
|
|
*/
|
2014-09-18 09:20:43 +04:00
|
|
|
StreamTime GetBufferedTicks(TrackID aID);
|
2013-08-24 17:53:01 +04:00
|
|
|
|
2014-07-25 00:23:40 +04:00
|
|
|
void RegisterForAudioMixing();
|
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
// XXX need a Reset API
|
|
|
|
|
|
|
|
friend class MediaStreamGraphImpl;
|
|
|
|
|
2014-07-25 00:23:40 +04:00
|
|
|
protected:
|
2012-04-30 07:11:40 +04:00
|
|
|
struct ThreadAndRunnable {
|
2015-07-16 21:13:49 +03:00
|
|
|
void Init(TaskQueue* aTarget, nsIRunnable* aRunnable)
|
2012-04-30 07:11:40 +04:00
|
|
|
{
|
2014-02-18 02:53:53 +04:00
|
|
|
mTarget = aTarget;
|
2012-04-30 07:11:40 +04:00
|
|
|
mRunnable = aRunnable;
|
|
|
|
}
|
|
|
|
|
2015-07-16 21:13:49 +03:00
|
|
|
nsRefPtr<TaskQueue> mTarget;
|
2015-04-07 22:20:43 +03:00
|
|
|
nsCOMPtr<nsIRunnable> mRunnable;
|
2012-04-30 07:11:40 +04:00
|
|
|
};
|
|
|
|
enum TrackCommands {
|
2012-04-30 07:12:50 +04:00
|
|
|
TRACK_CREATE = MediaStreamListener::TRACK_EVENT_CREATED,
|
|
|
|
TRACK_END = MediaStreamListener::TRACK_EVENT_ENDED
|
2012-04-30 07:11:40 +04:00
|
|
|
};
|
|
|
|
/**
|
|
|
|
* Data for each track that hasn't ended.
|
|
|
|
*/
|
|
|
|
struct TrackData {
|
|
|
|
TrackID mID;
|
2014-03-24 14:06:05 +04:00
|
|
|
// Sample rate of the input data.
|
|
|
|
TrackRate mInputRate;
|
|
|
|
// Resampler if the rate of the input track does not match the
|
|
|
|
// MediaStreamGraph's.
|
|
|
|
nsAutoRef<SpeexResamplerState> mResampler;
|
2014-06-09 04:11:41 +04:00
|
|
|
#ifdef DEBUG
|
|
|
|
int mResamplerChannelCount;
|
|
|
|
#endif
|
2014-09-18 09:20:43 +04:00
|
|
|
StreamTime mStart;
|
2014-12-30 04:54:01 +03:00
|
|
|
// End-time of data already flushed to the track (excluding mData)
|
|
|
|
StreamTime mEndOfFlushedData;
|
2012-04-30 07:11:40 +04:00
|
|
|
// Each time the track updates are flushed to the media graph thread,
|
|
|
|
// the segment buffer is emptied.
|
|
|
|
nsAutoPtr<MediaSegment> mData;
|
2014-12-30 04:54:01 +03:00
|
|
|
// Each time the track updates are flushed to the media graph thread,
|
|
|
|
// this is cleared.
|
|
|
|
uint32_t mCommands;
|
2012-04-30 07:11:40 +04:00
|
|
|
};
|
|
|
|
|
2014-03-24 14:06:06 +04:00
|
|
|
  // Whether the audio output of this stream needs to go through the mixer.
  // (Defined in MediaStreamGraph.cpp.)
  bool NeedsMixing();

  // Resample the audio in aSegment from aTrackData->mInputRate to the
  // graph's sample rate, using aTrackData->mResampler.
  void ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment);

  // Shared implementation behind the public track-adding entry points:
  // records a new track (ID, input rate, start time, initial segment, flags).
  void AddTrackInternal(TrackID aID, TrackRate aRate,
                        StreamTime aStart, MediaSegment* aSegment,
                        uint32_t aFlags);
|
2014-09-18 03:50:02 +04:00
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
TrackData* FindDataForTrack(TrackID aID)
|
|
|
|
{
|
2014-08-26 05:20:44 +04:00
|
|
|
mMutex.AssertCurrentThreadOwns();
|
2012-08-22 19:56:38 +04:00
|
|
|
for (uint32_t i = 0; i < mUpdateTracks.Length(); ++i) {
|
2012-04-30 07:11:40 +04:00
|
|
|
if (mUpdateTracks[i].mID == aID) {
|
|
|
|
return &mUpdateTracks[i];
|
|
|
|
}
|
|
|
|
}
|
2012-07-30 18:20:58 +04:00
|
|
|
return nullptr;
|
2012-04-30 07:11:40 +04:00
|
|
|
}
|
|
|
|
|
2013-08-24 17:53:11 +04:00
|
|
|
  /**
   * Notify direct consumers of new data to one of the stream tracks.
   * The data doesn't have to be resampled (though it may be). This is called
   * from AppendToTrack on the thread providing the data, and will call
   * the Listeners on this thread.
   */
  void NotifyDirectConsumers(TrackData *aTrack,
                             MediaSegment *aSegment);
|
|
|
|
|
2012-06-01 10:26:17 +04:00
|
|
|
  // Media stream graph thread only
  MediaStreamListener::Consumption mLastConsumptionState;

  // This must be acquired *before* MediaStreamGraphImpl's lock, if they are
  // held together.
  Mutex mMutex;
  // All of the following are protected by mMutex.
  StreamTime mUpdateKnownTracksTime;
  // Tracks with pending updates to be flushed to the graph thread.
  nsTArray<TrackData> mUpdateTracks;
  nsTArray<TrackData> mPendingTracks;
  // Listeners receiving un-flushed data directly (see NotifyDirectConsumers).
  nsTArray<nsRefPtr<MediaStreamDirectListener> > mDirectListeners;
  bool mPullEnabled;
  bool mUpdateFinished;
  bool mNeedsMixing;
};
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
/**
 * Represents a connection between a ProcessedMediaStream and one of its
 * input streams.
 * We make these refcounted so that stream-related messages with MediaInputPort*
 * pointers can be sent to the main thread safely.
 *
 * When a port's source or destination stream dies, the stream's DestroyImpl
 * calls MediaInputPort::Disconnect to disconnect the port from
 * the source and destination streams.
 *
 * The lifetimes of MediaInputPort are controlled from the main thread.
 * The media graph adds a reference to the port. When a MediaInputPort is no
 * longer needed, main-thread code sends a Destroy message for the port and
 * clears its reference (the last main-thread reference to the object). When
 * the Destroy message is processed on the graph manager thread we disconnect
 * the port and drop the graph's reference, destroying the object.
 */
class MediaInputPort final
{
private:
  // Do not call this constructor directly. Instead call aDest->AllocateInputPort.
  MediaInputPort(MediaStream* aSource, ProcessedMediaStream* aDest,
                 uint32_t aFlags, uint16_t aInputNumber,
                 uint16_t aOutputNumber)
    : mSource(aSource)
    , mDest(aDest)
    , mFlags(aFlags)
    , mInputNumber(aInputNumber)
    , mOutputNumber(aOutputNumber)
    , mGraph(nullptr)
  {
    MOZ_COUNT_CTOR(MediaInputPort);
  }

  // Private destructor, to discourage deletion outside of Release():
  ~MediaInputPort()
  {
    MOZ_COUNT_DTOR(MediaInputPort);
  }

public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaInputPort)

  /**
   * The FLAG_BLOCK_INPUT and FLAG_BLOCK_OUTPUT flags can be used to control
   * exactly how the blocking statuses of the input and output streams affect
   * each other.
   */
  enum {
    // When set, blocking on the output stream forces blocking on the input
    // stream.
    FLAG_BLOCK_INPUT = 0x01,
    // When set, blocking on the input stream forces blocking on the output
    // stream.
    FLAG_BLOCK_OUTPUT = 0x02
  };

  // Called on graph manager thread
  // Do not call these from outside MediaStreamGraph.cpp!
  void Init();
  // Called during message processing to trigger removal of this stream.
  void Disconnect();

  // Control API
  /**
   * Disconnects and destroys the port. The caller must not reference this
   * object again.
   */
  void Destroy();

  // Any thread
  MediaStream* GetSource() { return mSource; }
  ProcessedMediaStream* GetDestination() { return mDest; }

  uint16_t InputNumber() const { return mInputNumber; }
  uint16_t OutputNumber() const { return mOutputNumber; }

  // Call on graph manager thread
  struct InputInterval {
    GraphTime mStart;
    GraphTime mEnd;
    bool mInputIsBlocked;
  };
  // Find the next time interval starting at or after aTime during which
  // mDest is not blocked and mSource's blocking status does not change.
  InputInterval GetNextInputInterval(GraphTime aTime);

  /**
   * Returns the graph that owns this port.
   */
  MediaStreamGraphImpl* GraphImpl();
  MediaStreamGraph* Graph();
  /**
   * Sets the graph that owns this stream. Should only be called once.
   */
  void SetGraphImpl(MediaStreamGraphImpl* aGraph);

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
  {
    size_t amount = 0;

    // Not owned:
    // - mSource
    // - mDest
    // - mGraph
    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
  friend class MediaStreamGraphImpl;
  friend class MediaStream;
  friend class ProcessedMediaStream;
  // Never modified after Init()
  MediaStream* mSource;
  ProcessedMediaStream* mDest;
  uint32_t mFlags;
  // The input and output numbers are optional, and are currently only used by
  // Web Audio.
  const uint16_t mInputNumber;
  const uint16_t mOutputNumber;

  // Our media stream graph (non-owning; set once via SetGraphImpl)
  MediaStreamGraphImpl* mGraph;
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This stream processes zero or more input streams in parallel to produce
|
|
|
|
* its output. The details of how the output is produced are handled by
|
2014-03-05 01:53:55 +04:00
|
|
|
* subclasses overriding the ProcessInput method.
|
2012-07-31 16:17:21 +04:00
|
|
|
*/
|
2015-05-13 16:34:56 +03:00
|
|
|
class ProcessedMediaStream : public MediaStream
|
|
|
|
{
|
2012-07-31 16:17:21 +04:00
|
|
|
public:
|
2014-09-01 07:50:23 +04:00
|
|
|
explicit ProcessedMediaStream(DOMMediaStream* aWrapper)
|
2014-07-17 04:55:55 +04:00
|
|
|
: MediaStream(aWrapper), mAutofinish(false)
|
2012-07-31 16:17:21 +04:00
|
|
|
{}
|
|
|
|
|
|
|
|
// Control API.
|
|
|
|
/**
|
|
|
|
* Allocates a new input port attached to source aStream.
|
|
|
|
* This stream can be removed by calling MediaInputPort::Remove().
|
|
|
|
*/
|
2012-11-23 02:25:05 +04:00
|
|
|
already_AddRefed<MediaInputPort> AllocateInputPort(MediaStream* aStream,
|
2013-05-05 19:47:36 +04:00
|
|
|
uint32_t aFlags = 0,
|
|
|
|
uint16_t aInputNumber = 0,
|
|
|
|
uint16_t aOutputNumber = 0);
|
2012-07-31 16:17:21 +04:00
|
|
|
/**
|
|
|
|
* Force this stream into the finished state.
|
|
|
|
*/
|
|
|
|
void Finish();
|
|
|
|
/**
|
|
|
|
* Set the autofinish flag on this stream (defaults to false). When this flag
|
|
|
|
* is set, and all input streams are in the finished state (including if there
|
|
|
|
* are no input streams), this stream automatically enters the finished state.
|
|
|
|
*/
|
|
|
|
void SetAutofinish(bool aAutofinish);
|
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual ProcessedMediaStream* AsProcessedStream() override { return this; }
|
2012-07-31 16:17:21 +04:00
|
|
|
|
|
|
|
friend class MediaStreamGraphImpl;
|
|
|
|
|
|
|
|
// Do not call these from outside MediaStreamGraph.cpp!
|
2013-09-13 20:12:07 +04:00
|
|
|
virtual void AddInput(MediaInputPort* aPort);
|
2012-07-31 16:17:21 +04:00
|
|
|
virtual void RemoveInput(MediaInputPort* aPort)
|
|
|
|
{
|
|
|
|
mInputs.RemoveElement(aPort);
|
|
|
|
}
|
|
|
|
bool HasInputPort(MediaInputPort* aPort)
|
|
|
|
{
|
|
|
|
return mInputs.Contains(aPort);
|
|
|
|
}
|
2013-12-12 18:31:51 +04:00
|
|
|
uint32_t InputPortCount()
|
|
|
|
{
|
|
|
|
return mInputs.Length();
|
|
|
|
}
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual void DestroyImpl() override;
|
2012-07-31 16:17:21 +04:00
|
|
|
/**
|
|
|
|
* This gets called after we've computed the blocking states for all
|
|
|
|
* streams (mBlocked is up to date up to mStateComputedTime).
|
|
|
|
* Also, we've produced output for all streams up to this one. If this stream
|
|
|
|
* is not in a cycle, then all its source streams have produced data.
|
2013-12-06 00:23:57 +04:00
|
|
|
* Generate output from aFrom to aTo.
|
2013-12-10 04:49:03 +04:00
|
|
|
* This will be called on streams that have finished. Most stream types should
|
|
|
|
* just return immediately if IsFinishedOnGraphThread(), but some may wish to
|
|
|
|
* update internal state (see AudioNodeStream).
|
2014-03-05 01:53:55 +04:00
|
|
|
* ProcessInput is allowed to call FinishOnGraphThread only if ALLOW_FINISH
|
2013-12-06 00:23:57 +04:00
|
|
|
* is in aFlags. (This flag will be set when aTo >= mStateComputedTime, i.e.
|
|
|
|
* when we've producing the last block of data we need to produce.) Otherwise
|
|
|
|
* we can get into a situation where we've determined the stream should not
|
|
|
|
* block before mStateComputedTime, but the stream finishes before
|
|
|
|
* mStateComputedTime, violating the invariant that finished streams are blocked.
|
2012-07-31 16:17:21 +04:00
|
|
|
*/
|
2013-12-06 00:23:57 +04:00
|
|
|
enum {
|
|
|
|
ALLOW_FINISH = 0x01
|
|
|
|
};
|
2014-03-05 01:53:55 +04:00
|
|
|
virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) = 0;
|
2012-07-31 16:17:21 +04:00
|
|
|
void SetAutofinishImpl(bool aAutofinish) { mAutofinish = aAutofinish; }
|
|
|
|
|
2013-08-26 10:07:19 +04:00
|
|
|
/**
|
|
|
|
* Forward SetTrackEnabled() to the input MediaStream(s) and translate the ID
|
|
|
|
*/
|
|
|
|
virtual void ForwardTrackEnabled(TrackID aOutputID, bool aEnabled) {};
|
|
|
|
|
2014-07-17 04:55:55 +04:00
|
|
|
// Only valid after MediaStreamGraphImpl::UpdateStreamOrder() has run.
|
|
|
|
// A DelayNode is considered to break a cycle and so this will not return
|
|
|
|
// true for echo loops, only for muted cycles.
|
|
|
|
bool InMutedCycle() const { return mCycleMarker; }
|
2013-09-02 17:15:24 +04:00
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
|
2014-04-13 22:08:10 +04:00
|
|
|
{
|
|
|
|
size_t amount = MediaStream::SizeOfExcludingThis(aMallocSizeOf);
|
|
|
|
// Not owned:
|
|
|
|
// - mInputs elements
|
2015-07-29 09:24:24 +03:00
|
|
|
amount += mInputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
|
2014-04-13 22:08:10 +04:00
|
|
|
return amount;
|
|
|
|
}
|
|
|
|
|
2015-03-21 19:28:04 +03:00
|
|
|
virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
|
2014-04-13 22:08:10 +04:00
|
|
|
{
|
|
|
|
return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
|
|
|
|
}
|
2013-09-02 17:15:24 +04:00
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
protected:
|
|
|
|
// This state is all accessed only on the media graph thread.
|
|
|
|
|
|
|
|
// The list of all inputs that are currently enabled or waiting to be enabled.
|
|
|
|
nsTArray<MediaInputPort*> mInputs;
|
|
|
|
bool mAutofinish;
|
2014-07-17 04:55:55 +04:00
|
|
|
// After UpdateStreamOrder(), mCycleMarker is either 0 or 1 to indicate
|
|
|
|
// whether this stream is in a muted cycle. During ordering it can contain
|
|
|
|
// other marker values - see MediaStreamGraphImpl::UpdateStreamOrder().
|
|
|
|
uint32_t mCycleMarker;
|
2012-07-31 16:17:21 +04:00
|
|
|
};
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
/**
 * Initially, at least, we will have a singleton MediaStreamGraph per
 * process. Each OfflineAudioContext object creates its own MediaStreamGraph
 * object too.
 */
class MediaStreamGraph
{
public:
  // We ensure that the graph current time advances in multiples of
  // IdealAudioBlockSize()/AudioStream::PreferredSampleRate(). A stream that
  // never blocks and has a track with the ideal audio rate will produce audio
  // in multiples of the block size.
  //

  // Main thread only
  static MediaStreamGraph* GetInstance(bool aStartWithAudioDriver = false,
                                       dom::AudioChannel aChannel = dom::AudioChannel::Normal);
  static MediaStreamGraph* CreateNonRealtimeInstance(TrackRate aSampleRate);
  // Idempotent
  static void DestroyNonRealtimeInstance(MediaStreamGraph* aGraph);

  // Control API.
  /**
   * Create a stream that a media decoder (or some other source of
   * media data, such as a camera) can write to.
   */
  SourceMediaStream* CreateSourceStream(DOMMediaStream* aWrapper);
  /**
   * Create a stream that will form the union of the tracks of its input
   * streams.
   * A TrackUnionStream contains all the tracks of all its input streams.
   * Adding a new input stream makes that stream's tracks immediately appear as new
   * tracks starting at the time the input stream was added.
   * Removing an input stream makes the output tracks corresponding to the
   * removed tracks immediately end.
   * For each added track, the track ID of the output track is the track ID
   * of the input track or one plus the maximum ID of all previously added
   * tracks, whichever is greater.
   * TODO at some point we will probably need to add API to select
   * particular tracks of each input stream.
   */
  ProcessedMediaStream* CreateTrackUnionStream(DOMMediaStream* aWrapper);
  /**
   * Create a stream that will mix all its audio input.
   */
  ProcessedMediaStream* CreateAudioCaptureStream(DOMMediaStream* aWrapper);
  // Internal AudioNodeStreams can only pass their output to another
  // AudioNode, whereas external AudioNodeStreams can pass their output
  // to an nsAudioStream for playback.
  enum AudioNodeStreamKind { SOURCE_STREAM, INTERNAL_STREAM, EXTERNAL_STREAM };
  /**
   * Create a stream that will process audio for an AudioNode.
   * Takes ownership of aEngine.
   */
  AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
                                         AudioNodeStreamKind aKind);

  // Like CreateAudioNodeStream, but for streams that accept input from
  // outside the Web Audio graph. Takes ownership of aEngine.
  AudioNodeExternalInputStream*
  CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine);

  /* From the main thread, ask the MSG to send back an event when the graph
   * thread is running, and audio is being processed. */
  void NotifyWhenGraphStarted(AudioNodeStream* aNodeStream);
  /* From the main thread, suspend, resume or close an AudioContext.
   * aNodeStream is the stream of the DestinationNode of the AudioContext.
   *
   * This can possibly pause the graph thread, releasing system resources, if
   * all streams have been suspended/closed.
   *
   * When the operation is complete, aPromise is resolved.
   */
  void ApplyAudioContextOperation(AudioNodeStream* aNodeStream,
                                  dom::AudioContextOperation aState,
                                  void * aPromise);

  bool IsNonRealtime() const;
  /**
   * Start processing non-realtime for a specific number of ticks.
   */
  void StartNonRealtimeProcessing(uint32_t aTicksToProcess);

  /**
   * Media graph thread only.
   * Dispatches a runnable that will run on the main thread after all
   * main-thread stream state has been next updated.
   * Should only be called during MediaStreamListener callbacks or during
   * ProcessedMediaStream::ProcessInput().
   */
  virtual void DispatchToMainThreadAfterStreamStateUpdate(already_AddRefed<nsIRunnable> aRunnable)
  {
    // Transfers the already_AddRefed reference into the new array slot.
    *mPendingUpdateRunnables.AppendElement() = aRunnable;
  }

  /**
   * Returns graph sample rate in Hz.
   */
  TrackRate GraphRate() const { return mSampleRate; }

  void RegisterCaptureStreamForWindow(uint64_t aWindowId,
                                      ProcessedMediaStream* aCaptureStream);
  void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
  already_AddRefed<MediaInputPort> ConnectToCaptureStream(
    uint64_t aWindowId, MediaStream* aMediaStream);

protected:
  explicit MediaStreamGraph(TrackRate aSampleRate)
    : mSampleRate(aSampleRate)
  {
    MOZ_COUNT_CTOR(MediaStreamGraph);
  }
  virtual ~MediaStreamGraph()
  {
    MOZ_COUNT_DTOR(MediaStreamGraph);
  }

  // Media graph thread only
  nsTArray<nsCOMPtr<nsIRunnable> > mPendingUpdateRunnables;

  /**
   * Sample rate at which this graph runs. For real time graphs, this is
   * the rate of the audio mixer. For offline graphs, this is the rate specified
   * at construction.
   */
  TrackRate mSampleRate;
};
|
|
|
|
|
2015-07-13 18:25:42 +03:00
|
|
|
} // namespace mozilla
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
#endif /* MOZILLA_MEDIASTREAMGRAPH_H_ */
|