2012-05-11 21:35:36 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
|
2012-04-30 07:11:26 +04:00
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
2013-02-04 14:04:25 +04:00
|
|
|
#include "MediaStreamGraphImpl.h"
|
2014-04-13 23:41:07 +04:00
|
|
|
#include "mozilla/MathAlgorithms.h"
|
2016-08-23 07:09:32 +03:00
|
|
|
#include "mozilla/Unused.h"
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
#include "AudioSegment.h"
|
|
|
|
#include "VideoSegment.h"
|
|
|
|
#include "nsContentUtils.h"
|
|
|
|
#include "nsIObserver.h"
|
2014-04-13 22:08:10 +04:00
|
|
|
#include "nsPrintfCString.h"
|
2012-04-30 07:11:26 +04:00
|
|
|
#include "nsServiceManagerUtils.h"
|
2014-04-13 22:08:10 +04:00
|
|
|
#include "prerror.h"
|
2015-05-19 21:15:34 +03:00
|
|
|
#include "mozilla/Logging.h"
|
2012-06-19 06:30:09 +04:00
|
|
|
#include "mozilla/Attributes.h"
|
2012-07-31 16:17:21 +04:00
|
|
|
#include "TrackUnionStream.h"
|
2012-08-21 08:06:46 +04:00
|
|
|
#include "ImageContainer.h"
|
2015-07-24 15:28:16 +03:00
|
|
|
#include "AudioCaptureStream.h"
|
2013-01-14 02:46:57 +04:00
|
|
|
#include "AudioNodeStream.h"
|
2013-07-24 15:29:39 +04:00
|
|
|
#include "AudioNodeExternalInputStream.h"
|
2016-06-30 10:07:48 +03:00
|
|
|
#include "MediaStreamListener.h"
|
2016-05-30 06:32:23 +03:00
|
|
|
#include "MediaStreamVideoSink.h"
|
2016-12-21 12:52:50 +03:00
|
|
|
#include "mozilla/dom/BaseAudioContextBinding.h"
|
2016-01-22 21:49:54 +03:00
|
|
|
#include "mozilla/media/MediaUtils.h"
|
2013-01-15 16:22:03 +04:00
|
|
|
#include <algorithm>
|
2013-07-19 18:40:57 +04:00
|
|
|
#include "GeckoProfiler.h"
|
2016-05-27 09:33:48 +03:00
|
|
|
#include "VideoFrameContainer.h"
|
2016-12-08 11:00:12 +03:00
|
|
|
#include "mozilla/AbstractThread.h"
|
2016-08-23 07:09:32 +03:00
|
|
|
#include "mozilla/Unused.h"
|
2014-04-21 11:15:34 +04:00
|
|
|
#ifdef MOZ_WEBRTC
|
2014-04-02 21:58:19 +04:00
|
|
|
#include "AudioOutputObserver.h"
|
2014-04-21 11:15:34 +04:00
|
|
|
#endif
|
2016-01-21 19:51:35 +03:00
|
|
|
#include "mtransport/runnable_utils.h"
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2016-08-05 22:33:08 +03:00
|
|
|
#include "webaudio/blink/DenormalDisabler.h"
|
2014-10-10 06:30:28 +04:00
|
|
|
#include "webaudio/blink/HRTFDatabaseLoader.h"
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
using namespace mozilla::layers;
|
2012-11-16 07:25:26 +04:00
|
|
|
using namespace mozilla::dom;
|
2013-12-31 13:06:12 +04:00
|
|
|
using namespace mozilla::gfx;
|
2016-03-16 18:00:34 +03:00
|
|
|
using namespace mozilla::media;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
|
2015-11-15 16:49:01 +03:00
|
|
|
LazyLogModule gMediaStreamGraphLog("MediaStreamGraph");
|
2017-02-06 18:22:36 +03:00
|
|
|
#ifdef LOG
|
|
|
|
#undef LOG
|
|
|
|
#endif // LOG
|
|
|
|
#define LOG(type, msg) MOZ_LOG(gMediaStreamGraphLog, type, msg)
|
2014-08-31 16:19:48 +04:00
|
|
|
|
2016-06-30 10:07:48 +03:00
|
|
|
// Commands attached to pending SourceMediaStream track updates. Each value
// aliases the matching TrackEventCommand bit, so the two enums can be mixed
// freely in bitmask tests (see the `data->mCommands &` checks in
// ExtractPendingInput below).
enum SourceMediaStream::TrackCommands : uint32_t {
  // The track is being created by this update.
  TRACK_CREATE = TrackEventCommand::TRACK_EVENT_CREATED,
  // The track ends with this update.
  TRACK_END = TrackEventCommand::TRACK_EVENT_ENDED,
  // NOTE(review): presumably marks a track slot no longer in use; this chunk
  // only ever asserts it is NOT set (MOZ_ASSERT in ExtractPendingInput) --
  // confirm semantics against TrackEventCommand's definition.
  TRACK_UNUSED = TrackEventCommand::TRACK_EVENT_UNUSED,
};
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
/**
 * A hash table containing the graph instances, one per document.
 *
 * NOTE(review): keyed by a uint32; what that id represents (window id,
 * audio-channel value, ...) is not visible in this chunk -- confirm at the
 * insertion site.
 */
static nsDataHashtable<nsUint32HashKey, MediaStreamGraphImpl*> gGraphs;
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2013-11-18 15:48:04 +04:00
|
|
|
MediaStreamGraphImpl::~MediaStreamGraphImpl()
{
  // By the time the graph is destroyed, every stream must already have been
  // removed via messages from the main thread; IsEmpty() verifies that.
  NS_ASSERTION(IsEmpty(),
    "All streams should have been destroyed by messages from the main thread");
  LOG(LogLevel::Debug, ("MediaStreamGraph %p destroyed", this));
  LOG(LogLevel::Debug, ("MediaStreamGraphImpl::~MediaStreamGraphImpl"));
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::FinishStream(MediaStream* aStream)
|
|
|
|
{
|
|
|
|
if (aStream->mFinished)
|
|
|
|
return;
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug, ("MediaStream %p will finish", aStream));
|
2016-02-05 15:20:20 +03:00
|
|
|
#ifdef DEBUG
|
2016-01-26 05:49:01 +03:00
|
|
|
for (StreamTracks::TrackIter track(aStream->mTracks);
|
2016-02-05 15:20:20 +03:00
|
|
|
!track.IsEnded(); track.Next()) {
|
|
|
|
if (!track->IsEnded()) {
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Error,
|
|
|
|
("MediaStream %p will finish, but track %d has not ended.",
|
|
|
|
aStream,
|
|
|
|
track->GetID()));
|
2016-02-05 15:20:20 +03:00
|
|
|
NS_ASSERTION(false, "Finished stream cannot contain live track");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2012-04-30 07:11:26 +04:00
|
|
|
aStream->mFinished = true;
|
2016-01-26 05:49:01 +03:00
|
|
|
aStream->mTracks.AdvanceKnownTracksTime(STREAM_TIME_MAX);
|
2014-01-29 17:34:35 +04:00
|
|
|
|
|
|
|
SetStreamOrderDirty();
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2015-08-12 02:29:35 +03:00
|
|
|
MediaStreamGraphImpl::AddStreamGraphThread(MediaStream* aStream)
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
2016-01-26 05:49:01 +03:00
|
|
|
aStream->mTracksStartTime = mProcessedTime;
|
2016-07-25 05:01:26 +03:00
|
|
|
|
|
|
|
if (aStream->AsSourceStream()) {
|
|
|
|
SourceMediaStream* source = aStream->AsSourceStream();
|
|
|
|
TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
|
|
|
|
TimeStamp processedTimeStamp = currentTimeStamp +
|
|
|
|
TimeDuration::FromSeconds(MediaTimeToSeconds(mProcessedTime - IterationEnd()));
|
|
|
|
source->SetStreamTracksStartTimeStamp(processedTimeStamp);
|
|
|
|
}
|
|
|
|
|
2015-09-16 07:15:21 +03:00
|
|
|
if (aStream->IsSuspended()) {
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
mSuspendedStreams.AppendElement(aStream);
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
|
|
|
("Adding media stream %p to the graph, in the suspended stream array",
|
|
|
|
aStream));
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
} else {
|
|
|
|
mStreams.AppendElement(aStream);
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
2017-07-26 23:03:57 +03:00
|
|
|
("Adding media stream %p to graph %p, count %zu",
|
2017-02-06 18:22:36 +03:00
|
|
|
aStream,
|
|
|
|
this,
|
|
|
|
mStreams.Length()));
|
|
|
|
LOG(LogLevel::Debug,
|
2017-07-26 23:03:57 +03:00
|
|
|
("Adding media stream %p to graph %p, count %zu",
|
2017-02-06 18:22:36 +03:00
|
|
|
aStream,
|
|
|
|
this,
|
2016-12-16 06:16:31 +03:00
|
|
|
mStreams.Length()));
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
2014-01-29 17:34:35 +04:00
|
|
|
|
|
|
|
SetStreamOrderDirty();
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2015-08-12 02:29:35 +03:00
|
|
|
MediaStreamGraphImpl::RemoveStreamGraphThread(MediaStream* aStream)
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
|
|
|
// Remove references in mStreamUpdates before we allow aStream to die.
|
|
|
|
// Pending updates are not needed (since the main thread has already given
|
|
|
|
// up the stream) so we will just drop them.
|
2015-06-11 22:10:06 +03:00
|
|
|
{
|
2014-08-26 19:01:33 +04:00
|
|
|
MonitorAutoLock lock(mMonitor);
|
2012-08-22 19:56:38 +04:00
|
|
|
for (uint32_t i = 0; i < mStreamUpdates.Length(); ++i) {
|
2012-04-30 07:11:26 +04:00
|
|
|
if (mStreamUpdates[i].mStream == aStream) {
|
2012-07-30 18:20:58 +04:00
|
|
|
mStreamUpdates[i].mStream = nullptr;
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-25 00:36:07 +04:00
|
|
|
// Ensure that mFirstCycleBreaker and mMixer are updated when necessary.
|
2014-01-29 17:34:35 +04:00
|
|
|
SetStreamOrderDirty();
|
|
|
|
|
2015-09-16 07:15:21 +03:00
|
|
|
if (aStream->IsSuspended()) {
|
|
|
|
mSuspendedStreams.RemoveElement(aStream);
|
|
|
|
} else {
|
|
|
|
mStreams.RemoveElement(aStream);
|
|
|
|
}
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
2017-07-26 23:03:57 +03:00
|
|
|
("Removed media stream %p from graph %p, count %zu",
|
2017-02-06 18:22:36 +03:00
|
|
|
aStream,
|
|
|
|
this,
|
|
|
|
mStreams.Length()));
|
|
|
|
LOG(LogLevel::Debug,
|
2017-07-26 23:03:57 +03:00
|
|
|
("Removed media stream %p from graph %p, count %zu",
|
2017-02-06 18:22:36 +03:00
|
|
|
aStream,
|
|
|
|
this,
|
2016-12-16 06:16:31 +03:00
|
|
|
mStreams.Length()));
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2016-03-08 20:11:08 +03:00
|
|
|
NS_RELEASE(aStream); // probably destroying it
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
// Flushes data that producers have queued on aStream (under aStream->mMutex)
// into the stream's real track buffers, firing listener notifications along
// the way. Runs with the stream mutex held, except while calling out to
// NotifyPull (see the MutexAutoUnlock below).
//
// aDesiredUpToTime: graph time we want the stream filled up to when pulling.
// aEnsureNextIteration: set to true when a pull was issued, so the caller
//   schedules another graph iteration to pick up the pulled data.
void
MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
                                          GraphTime aDesiredUpToTime,
                                          bool* aEnsureNextIteration)
{
  bool finished;
  {
    MutexAutoLock lock(aStream->mMutex);
    if (aStream->mPullEnabled && !aStream->mFinished &&
        !aStream->mListeners.IsEmpty()) {
      // Compute how much stream time we'll need assuming we don't block
      // the stream at all.
      StreamTime t = aStream->GraphTimeToStreamTime(aDesiredUpToTime);
      LOG(LogLevel::Verbose,
          ("Calling NotifyPull aStream=%p t=%f current end=%f",
           aStream,
           MediaTimeToSeconds(t),
           MediaTimeToSeconds(aStream->mTracks.GetEnd())));
      if (t > aStream->mTracks.GetEnd()) {
        *aEnsureNextIteration = true;
#ifdef DEBUG
        if (aStream->mListeners.Length() == 0) {
          LOG(
            LogLevel::Error,
            ("No listeners in NotifyPull aStream=%p desired=%f current end=%f",
             aStream,
             MediaTimeToSeconds(t),
             MediaTimeToSeconds(aStream->mTracks.GetEnd())));
          aStream->DumpTrackInfo();
        }
#endif
        for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
          MediaStreamListener* l = aStream->mListeners[j];
          {
            // Drop the stream mutex while calling out to the listener so it
            // can append data (which re-takes the mutex) without deadlocking.
            MutexAutoUnlock unlock(aStream->mMutex);
            l->NotifyPull(this, t);
          }
        }
      }
    }
    finished = aStream->mUpdateFinished;
    bool shouldNotifyTrackCreated = false;
    // Iterate in reverse because TRACK_END entries are removed in place
    // (RemoveElementAt(i) below) while we walk the array.
    for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
      SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
      aStream->ApplyTrackDisabling(data->mID, data->mData);
      // Dealing with NotifyQueuedTrackChanges and NotifyQueuedAudioData part.

      // The logic is different from the manipulating of aStream->mTracks part.
      // So it is not combined with the manipulating of aStream->mTracks part.

      // Offset of the queued data within the track: the queued start for a
      // track being created, otherwise the current end of the live segment.
      StreamTime offset =
        (data->mCommands & SourceMediaStream::TRACK_CREATE)
          ? data->mStart
          : aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();

      // Audio case.
      if (data->mData->GetType() == MediaSegment::AUDIO) {
        if (data->mCommands) {
          MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
          for (MediaStreamListener* l : aStream->mListeners) {
            // Ordering is deliberate: for an ending track the audio data is
            // delivered BEFORE the track-change event; for a newly created
            // track it is delivered AFTER. Do not reorder.
            if (data->mCommands & SourceMediaStream::TRACK_END) {
              l->NotifyQueuedAudioData(this, data->mID,
                  offset, *(static_cast<AudioSegment*>(data->mData.get())));
            }
            l->NotifyQueuedTrackChanges(this, data->mID,
                offset, static_cast<TrackEventCommand>(data->mCommands), *data->mData);
            if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
              l->NotifyQueuedAudioData(this, data->mID,
                  offset, *(static_cast<AudioSegment*>(data->mData.get())));
            }
          }
        } else {
          // No track event pending: just deliver the queued audio.
          for (MediaStreamListener* l : aStream->mListeners) {
            l->NotifyQueuedAudioData(this, data->mID,
                offset, *(static_cast<AudioSegment*>(data->mData.get())));
          }
        }
      }

      // Video case.
      if (data->mData->GetType() == MediaSegment::VIDEO) {
        if (data->mCommands) {
          MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
          for (MediaStreamListener* l : aStream->mListeners) {
            l->NotifyQueuedTrackChanges(this, data->mID,
                offset, static_cast<TrackEventCommand>(data->mCommands), *data->mData);
          }
        }
      }

      // Per-track listeners get the queued changes and, if ending, an
      // explicit NotifyEnded.
      for (TrackBound<MediaStreamTrackListener>& b : aStream->mTrackListeners) {
        if (b.mTrackID != data->mID) {
          continue;
        }
        b.mListener->NotifyQueuedChanges(this, offset, *data->mData);
        if (data->mCommands & SourceMediaStream::TRACK_END) {
          b.mListener->NotifyEnded();
        }
      }
      if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
        MediaSegment* segment = data->mData.forget();
        LOG(LogLevel::Debug,
            ("SourceMediaStream %p creating track %d, start %" PRId64
             ", initial end %" PRId64,
             aStream,
             data->mID,
             int64_t(data->mStart),
             int64_t(segment->GetDuration())));

        data->mEndOfFlushedData += segment->GetDuration();
        aStream->mTracks.AddTrack(data->mID, data->mStart, segment);
        // The track has taken ownership of data->mData, so let's replace
        // data->mData with an empty clone.
        data->mData = segment->CreateEmptyClone();
        // Clear the CREATE bit so subsequent updates append instead.
        data->mCommands &= ~SourceMediaStream::TRACK_CREATE;
        shouldNotifyTrackCreated = true;
      } else if (data->mData->GetDuration() > 0) {
        // Existing track: append the queued data to the live segment.
        MediaSegment* dest = aStream->mTracks.FindTrack(data->mID)->GetSegment();
        LOG(LogLevel::Verbose,
            ("SourceMediaStream %p track %d, advancing end from %" PRId64
             " to %" PRId64,
             aStream,
             data->mID,
             int64_t(dest->GetDuration()),
             int64_t(dest->GetDuration() + data->mData->GetDuration())));
        data->mEndOfFlushedData += data->mData->GetDuration();
        dest->AppendFrom(data->mData);
      }
      if (data->mCommands & SourceMediaStream::TRACK_END) {
        aStream->mTracks.FindTrack(data->mID)->SetEnded();
        // Safe because the outer loop iterates in reverse.
        aStream->mUpdateTracks.RemoveElementAt(i);
      }
    }
    if (shouldNotifyTrackCreated) {
      for (MediaStreamListener* l : aStream->mListeners) {
        l->NotifyFinishedTrackCreation(this);
      }
    }
    if (!aStream->mFinished) {
      aStream->mTracks.AdvanceKnownTracksTime(aStream->mUpdateKnownTracksTime);
    }
  }
  if (aStream->mTracks.GetEnd() > 0) {
    aStream->mHasCurrentData = true;
  }
  if (finished) {
    FinishStream(aStream);
  }
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// Maps a graph-time instant to this stream's own timeline, accounting for
// blocking: once the stream starts blocking, its stream time stops
// advancing, so the input time is clamped at mStartBlocking before the
// stream's start offset is removed. The result is clamped to be
// non-negative.
StreamTime
MediaStreamGraphImpl::GraphTimeToStreamTimeWithBlocking(MediaStream* aStream,
                                                        GraphTime aTime)
{
  MOZ_ASSERT(aTime <= mStateComputedTime,
             "Don't ask about times where we haven't made blocking decisions yet");
  GraphTime clamped = std::min(aTime, aStream->mStartBlocking);
  StreamTime relative = clamped - aStream->mTracksStartTime;
  return std::max<StreamTime>(0, relative);
}
|
|
|
|
|
2014-04-25 18:09:30 +04:00
|
|
|
// The graph's notion of the current iteration's end time is owned by
// whichever driver is currently running; simply forward the query to it.
GraphTime
MediaStreamGraphImpl::IterationEnd() const
{
  auto driver = CurrentDriver();
  return driver->IterationEnd();
}
|
|
|
|
|
2015-05-13 16:34:56 +03:00
|
|
|
// Advance every stream's time-varying state up to the newly computed current
// time (mStateComputedTime) and fire the listener notifications implied by
// the interval just processed: blocking changes, output, and finish events.
// aPrevCurrentTime is the graph time up to which streams were advanced by the
// previous call.
void
MediaStreamGraphImpl::UpdateCurrentTimeForStreams(GraphTime aPrevCurrentTime)
{
  for (MediaStream* stream : AllStreams()) {
    // mStartBlocking is the graph time at which the stream (next) starts
    // blocking. If it lies before mStateComputedTime the stream is blocked
    // for some part of the computed interval; if it lies after
    // aPrevCurrentTime the stream was unblocked (running) for some part of
    // the interval since the previous update.
    bool isAnyBlocked = stream->mStartBlocking < mStateComputedTime;
    bool isAnyUnblocked = stream->mStartBlocking > aPrevCurrentTime;

    // Calculate blocked time and fire Blocked/Unblocked events
    GraphTime blockedTime = mStateComputedTime - stream->mStartBlocking;
    NS_ASSERTION(blockedTime >= 0, "Error in blocking time");
    // Blocked time does not advance the stream's own timeline, so it is
    // passed separately alongside the new current time.
    stream->AdvanceTimeVaryingValuesToCurrentTime(mStateComputedTime,
                                                  blockedTime);
    LOG(LogLevel::Verbose,
        ("MediaStream %p bufferStartTime=%f blockedTime=%f",
         stream,
         MediaTimeToSeconds(stream->mTracksStartTime),
         MediaTimeToSeconds(blockedTime)));
    // Everything up to mStateComputedTime has now been accounted for.
    stream->mStartBlocking = mStateComputedTime;

    // Tell listeners the stream became unblocked, if they were last told it
    // was blocked. Note: index loops (not range-for) are used for listener
    // iteration here; presumably listeners may mutate the array from their
    // callbacks — TODO(review) confirm before restructuring.
    if (isAnyUnblocked && stream->mNotifiedBlocked) {
      for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
        MediaStreamListener* l = stream->mListeners[j];
        l->NotifyBlockingChanged(this, MediaStreamListener::UNBLOCKED);
      }
      stream->mNotifiedBlocked = false;
    }
    // Tell listeners the stream is (now) blocked, if not already notified.
    // Both notifications can fire in the same pass when the stream ran and
    // then blocked within the interval.
    if (isAnyBlocked && !stream->mNotifiedBlocked) {
      for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
        MediaStreamListener* l = stream->mListeners[j];
        l->NotifyBlockingChanged(this, MediaStreamListener::BLOCKED);
      }
      stream->mNotifiedBlocked = true;
    }

    // The stream produced output during the processed interval.
    if (isAnyUnblocked) {
      NS_ASSERTION(!stream->mNotifiedFinished,
                   "Shouldn't have already notified of finish *and* have output!");
      for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
        MediaStreamListener* l = stream->mListeners[j];
        l->NotifyOutput(this, mProcessedTime);
      }
    }

    // The stream is fully finished when all of its track data has been played
    // out.
    if (stream->mFinished && !stream->mNotifiedFinished &&
        mProcessedTime >=
          stream->StreamTimeToGraphTime(stream->GetStreamTracks().GetAllTracksEnd())) {
      stream->mNotifiedFinished = true;
      // Re-run stream ordering on the next iteration; UpdateStreamOrder()
      // checks this flag and returns early when it is clear.
      SetStreamOrderDirty();
      for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
        MediaStreamListener* l = stream->mListeners[j];
        l->NotifyEvent(this, MediaStreamGraphEvent::EVENT_FINISHED);
      }
    }
  }
}
|
|
|
|
|
2016-01-26 11:45:25 +03:00
|
|
|
template<typename C, typename Chunk>
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::ProcessChunkMetadataForInterval(MediaStream* aStream,
|
|
|
|
TrackID aTrackID,
|
|
|
|
C& aSegment,
|
|
|
|
StreamTime aStart,
|
|
|
|
StreamTime aEnd)
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(aStream);
|
|
|
|
MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
|
|
|
|
|
|
|
|
StreamTime offset = 0;
|
|
|
|
for (typename C::ConstChunkIterator chunk(aSegment);
|
|
|
|
!chunk.IsEnded(); chunk.Next()) {
|
|
|
|
if (offset >= aEnd) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
offset += chunk->GetDuration();
|
|
|
|
if (chunk->IsNull() || offset < aStart) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
PrincipalHandle principalHandle = chunk->GetPrincipalHandle();
|
|
|
|
if (principalHandle != aSegment.GetLastPrincipalHandle()) {
|
|
|
|
aSegment.SetLastPrincipalHandle(principalHandle);
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
|
|
|
("MediaStream %p track %d, principalHandle "
|
|
|
|
"changed in %sChunk with duration %lld",
|
|
|
|
aStream,
|
|
|
|
aTrackID,
|
|
|
|
aSegment.GetType() == MediaSegment::AUDIO ? "Audio" : "Video",
|
|
|
|
(long long)chunk->GetDuration()));
|
2017-01-27 15:20:37 +03:00
|
|
|
for (const TrackBound<MediaStreamTrackListener>& listener :
|
|
|
|
aStream->mTrackListeners) {
|
2016-01-26 11:45:25 +03:00
|
|
|
if (listener.mTrackID == aTrackID) {
|
|
|
|
listener.mListener->NotifyPrincipalHandleChanged(this, principalHandle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::ProcessChunkMetadata(GraphTime aPrevCurrentTime)
|
|
|
|
{
|
|
|
|
for (MediaStream* stream : AllStreams()) {
|
|
|
|
StreamTime iterationStart = stream->GraphTimeToStreamTime(aPrevCurrentTime);
|
|
|
|
StreamTime iterationEnd = stream->GraphTimeToStreamTime(mProcessedTime);
|
2016-01-26 05:49:01 +03:00
|
|
|
for (StreamTracks::TrackIter tracks(stream->mTracks);
|
2016-01-26 11:45:25 +03:00
|
|
|
!tracks.IsEnded(); tracks.Next()) {
|
|
|
|
MediaSegment* segment = tracks->GetSegment();
|
|
|
|
if (!segment) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (tracks->GetType() == MediaSegment::AUDIO) {
|
|
|
|
AudioSegment* audio = static_cast<AudioSegment*>(segment);
|
|
|
|
ProcessChunkMetadataForInterval<AudioSegment, AudioChunk>(
|
|
|
|
stream, tracks->GetID(), *audio, iterationStart, iterationEnd);
|
|
|
|
} else if (tracks->GetType() == MediaSegment::VIDEO) {
|
|
|
|
VideoSegment* video = static_cast<VideoSegment*>(segment);
|
|
|
|
ProcessChunkMetadataForInterval<VideoSegment, VideoChunk>(
|
|
|
|
stream, tracks->GetID(), *video, iterationStart, iterationEnd);
|
|
|
|
} else {
|
|
|
|
MOZ_CRASH("Unknown track type");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-16 07:17:30 +03:00
|
|
|
// Returns the graph time up to which aStream can be assumed not to block for
// lack of data: either aEndBlockingDecisions, or the point where the
// stream's buffered data runs out, whichever comes first.
GraphTime
MediaStreamGraphImpl::WillUnderrun(MediaStream* aStream,
                                   GraphTime aEndBlockingDecisions)
{
  // A finished stream can never underrun, and a ProcessedMediaStream can
  // always have data produced for it (it only blocks when an upstream
  // stream blocks), so neither constrains the decision window.
  if (aStream->mFinished || aStream->AsProcessedStream()) {
    return aEndBlockingDecisions;
  }
  // For this (source-side) stream, running out of buffered data is the only
  // thing that can block it, so no StreamTimeToGraphTime call is needed —
  // just offset the buffered end by the stream's start time.
  GraphTime bufferedUntil = aStream->mTracksStartTime + aStream->GetTracksEnd();
#ifdef DEBUG
  if (bufferedUntil < mProcessedTime) {
    // Diagnostic path: the stream already underran relative to processed
    // time. Dump as much detail as possible before asserting.
    LOG(LogLevel::Error,
        ("MediaStream %p underrun, "
         "bufferEnd %f < mProcessedTime %f (%" PRId64 " < %" PRId64
         "), Streamtime %" PRId64,
         aStream,
         MediaTimeToSeconds(bufferedUntil),
         MediaTimeToSeconds(mProcessedTime),
         bufferedUntil,
         mProcessedTime,
         aStream->GetTracksEnd()));
    aStream->DumpTrackInfo();
    NS_ASSERTION(bufferedUntil >= mProcessedTime, "Buffer underran");
  }
#endif
  // Never extend blocking decisions past the end of the buffered data.
  return std::min(bufferedUntil, aEndBlockingDecisions);
}
|
|
|
|
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
namespace {
// Sentinel values for ProcessedMediaStream::mCycleMarker, used by
// MediaStreamGraphImpl::UpdateStreamOrder()'s strongly-connected-components
// pass (see the Tarjan/Pearce variant described there).
// Value of mCycleMarker for unvisited streams in cycle detection.
const uint32_t NOT_VISITED = UINT32_MAX;
// Value of mCycleMarker for ordered streams in muted cycles.
const uint32_t IN_MUTED_CYCLE = 1;
} // namespace
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
|
2016-03-08 20:11:08 +03:00
|
|
|
bool
|
|
|
|
MediaStreamGraphImpl::AudioTrackPresent(bool& aNeedsAEC)
|
2012-07-31 16:17:21 +04:00
|
|
|
{
|
2016-04-13 21:31:35 +03:00
|
|
|
AssertOnGraphThreadOrNotRunning();
|
|
|
|
|
2014-08-25 17:25:49 +04:00
|
|
|
bool audioTrackPresent = false;
|
2016-03-08 20:11:08 +03:00
|
|
|
for (uint32_t i = 0; i < mStreams.Length() && audioTrackPresent == false; ++i) {
|
2014-07-17 04:55:55 +04:00
|
|
|
MediaStream* stream = mStreams[i];
|
2016-03-08 20:11:08 +03:00
|
|
|
SourceMediaStream* source = stream->AsSourceStream();
|
2014-09-09 20:23:01 +04:00
|
|
|
#ifdef MOZ_WEBRTC
|
2016-03-08 20:11:08 +03:00
|
|
|
if (source && source->NeedsMixing()) {
|
|
|
|
aNeedsAEC = true;
|
2014-03-24 14:06:06 +04:00
|
|
|
}
|
2014-09-09 20:23:01 +04:00
|
|
|
#endif
|
Bug 1060311 - Force the use of an AudioCallbackDriver when at least an AudioNodeStream is present in the graph. r=jesup
This prevent a bug where the graph would be using a SystemClockDriver even if it
was rendering Web Audio API content.
It went like this:
- An AudioContext was created.
- Some AudioNodeStream (Web Audio API MediaStreams) were created, but their
MediaStreamTrack was not added yet
- During the stream ordering, we would see that we were running an
AudioCallbackDriver (because the MSG was created using an AudioContext, and we
pass in hints regarding the type of MediaStreams that will be added in the
future, to open the audio stream as early as we can, because it can take some
time, the MSG was created directly using an AudioCallbackDriver)
- Also during the stream ordering, we see that none of our MediaStream have an
MediaStreamTrack with an audio track. This triggers a switch to a
SystemClockDriver, because the graph thinks there is no audio.
- During CreateAndDestroyAudioNode, we would not switch to an
AudioCallbackDriver on the first iteration (right after the UpdateStreamOrder
call), because we would be switching, and not during the iteration after,
because we thought we already switched (the first patch makes this more robust).
This basically forces an AudioCallbackDriver if there is an AudioNodeStream,
which prevents unnecessary GraphDriver switches (and save threads creation
destruction, audio stream create and destruction, and all other resources
associated with a GraphDriver).
2014-08-29 22:26:29 +04:00
|
|
|
// If this is a AudioNodeStream, force a AudioCallbackDriver.
|
|
|
|
if (stream->AsAudioNodeStream()) {
|
|
|
|
audioTrackPresent = true;
|
2015-02-24 17:59:29 +03:00
|
|
|
} else {
|
2016-01-26 05:49:01 +03:00
|
|
|
for (StreamTracks::TrackIter tracks(stream->GetStreamTracks(), MediaSegment::AUDIO);
|
2015-02-24 17:59:29 +03:00
|
|
|
!tracks.IsEnded(); tracks.Next()) {
|
|
|
|
audioTrackPresent = true;
|
|
|
|
}
|
2014-08-25 17:25:49 +04:00
|
|
|
}
|
2016-03-08 20:11:08 +03:00
|
|
|
if (source) {
|
2016-06-22 18:44:46 +03:00
|
|
|
audioTrackPresent = source->HasPendingAudioTrack();
|
2016-03-08 20:11:08 +03:00
|
|
|
}
|
2012-07-31 16:17:21 +04:00
|
|
|
}
|
2016-03-08 20:11:08 +03:00
|
|
|
|
2016-03-08 20:11:08 +03:00
|
|
|
// XXX For some reason, there are race conditions when starting an audio input where
|
|
|
|
// we find no active audio tracks. In any case, if we have an active audio input we
|
|
|
|
// should not allow a switch back to a SystemClockDriver
|
|
|
|
if (!audioTrackPresent && mInputDeviceUsers.Count() != 0) {
|
|
|
|
NS_WARNING("No audio tracks, but full-duplex audio is enabled!!!!!");
|
|
|
|
audioTrackPresent = true;
|
2016-03-15 20:03:43 +03:00
|
|
|
#ifdef MOZ_WEBRTC
|
2016-04-08 21:54:00 +03:00
|
|
|
aNeedsAEC = true;
|
2016-03-15 20:03:43 +03:00
|
|
|
#endif
|
2016-03-08 20:11:08 +03:00
|
|
|
}
|
|
|
|
|
2016-03-08 20:11:08 +03:00
|
|
|
return audioTrackPresent;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::UpdateStreamOrder()
|
|
|
|
{
|
|
|
|
bool shouldAEC = false;
|
|
|
|
bool audioTrackPresent = AudioTrackPresent(shouldAEC);
|
|
|
|
|
2016-01-21 19:51:36 +03:00
|
|
|
// Note that this looks for any audio streams, input or output, and switches to a
|
2016-03-08 20:11:08 +03:00
|
|
|
// SystemClockDriver if there are none. However, if another is already pending, let that
|
|
|
|
// switch happen.
|
2012-07-31 16:17:21 +04:00
|
|
|
|
2015-11-09 11:32:00 +03:00
|
|
|
if (!audioTrackPresent && mRealtime &&
|
2014-08-26 19:02:07 +04:00
|
|
|
CurrentDriver()->AsAudioCallbackDriver()) {
|
2015-02-24 17:59:29 +03:00
|
|
|
MonitorAutoLock mon(mMonitor);
|
2016-03-08 20:11:08 +03:00
|
|
|
if (CurrentDriver()->AsAudioCallbackDriver()->IsStarted() &&
|
|
|
|
!(CurrentDriver()->Switching())) {
|
2014-09-03 17:52:43 +04:00
|
|
|
if (mLifecycleState == LIFECYCLE_RUNNING) {
|
|
|
|
SystemClockDriver* driver = new SystemClockDriver(this);
|
|
|
|
CurrentDriver()->SwitchAtNextIteration(driver);
|
|
|
|
}
|
2014-08-26 19:02:07 +04:00
|
|
|
}
|
2014-08-26 19:01:33 +04:00
|
|
|
}
|
|
|
|
|
2015-12-01 13:48:02 +03:00
|
|
|
bool switching = false;
|
|
|
|
{
|
|
|
|
MonitorAutoLock mon(mMonitor);
|
|
|
|
switching = CurrentDriver()->Switching();
|
|
|
|
}
|
|
|
|
|
2015-11-09 11:32:00 +03:00
|
|
|
if (audioTrackPresent && mRealtime &&
|
|
|
|
!CurrentDriver()->AsAudioCallbackDriver() &&
|
2015-12-01 13:48:02 +03:00
|
|
|
!switching) {
|
2015-11-09 11:32:00 +03:00
|
|
|
MonitorAutoLock mon(mMonitor);
|
|
|
|
if (mLifecycleState == LIFECYCLE_RUNNING) {
|
|
|
|
AudioCallbackDriver* driver = new AudioCallbackDriver(this);
|
|
|
|
CurrentDriver()->SwitchAtNextIteration(driver);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-24 17:59:29 +03:00
|
|
|
if (!mStreamOrderDirty) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
mStreamOrderDirty = false;
|
|
|
|
|
2014-07-17 04:55:55 +04:00
|
|
|
// The algorithm for finding cycles is based on Tim Leslie's iterative
|
|
|
|
// implementation [1][2] of Pearce's variant [3] of Tarjan's strongly
|
|
|
|
// connected components (SCC) algorithm. There are variations (a) to
|
|
|
|
// distinguish whether streams in SCCs of size 1 are in a cycle and (b) to
|
|
|
|
// re-run the algorithm over SCCs with breaks at DelayNodes.
|
|
|
|
//
|
|
|
|
// [1] http://www.timl.id.au/?p=327
|
|
|
|
// [2] https://github.com/scipy/scipy/blob/e2c502fca/scipy/sparse/csgraph/_traversal.pyx#L582
|
|
|
|
// [3] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.1707
|
|
|
|
//
|
|
|
|
// There are two stacks. One for the depth-first search (DFS),
|
|
|
|
mozilla::LinkedList<MediaStream> dfsStack;
|
|
|
|
// and another for streams popped from the DFS stack, but still being
|
|
|
|
// considered as part of SCCs involving streams on the stack.
|
|
|
|
mozilla::LinkedList<MediaStream> sccStack;
|
|
|
|
|
|
|
|
// An index into mStreams for the next stream found with no unsatisfied
|
|
|
|
// upstream dependencies.
|
|
|
|
uint32_t orderedStreamCount = 0;
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
|
|
|
|
MediaStream* s = mStreams[i];
|
|
|
|
ProcessedMediaStream* ps = s->AsProcessedStream();
|
|
|
|
if (ps) {
|
|
|
|
// The dfsStack initially contains a list of all processed streams in
|
|
|
|
// unchanged order.
|
|
|
|
dfsStack.insertBack(s);
|
|
|
|
ps->mCycleMarker = NOT_VISITED;
|
|
|
|
} else {
|
|
|
|
// SourceMediaStreams have no inputs and so can be ordered now.
|
|
|
|
mStreams[orderedStreamCount] = s;
|
|
|
|
++orderedStreamCount;
|
2012-07-31 16:17:21 +04:00
|
|
|
}
|
|
|
|
}
|
2014-07-17 04:55:55 +04:00
|
|
|
|
|
|
|
// mNextStackMarker corresponds to "index" in Tarjan's algorithm. It is a
|
|
|
|
// counter to label mCycleMarker on the next visited stream in the DFS
|
|
|
|
// uniquely in the set of visited streams that are still being considered.
|
|
|
|
//
|
|
|
|
// In this implementation, the counter descends so that the values are
|
|
|
|
// strictly greater than the values that mCycleMarker takes when the stream
|
|
|
|
// has been ordered (0 or IN_MUTED_CYCLE).
|
|
|
|
//
|
|
|
|
// Each new stream labelled, as the DFS searches upstream, receives a value
|
|
|
|
// less than those used for all other streams being considered.
|
|
|
|
uint32_t nextStackMarker = NOT_VISITED - 1;
|
|
|
|
// Reset list of DelayNodes in cycles stored at the tail of mStreams.
|
|
|
|
mFirstCycleBreaker = mStreams.Length();
|
|
|
|
|
|
|
|
// Rearrange dfsStack order as required to DFS upstream and pop streams
|
|
|
|
// in processing order to place in mStreams.
|
|
|
|
while (auto ps = static_cast<ProcessedMediaStream*>(dfsStack.getFirst())) {
|
|
|
|
const auto& inputs = ps->mInputs;
|
|
|
|
MOZ_ASSERT(ps->AsProcessedStream());
|
|
|
|
if (ps->mCycleMarker == NOT_VISITED) {
|
|
|
|
// Record the position on the visited stack, so that any searches
|
|
|
|
// finding this stream again know how much of the stack is in the cycle.
|
|
|
|
ps->mCycleMarker = nextStackMarker;
|
|
|
|
--nextStackMarker;
|
|
|
|
// Not-visited input streams should be processed first.
|
|
|
|
// SourceMediaStreams have already been ordered.
|
|
|
|
for (uint32_t i = inputs.Length(); i--; ) {
|
2015-09-16 07:15:21 +03:00
|
|
|
if (inputs[i]->mSource->IsSuspended()) {
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
continue;
|
|
|
|
}
|
2014-07-17 04:55:55 +04:00
|
|
|
auto input = inputs[i]->mSource->AsProcessedStream();
|
|
|
|
if (input && input->mCycleMarker == NOT_VISITED) {
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
// It can be that this stream has an input which is from a suspended
|
|
|
|
// AudioContext.
|
|
|
|
if (input->isInList()) {
|
|
|
|
input->remove();
|
|
|
|
dfsStack.insertFront(input);
|
|
|
|
}
|
2014-07-17 04:55:55 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returning from DFS. Pop from dfsStack.
|
|
|
|
ps->remove();
|
|
|
|
|
|
|
|
// cycleStackMarker keeps track of the highest marker value on any
|
|
|
|
// upstream stream, if any, found receiving input, directly or indirectly,
|
|
|
|
// from the visited stack (and so from |ps|, making a cycle). In a
|
|
|
|
// variation from Tarjan's SCC algorithm, this does not include |ps|
|
|
|
|
// unless it is part of the cycle.
|
|
|
|
uint32_t cycleStackMarker = 0;
|
|
|
|
for (uint32_t i = inputs.Length(); i--; ) {
|
2015-09-16 07:15:21 +03:00
|
|
|
if (inputs[i]->mSource->IsSuspended()) {
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
continue;
|
|
|
|
}
|
2014-07-17 04:55:55 +04:00
|
|
|
auto input = inputs[i]->mSource->AsProcessedStream();
|
|
|
|
if (input) {
|
|
|
|
cycleStackMarker = std::max(cycleStackMarker, input->mCycleMarker);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cycleStackMarker <= IN_MUTED_CYCLE) {
|
|
|
|
// All inputs have been ordered and their stack markers have been removed.
|
|
|
|
// This stream is not part of a cycle. It can be processed next.
|
|
|
|
ps->mCycleMarker = 0;
|
|
|
|
mStreams[orderedStreamCount] = ps;
|
|
|
|
++orderedStreamCount;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// A cycle has been found. Record this stream for ordering when all
|
|
|
|
// streams in this SCC have been popped from the DFS stack.
|
|
|
|
sccStack.insertFront(ps);
|
|
|
|
|
|
|
|
if (cycleStackMarker > ps->mCycleMarker) {
|
|
|
|
// Cycles have been found that involve streams that remain on the stack.
|
|
|
|
// Leave mCycleMarker indicating the most downstream (last) stream on
|
|
|
|
// the stack known to be part of this SCC. In this way, any searches on
|
|
|
|
// other paths that find |ps| will know (without having to traverse from
|
|
|
|
// this stream again) that they are part of this SCC (i.e. part of an
|
|
|
|
// intersecting cycle).
|
|
|
|
ps->mCycleMarker = cycleStackMarker;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// |ps| is the root of an SCC involving no other streams on dfsStack, the
|
|
|
|
// complete SCC has been recorded, and streams in this SCC are part of at
|
|
|
|
// least one cycle.
|
|
|
|
MOZ_ASSERT(cycleStackMarker == ps->mCycleMarker);
|
|
|
|
// If there are DelayNodes in this SCC, then they may break the cycles.
|
|
|
|
bool haveDelayNode = false;
|
2014-07-24 13:58:01 +04:00
|
|
|
auto next = sccStack.getFirst();
|
2014-07-17 04:55:55 +04:00
|
|
|
// Streams in this SCC are identified by mCycleMarker <= cycleStackMarker.
|
|
|
|
// (There may be other streams later in sccStack from other incompletely
|
|
|
|
// searched SCCs, involving streams still on dfsStack.)
|
|
|
|
//
|
|
|
|
// DelayNodes in cycles must behave differently from those not in cycles,
|
|
|
|
// so all DelayNodes in the SCC must be identified.
|
2014-07-24 13:58:01 +04:00
|
|
|
while (next && static_cast<ProcessedMediaStream*>(next)->
|
|
|
|
mCycleMarker <= cycleStackMarker) {
|
2014-07-17 04:55:55 +04:00
|
|
|
auto ns = next->AsAudioNodeStream();
|
|
|
|
// Get next before perhaps removing from list below.
|
2014-07-24 13:58:01 +04:00
|
|
|
next = next->getNext();
|
2014-07-17 04:55:55 +04:00
|
|
|
if (ns && ns->Engine()->AsDelayNodeEngine()) {
|
|
|
|
haveDelayNode = true;
|
|
|
|
// DelayNodes break cycles by producing their output in a
|
|
|
|
// preprocessing phase; they do not need to be ordered before their
|
|
|
|
// consumers. Order them at the tail of mStreams so that they can be
|
|
|
|
// handled specially. Do so now, so that DFS ignores them.
|
|
|
|
ns->remove();
|
|
|
|
ns->mCycleMarker = 0;
|
|
|
|
--mFirstCycleBreaker;
|
|
|
|
mStreams[mFirstCycleBreaker] = ns;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
auto after_scc = next;
|
2014-07-24 13:58:01 +04:00
|
|
|
while ((next = sccStack.getFirst()) != after_scc) {
|
|
|
|
next->remove();
|
|
|
|
auto removed = static_cast<ProcessedMediaStream*>(next);
|
2014-07-17 04:55:55 +04:00
|
|
|
if (haveDelayNode) {
|
|
|
|
// Return streams to the DFS stack again (to order and detect cycles
|
|
|
|
// without delayNodes). Any of these streams that are still inputs
|
|
|
|
// for streams on the visited stack must be returned to the front of
|
|
|
|
// the stack to be ordered before their dependents. We know that none
|
|
|
|
// of these streams need input from streams on the visited stack, so
|
|
|
|
// they can all be searched and ordered before the current stack head
|
|
|
|
// is popped.
|
2014-07-24 13:58:01 +04:00
|
|
|
removed->mCycleMarker = NOT_VISITED;
|
|
|
|
dfsStack.insertFront(removed);
|
2014-07-17 04:55:55 +04:00
|
|
|
} else {
|
|
|
|
// Streams in cycles without any DelayNodes must be muted, and so do
|
|
|
|
// not need input and can be ordered now. They must be ordered before
|
|
|
|
// their consumers so that their muted output is available.
|
2014-07-24 13:58:01 +04:00
|
|
|
removed->mCycleMarker = IN_MUTED_CYCLE;
|
|
|
|
mStreams[orderedStreamCount] = removed;
|
2014-07-17 04:55:55 +04:00
|
|
|
++orderedStreamCount;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
MOZ_ASSERT(orderedStreamCount == mFirstCycleBreaker);
|
2012-07-31 16:17:21 +04:00
|
|
|
}
|
|
|
|
|
2012-09-20 04:47:51 +04:00
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::NotifyHasCurrentData(MediaStream* aStream)
|
|
|
|
{
|
2013-03-20 15:19:39 +04:00
|
|
|
if (!aStream->mNotifiedHasCurrentData && aStream->mHasCurrentData) {
|
|
|
|
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
|
|
|
|
MediaStreamListener* l = aStream->mListeners[j];
|
|
|
|
l->NotifyHasCurrentData(this);
|
|
|
|
}
|
|
|
|
aStream->mNotifiedHasCurrentData = true;
|
2012-09-20 04:47:51 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// Reconciles aStream's per-track AudioOutputStream entries with its current
// set of audio tracks: drops all entries when nothing consumes audio output,
// creates entries for new audio tracks, and removes entries whose track has
// disappeared. May request a switch to an AudioCallbackDriver when audio
// output first appears.
void
MediaStreamGraphImpl::CreateOrDestroyAudioStreams(MediaStream* aStream)
{
  MOZ_ASSERT(mRealtime, "Should only attempt to create audio streams in real-time mode");

  // No audio-output consumers: clear any per-track output state and bail.
  if (aStream->mAudioOutputs.IsEmpty()) {
    aStream->mAudioOutputStreams.Clear();
    return;
  }

  // Fast path: tracks unchanged since last call and output streams already
  // exist, so there is nothing to rebuild.
  if (!aStream->GetStreamTracks().GetAndResetTracksDirty() &&
      !aStream->mAudioOutputStreams.IsEmpty()) {
    return;
  }

  LOG(LogLevel::Debug,
      ("Updating AudioOutputStreams for MediaStream %p", aStream));

  // One flag per existing AudioOutputStream, set below when a live audio
  // track still matches that entry.
  AutoTArray<bool,2> audioOutputStreamsFound;
  for (uint32_t i = 0; i < aStream->mAudioOutputStreams.Length(); ++i) {
    audioOutputStreamsFound.AppendElement(false);
  }

  // Walk all audio tracks: mark output streams that are still needed and
  // create new ones for tracks that have none yet.
  for (StreamTracks::TrackIter tracks(aStream->GetStreamTracks(), MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    uint32_t i;
    for (i = 0; i < audioOutputStreamsFound.Length(); ++i) {
      if (aStream->mAudioOutputStreams[i].mTrackID == tracks->GetID()) {
        break;
      }
    }
    if (i < audioOutputStreamsFound.Length()) {
      audioOutputStreamsFound[i] = true;
    } else {
      // New audio track: initialize a fresh AudioOutputStream starting at
      // the graph's current processed time.
      MediaStream::AudioOutputStream* audioOutputStream =
        aStream->mAudioOutputStreams.AppendElement();
      audioOutputStream->mAudioPlaybackStartTime = mProcessedTime;
      audioOutputStream->mBlockedAudioTime = 0;
      audioOutputStream->mLastTickWritten = 0;
      audioOutputStream->mTrackID = tracks->GetID();

      bool switching = false;

      // Sample the driver's switching state under the monitor.
      {
        MonitorAutoLock lock(mMonitor);
        switching = CurrentDriver()->Switching();
      }

      // Audio output needs an AudioCallbackDriver; request a switch unless
      // one is already current or a switch is already pending.
      // NOTE(review): |switching| is sampled under the monitor above but used
      // after the lock is released — presumably benign because a stale value
      // only risks a redundant switch request; confirm.
      if (!CurrentDriver()->AsAudioCallbackDriver() &&
          !switching) {
        MonitorAutoLock mon(mMonitor);
        if (mLifecycleState == LIFECYCLE_RUNNING) {
          AudioCallbackDriver* driver = new AudioCallbackDriver(this);
          CurrentDriver()->SwitchAtNextIteration(driver);
        }
      }
    }
  }

  // Remove output streams whose track no longer exists. Iterate backwards so
  // RemoveElementAt() does not shift indices we still need to visit.
  for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
    if (!audioOutputStreamsFound[i]) {
      aStream->mAudioOutputStreams.RemoveElementAt(i);
    }
  }
}
|
|
|
|
|
2014-09-18 09:20:43 +04:00
|
|
|
// Mixes the audio for aStream's output streams over the interval
// [mProcessedTime, mStateComputedTime): writes real samples where the stream
// is unblocked, and silence where it is blocked or the track has run out of
// data, then hands the result to the mixer via WriteTo().
//
// @param aStream  the stream whose audio output streams are played.
// @return the number of ticks written for the LAST AudioOutputStream
//         processed (ticksWritten is reset at the top of each iteration).
StreamTime
MediaStreamGraphImpl::PlayAudio(MediaStream* aStream)
{
  MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode");

  // Effective volume is the sum of the volumes of all attached audio outputs.
  float volume = 0.0f;
  for (uint32_t i = 0; i < aStream->mAudioOutputs.Length(); ++i) {
    volume += aStream->mAudioOutputs[i].mVolume;
  }

  StreamTime ticksWritten = 0;

  for (uint32_t i = 0; i < aStream->mAudioOutputStreams.Length(); ++i) {
    ticksWritten = 0;

    MediaStream::AudioOutputStream& audioOutput = aStream->mAudioOutputStreams[i];
    StreamTracks::Track* track = aStream->mTracks.FindTrack(audioOutput.mTrackID);
    // CreateOrDestroyAudioStreams keeps mAudioOutputStreams in sync with the
    // track set, so the track must exist here.
    MOZ_ASSERT(track, "AudioOutputStream for a track that no longer exists");
    AudioSegment* audio = track->Get<AudioSegment>();
    AudioSegment output;

    StreamTime offset = aStream->GraphTimeToStreamTime(mProcessedTime);

    // We don't update aStream->mTracksStartTime here to account for time spent
    // blocked. Instead, we'll update it in UpdateCurrentTimeForStreams after
    // the blocked period has completed. But we do need to make sure we play
    // from the right offsets in the stream buffer, even if we've already
    // written silence for some amount of blocked time after the current time.
    GraphTime t = mProcessedTime;
    while (t < mStateComputedTime) {
      bool blocked = t >= aStream->mStartBlocking;
      GraphTime end = blocked ? mStateComputedTime : aStream->mStartBlocking;
      NS_ASSERTION(end <= mStateComputedTime, "mStartBlocking is wrong!");

      // Check how many ticks of sound we can provide if we are blocked some
      // time in the middle of this cycle.
      StreamTime toWrite = end - t;

      if (blocked) {
        // Blocked interval: emit silence for the whole span.
        output.InsertNullDataAtStart(toWrite);
        ticksWritten += toWrite;
        LOG(LogLevel::Verbose,
            ("MediaStream %p writing %" PRId64 " blocking-silence samples for "
             "%f to %f (%" PRId64 " to %" PRId64 ")",
             aStream,
             toWrite,
             MediaTimeToSeconds(t),
             MediaTimeToSeconds(end),
             offset,
             offset + toWrite));
      } else {
        StreamTime endTicksNeeded = offset + toWrite;
        StreamTime endTicksAvailable = audio->GetDuration();

        if (endTicksNeeded <= endTicksAvailable) {
          // Enough buffered audio: copy the requested slice verbatim.
          LOG(LogLevel::Verbose,
              ("MediaStream %p writing %" PRId64 " samples for %f to %f "
               "(samples %" PRId64 " to %" PRId64 ")",
               aStream,
               toWrite,
               MediaTimeToSeconds(t),
               MediaTimeToSeconds(end),
               offset,
               endTicksNeeded));
          output.AppendSlice(*audio, offset, endTicksNeeded);
          ticksWritten += toWrite;
          offset = endTicksNeeded;
        } else {
          // MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not ended.");
          // If we are at the end of the track, maybe write the remaining
          // samples, and pad with/output silence.
          if (endTicksNeeded > endTicksAvailable &&
              offset < endTicksAvailable) {
            output.AppendSlice(*audio, offset, endTicksAvailable);
            LOG(LogLevel::Verbose,
                ("MediaStream %p writing %" PRId64 " samples for %f to %f "
                 "(samples %" PRId64 " to %" PRId64 ")",
                 aStream,
                 toWrite,
                 MediaTimeToSeconds(t),
                 MediaTimeToSeconds(end),
                 offset,
                 endTicksNeeded));
            uint32_t available = endTicksAvailable - offset;
            ticksWritten += available;
            toWrite -= available;
            offset = endTicksAvailable;
          }
          // Pad whatever is still owed with silence.
          output.AppendNullData(toWrite);
          // (fixed garbled "slsamples" in the log format string)
          LOG(LogLevel::Verbose,
              ("MediaStream %p writing %" PRId64 " padding silence samples for "
               "%f to %f (samples %" PRId64 " to %" PRId64 ")",
               aStream,
               toWrite,
               MediaTimeToSeconds(t),
               MediaTimeToSeconds(end),
               offset,
               endTicksNeeded));
          ticksWritten += toWrite;
        }
        output.ApplyVolume(volume);
      }
      t = end;
    }
    audioOutput.mLastTickWritten = offset;

    // Need unique id for stream & track - and we want it to match the inserter
    output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
                   mMixer,
                   CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount(),
                   mSampleRate);
  }
  return ticksWritten;
}
|
|
|
|
|
2016-01-21 19:51:35 +03:00
|
|
|
void
|
2016-03-08 20:11:09 +03:00
|
|
|
MediaStreamGraphImpl::OpenAudioInputImpl(int aID,
|
2016-01-21 19:51:36 +03:00
|
|
|
AudioDataListener *aListener)
|
2016-01-21 19:51:35 +03:00
|
|
|
{
|
2016-02-04 05:12:51 +03:00
|
|
|
// Bug 1238038 Need support for multiple mics at once
|
|
|
|
if (mInputDeviceUsers.Count() > 0 &&
|
|
|
|
!mInputDeviceUsers.Get(aListener, nullptr)) {
|
|
|
|
NS_ASSERTION(false, "Input from multiple mics not yet supported; bug 1238038");
|
2016-01-21 19:51:36 +03:00
|
|
|
// Need to support separate input-only AudioCallback drivers; they'll
|
|
|
|
// call us back on "other" threads. We will need to echo-cancel them, though.
|
|
|
|
return;
|
|
|
|
}
|
2016-01-21 19:51:36 +03:00
|
|
|
mInputWanted = true;
|
2016-02-04 05:12:51 +03:00
|
|
|
|
|
|
|
// Add to count of users for this ID.
|
|
|
|
// XXX Since we can't rely on IDs staying valid (ugh), use the listener as
|
|
|
|
// a stand-in for the ID. Fix as part of support for multiple-captures
|
|
|
|
// (Bug 1238038)
|
|
|
|
uint32_t count = 0;
|
|
|
|
mInputDeviceUsers.Get(aListener, &count); // ok if this fails
|
|
|
|
count++;
|
|
|
|
mInputDeviceUsers.Put(aListener, count); // creates a new entry in the hash if needed
|
|
|
|
|
|
|
|
if (count == 1) { // first open for this listener
|
2016-03-08 20:11:08 +03:00
|
|
|
// aID is a cubeb_devid, and we assume that opaque ptr is valid until
|
|
|
|
// we close cubeb.
|
|
|
|
mInputDeviceID = aID;
|
2016-02-04 05:12:51 +03:00
|
|
|
mAudioInputs.AppendElement(aListener); // always monitor speaker data
|
2016-01-21 19:51:36 +03:00
|
|
|
|
2016-03-08 20:11:08 +03:00
|
|
|
// Switch Drivers since we're adding input (to input-only or full-duplex)
|
|
|
|
MonitorAutoLock mon(mMonitor);
|
|
|
|
if (mLifecycleState == LIFECYCLE_RUNNING) {
|
|
|
|
AudioCallbackDriver* driver = new AudioCallbackDriver(this);
|
2017-06-12 18:14:26 +03:00
|
|
|
driver->SetMicrophoneActive(true);
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(
|
|
|
|
LogLevel::Debug,
|
|
|
|
("OpenAudioInput: starting new AudioCallbackDriver(input) %p", driver));
|
|
|
|
LOG(
|
|
|
|
LogLevel::Debug,
|
|
|
|
("OpenAudioInput: starting new AudioCallbackDriver(input) %p", driver));
|
2016-03-08 20:11:08 +03:00
|
|
|
driver->SetInputListener(aListener);
|
|
|
|
CurrentDriver()->SwitchAtNextIteration(driver);
|
|
|
|
} else {
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Error, ("OpenAudioInput in shutdown!"));
|
|
|
|
LOG(LogLevel::Debug, ("OpenAudioInput in shutdown!"));
|
|
|
|
NS_ASSERTION(false, "Can't open cubeb inputs in shutdown");
|
2016-03-08 20:11:08 +03:00
|
|
|
}
|
2016-01-21 19:51:36 +03:00
|
|
|
}
|
2016-01-21 19:51:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
MediaStreamGraphImpl::OpenAudioInput(int aID,
                                     AudioDataListener *aListener)
{
  // Control message that carries the open request to the graph thread.
  class OpenInputMessage : public ControlMessage {
  public:
    OpenInputMessage(MediaStreamGraphImpl *aGraph, int aID,
                     AudioDataListener *aListener)
      : ControlMessage(nullptr)
      , mGraph(aGraph)
      , mDeviceID(aID)
      , mInputListener(aListener)
    {
    }
    void Run() override
    {
      mGraph->OpenAudioInputImpl(mDeviceID, mInputListener);
    }
    MediaStreamGraphImpl *mGraph;
    int mDeviceID;
    RefPtr<AudioDataListener> mInputListener;
  };

  // AppendMessage is only legal on the main thread; if we were called from
  // anywhere else, re-dispatch this call to the main thread and return.
  if (!NS_IsMainThread()) {
    RefPtr<nsIRunnable> bounce =
      WrapRunnable(this,
                   &MediaStreamGraphImpl::OpenAudioInput,
                   aID,
                   RefPtr<AudioDataListener>(aListener));
    mAbstractMainThread->Dispatch(bounce.forget());
    return NS_OK;
  }

  // XXX Check not destroyed!
  AppendMessage(MakeUnique<OpenInputMessage>(this, aID, aListener));
  return NS_OK;
}
|
|
|
|
|
|
|
|
void
MediaStreamGraphImpl::CloseAudioInputImpl(AudioDataListener *aListener)
{
  // Graph-thread implementation of closing an audio input. Decrements the
  // per-listener use count; only when the count reaches zero is the input
  // actually torn down and (if necessary) the graph driver switched away
  // from full-duplex operation.
  uint32_t count;
  DebugOnly<bool> result = mInputDeviceUsers.Get(aListener, &count);
  MOZ_ASSERT(result);
  if (--count > 0) {
    mInputDeviceUsers.Put(aListener, count);
    return; // still in use
  }
  mInputDeviceUsers.Remove(aListener);
  // NOTE(review): mInputDeviceID is set from a device id in
  // OpenAudioInputImpl; -1 is used here as the "no device" sentinel.
  mInputDeviceID = -1;
  mInputWanted = false;
  AudioCallbackDriver *driver = CurrentDriver()->AsAudioCallbackDriver();
  if (driver) {
    driver->RemoveInputListener(aListener);
  }
  mAudioInputs.RemoveElement(aListener);

  // Switch Drivers since we're adding or removing an input (to nothing/system or output only)
  bool shouldAEC = false;
  bool audioTrackPresent = AudioTrackPresent(shouldAEC);

  MonitorAutoLock mon(mMonitor);
  if (mLifecycleState == LIFECYCLE_RUNNING) {
    // NOTE(review): this inner 'driver' intentionally shadows the
    // AudioCallbackDriver* declared above; it refers to the *next* driver.
    GraphDriver* driver;
    if (audioTrackPresent) {
      // We still have audio output
      LOG(LogLevel::Debug, ("CloseInput: output present (AudioCallback)"));

      driver = new AudioCallbackDriver(this);
      CurrentDriver()->SwitchAtNextIteration(driver);
    } else if (CurrentDriver()->AsAudioCallbackDriver()) {
      LOG(LogLevel::Debug,
          ("CloseInput: no output present (SystemClockCallback)"));

      driver = new SystemClockDriver(this);
      CurrentDriver()->SwitchAtNextIteration(driver);
    } // else SystemClockDriver->SystemClockDriver, no switch
  }
}
|
|
|
|
|
|
|
|
void
MediaStreamGraphImpl::CloseAudioInput(AudioDataListener *aListener)
{
  // Public entry point for closing an audio input. Mirrors OpenAudioInput():
  // hops to the main thread if needed, then forwards to CloseAudioInputImpl()
  // on the graph thread via a ControlMessage.

  // So, so, so annoying. Can't AppendMessage except on Mainthread
  if (!NS_IsMainThread()) {
    // Re-dispatch this same call to the main thread; the RefPtr keeps the
    // listener alive across the asynchronous hop.
    RefPtr<nsIRunnable> runnable =
      WrapRunnable(this,
                   &MediaStreamGraphImpl::CloseAudioInput,
                   RefPtr<AudioDataListener>(aListener));
    mAbstractMainThread->Dispatch(runnable.forget());
    return;
  }
  // Graph-thread message that performs the actual close.
  class Message : public ControlMessage {
  public:
    Message(MediaStreamGraphImpl *aGraph, AudioDataListener *aListener) :
      ControlMessage(nullptr), mGraph(aGraph), mListener(aListener) {}
    void Run() override
    {
      mGraph->CloseAudioInputImpl(mListener);
    }
    MediaStreamGraphImpl *mGraph;
    RefPtr<AudioDataListener> mListener;
  };
  this->AppendMessage(MakeUnique<Message>(this, aListener));
}
|
|
|
|
|
|
|
|
// All AudioInput listeners get the same speaker data (at least for now).
|
|
|
|
void
|
2016-01-21 19:51:36 +03:00
|
|
|
MediaStreamGraph::NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
|
2016-02-17 21:19:01 +03:00
|
|
|
TrackRate aRate, uint32_t aChannels)
|
2016-01-21 19:51:35 +03:00
|
|
|
{
|
|
|
|
for (auto& listener : mAudioInputs) {
|
2016-02-17 21:19:01 +03:00
|
|
|
listener->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels);
|
2016-01-21 19:51:35 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-31 15:07:53 +03:00
|
|
|
void
MediaStreamGraph::AssertOnGraphThreadOrNotRunning() const
{
  // Debug-only sanity check: verifies the caller is on the graph thread, or
  // the graph is known not to be running (main thread during teardown), or
  // the caller holds the graph monitor. No-op in release builds.
  // either we're on the right thread (and calling CurrentDriver() is safe),
  // or we're going to assert anyways, so don't cross-check CurrentDriver
#ifdef DEBUG
  MediaStreamGraphImpl const * graph =
    static_cast<MediaStreamGraphImpl const *>(this);
  // if all the safety checks fail, assert we own the monitor
  if (!graph->mDriver->OnThread()) {
    if (!(graph->mDetectedNotRunning &&
          graph->mLifecycleState > MediaStreamGraphImpl::LIFECYCLE_RUNNING &&
          NS_IsMainThread())) {
      graph->mMonitor.AssertCurrentThreadOwns();
    }
  }
#endif
}
|
|
|
|
|
2013-07-19 18:40:57 +04:00
|
|
|
bool
|
|
|
|
MediaStreamGraphImpl::ShouldUpdateMainThread()
|
|
|
|
{
|
|
|
|
if (mRealtime) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
TimeStamp now = TimeStamp::Now();
|
2014-08-26 19:01:33 +04:00
|
|
|
if ((now - mLastMainThreadUpdate).ToMilliseconds() > CurrentDriver()->IterationDuration()) {
|
2013-07-19 18:40:57 +04:00
|
|
|
mLastMainThreadUpdate = now;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
MediaStreamGraphImpl::PrepareUpdatesToMainThreadState(bool aFinalUpdate)
{
  // Stage per-stream state (current time, finished flag) into mStreamUpdates
  // and move pending runnables into mUpdateRunnables, then post a
  // stable-state event so the main thread applies them. Called on the graph
  // thread with mMonitor held. aFinalUpdate forces the update (and the
  // posted event) even when the throttle in ShouldUpdateMainThread() would
  // otherwise skip it.
  mMonitor.AssertCurrentThreadOwns();

  // We don't want to frequently update the main thread about timing update
  // when we are not running in realtime.
  if (aFinalUpdate || ShouldUpdateMainThread()) {
    // Strip updates that will be obsoleted below, so as to keep the length of
    // mStreamUpdates sane.
    size_t keptUpdateCount = 0;
    for (size_t i = 0; i < mStreamUpdates.Length(); ++i) {
      MediaStream* stream = mStreamUpdates[i].mStream;
      // RemoveStreamGraphThread() clears mStream in updates for
      // streams that are removed from the graph.
      MOZ_ASSERT(!stream || stream->GraphImpl() == this);
      if (!stream || stream->MainThreadNeedsUpdates()) {
        // Discard this update as it has either been cleared when the stream
        // was destroyed or there will be a newer update below.
        continue;
      }
      // In-place compaction: shift kept entries down over discarded ones.
      if (keptUpdateCount != i) {
        mStreamUpdates[keptUpdateCount] = Move(mStreamUpdates[i]);
        MOZ_ASSERT(!mStreamUpdates[i].mStream);
      }
      ++keptUpdateCount;
    }
    mStreamUpdates.TruncateLength(keptUpdateCount);

    mStreamUpdates.SetCapacity(mStreamUpdates.Length() + mStreams.Length() +
                               mSuspendedStreams.Length());
    for (MediaStream* stream : AllStreams()) {
      if (!stream->MainThreadNeedsUpdates()) {
        continue;
      }
      StreamUpdate* update = mStreamUpdates.AppendElement();
      update->mStream = stream;
      // No blocking to worry about here, since we've passed
      // UpdateCurrentTimeForStreams.
      update->mNextMainThreadCurrentTime =
        stream->GraphTimeToStreamTime(mProcessedTime);
      update->mNextMainThreadFinished = stream->mNotifiedFinished;
    }
    if (!mPendingUpdateRunnables.IsEmpty()) {
      mUpdateRunnables.AppendElements(Move(mPendingUpdateRunnables));
    }
  }

  // Don't send the message to the main thread if it's not going to have
  // any work to do.
  if (aFinalUpdate ||
      !mUpdateRunnables.IsEmpty() ||
      !mStreamUpdates.IsEmpty()) {
    EnsureStableStateEventPosted();
  }
}
|
|
|
|
|
2014-04-25 20:04:23 +04:00
|
|
|
GraphTime
MediaStreamGraphImpl::RoundUpToNextAudioBlock(GraphTime aTime)
{
  // Advance aTime to the start of the following WebAudio block (a block is
  // 1 << WEBAUDIO_BLOCK_SIZE_BITS ticks). A time already on a block boundary
  // still moves to the next block, matching the original tick arithmetic.
  const uint64_t currentBlock =
    static_cast<StreamTime>(aTime) >> WEBAUDIO_BLOCK_SIZE_BITS;
  const StreamTime boundary =
    (currentBlock + 1) << WEBAUDIO_BLOCK_SIZE_BITS;
  return boundary;
}
|
|
|
|
|
|
|
|
void
MediaStreamGraphImpl::ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
                                                        TrackRate aSampleRate)
{
  // Produce data for streams [aStreamIndex, end) in WebAudio-block-sized
  // steps from mProcessedTime up to mStateComputedTime. Used when an
  // AudioNodeStream is present, since audio nodes must be processed one
  // render quantum at a time.
  // NOTE(review): aSampleRate is not read in this body — presumably kept for
  // interface symmetry; confirm before removing.
  MOZ_ASSERT(aStreamIndex <= mFirstCycleBreaker,
             "Cycle breaker is not AudioNodeStream?");
  GraphTime t = mProcessedTime;
  while (t < mStateComputedTime) {
    GraphTime next = RoundUpToNextAudioBlock(t);
    // Streams at indices >= mFirstCycleBreaker break dependency cycles by
    // emitting their output for this block before consuming input.
    for (uint32_t i = mFirstCycleBreaker; i < mStreams.Length(); ++i) {
      auto ns = static_cast<AudioNodeStream*>(mStreams[i]);
      MOZ_ASSERT(ns->AsAudioNodeStream());
      ns->ProduceOutputBeforeInput(t);
    }
    // Then process one block of input for each processed stream, in order.
    for (uint32_t i = aStreamIndex; i < mStreams.Length(); ++i) {
      ProcessedMediaStream* ps = mStreams[i]->AsProcessedStream();
      if (ps) {
        ps->ProcessInput(t, next,
            (next == mStateComputedTime) ? ProcessedMediaStream::ALLOW_FINISH : 0);
      }
    }
    t = next;
  }
  NS_ASSERTION(t == mStateComputedTime,
               "Something went wrong with rounding to block boundaries");
}
|
|
|
|
|
2013-12-09 09:08:02 +04:00
|
|
|
bool
|
|
|
|
MediaStreamGraphImpl::AllFinishedStreamsNotified()
|
|
|
|
{
|
2015-09-04 08:01:01 +03:00
|
|
|
for (MediaStream* stream : AllStreams()) {
|
|
|
|
if (stream->mFinished && !stream->mNotifiedFinished) {
|
2013-12-09 09:08:02 +04:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
MediaStreamGraphImpl::RunMessageAfterProcessing(UniquePtr<ControlMessage> aMessage)
{
  // Queue a message originating on the graph thread itself; it will run in
  // the post-processing RunMessagesInQueue() call of the current iteration.
  MOZ_ASSERT(CurrentDriver()->OnThread());

  // Lazily create the single message batch used for graph-thread messages.
  if (mFrontMessageQueue.IsEmpty()) {
    mFrontMessageQueue.AppendElement();
  }

  // Only one block is used for messages from the graph thread.
  MOZ_ASSERT(mFrontMessageQueue.Length() == 1);
  mFrontMessageQueue[0].mMessages.AppendElement(Move(aMessage));
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::RunMessagesInQueue()
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
2014-04-25 18:09:30 +04:00
|
|
|
// Calculate independent action times for each batch of messages (each
|
|
|
|
// batch corresponding to an event loop task). This isolates the performance
|
|
|
|
// of different scripts to some extent.
|
2014-08-25 17:26:21 +04:00
|
|
|
for (uint32_t i = 0; i < mFrontMessageQueue.Length(); ++i) {
|
2016-01-21 00:14:33 +03:00
|
|
|
nsTArray<UniquePtr<ControlMessage>>& messages = mFrontMessageQueue[i].mMessages;
|
2014-04-25 18:09:30 +04:00
|
|
|
|
|
|
|
for (uint32_t j = 0; j < messages.Length(); ++j) {
|
|
|
|
messages[j]->Run();
|
|
|
|
}
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
2014-08-25 17:26:21 +04:00
|
|
|
mFrontMessageQueue.Clear();
|
2015-10-22 08:47:57 +03:00
|
|
|
}
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2015-10-22 08:47:57 +03:00
|
|
|
void
MediaStreamGraphImpl::UpdateGraph(GraphTime aEndBlockingDecisions)
{
  // Recompute stream ordering, pull pending input from source streams, and
  // decide each stream's blocking start time for the interval
  // [mStateComputedTime, aEndBlockingDecisions). Runs on the graph thread.
  MOZ_ASSERT(aEndBlockingDecisions >= mProcessedTime);
  // The next state computed time can be the same as the previous: it
  // means the driver would have been blocking indefinitely, but the graph
  // has been woken up right after having been put to sleep.
  MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);

  UpdateStreamOrder();

  bool ensureNextIteration = false;

  // Grab pending stream input and compute blocking time
  for (MediaStream* stream : mStreams) {
    if (SourceMediaStream* is = stream->AsSourceStream()) {
      ExtractPendingInput(is, aEndBlockingDecisions, &ensureNextIteration);
    }

    if (stream->mFinished) {
      // The stream's not suspended, and since it's finished, underruns won't
      // stop it playing out. So there's no blocking other than what we impose
      // here.
      GraphTime endTime = stream->GetStreamTracks().GetAllTracksEnd() +
          stream->mTracksStartTime;
      if (endTime <= mStateComputedTime) {
        LOG(LogLevel::Verbose,
            ("MediaStream %p is blocked due to being finished", stream));
        stream->mStartBlocking = mStateComputedTime;
      } else {
        LOG(LogLevel::Verbose,
            ("MediaStream %p is finished, but not blocked yet (end at %f, with "
             "blocking at %f)",
             stream,
             MediaTimeToSeconds(stream->GetTracksEnd()),
             MediaTimeToSeconds(endTime)));
        // Data can't be added to a finished stream, so underruns are irrelevant.
        stream->mStartBlocking = std::min(endTime, aEndBlockingDecisions);
      }
    } else {
      stream->mStartBlocking = WillUnderrun(stream, aEndBlockingDecisions);
    }
  }

  // Suspended streams block immediately.
  for (MediaStream* stream : mSuspendedStreams) {
    stream->mStartBlocking = mStateComputedTime;
  }

  // The loop is woken up so soon that IterationEnd() barely advances and we
  // end up having aEndBlockingDecision == mStateComputedTime.
  // Since stream blocking is computed in the interval of
  // [mStateComputedTime, aEndBlockingDecision), it won't be computed at all.
  // We should ensure next iteration so that pending blocking changes will be
  // computed in next loop.
  if (ensureNextIteration ||
      aEndBlockingDecisions == mStateComputedTime) {
    EnsureNextIteration();
  }
}
|
2014-04-25 18:09:30 +04:00
|
|
|
|
2014-04-25 20:04:23 +04:00
|
|
|
void
MediaStreamGraphImpl::Process()
{
  // Process and play out stream contents for the interval
  // [mProcessedTime, mStateComputedTime). Runs on the graph thread.
  // Play stream contents.
  bool allBlockedForever = true;
  // True when we've done ProcessInput for all processed streams.
  bool doneAllProducing = false;
  // This is the number of frames that are written to the AudioStreams for
  // this cycle.
  StreamTime ticksPlayed = 0;

  mMixer.StartMixing();

  // Figure out what each stream wants to do
  for (uint32_t i = 0; i < mStreams.Length(); ++i) {
    MediaStream* stream = mStreams[i];
    if (!doneAllProducing) {
      ProcessedMediaStream* ps = stream->AsProcessedStream();
      if (ps) {
        AudioNodeStream* n = stream->AsAudioNodeStream();
        if (n) {
#ifdef DEBUG
          // Verify that the sampling rate for all of the following streams is the same
          for (uint32_t j = i + 1; j < mStreams.Length(); ++j) {
            AudioNodeStream* nextStream = mStreams[j]->AsAudioNodeStream();
            if (nextStream) {
              MOZ_ASSERT(n->SampleRate() == nextStream->SampleRate(),
                         "All AudioNodeStreams in the graph must have the same sampling rate");
            }
          }
#endif
          // Since an AudioNodeStream is present, go ahead and
          // produce audio block by block for all the rest of the streams.
          ProduceDataForStreamsBlockByBlock(i, n->SampleRate());
          doneAllProducing = true;
        } else {
          ps->ProcessInput(mProcessedTime, mStateComputedTime,
                           ProcessedMediaStream::ALLOW_FINISH);
          NS_ASSERTION(stream->mTracks.GetEnd() >=
                       GraphTimeToStreamTimeWithBlocking(stream, mStateComputedTime),
                       "Stream did not produce enough data");
        }
      }
    }
    NotifyHasCurrentData(stream);
    // Only playback audio and video in real-time mode
    if (mRealtime) {
      CreateOrDestroyAudioStreams(stream);
      if (CurrentDriver()->AsAudioCallbackDriver()) {
        StreamTime ticksPlayedForThisStream = PlayAudio(stream);
        // All streams must contribute the same number of frames per cycle;
        // record the first non-zero count and assert the rest agree.
        if (!ticksPlayed) {
          ticksPlayed = ticksPlayedForThisStream;
        } else {
          MOZ_ASSERT(!ticksPlayedForThisStream || ticksPlayedForThisStream == ticksPlayed,
                     "Each stream should have the same number of frame.");
        }
      }
    }
    if (stream->mStartBlocking > mProcessedTime) {
      allBlockedForever = false;
    }
  }

  // Only flush the mixer when audio was actually produced this cycle.
  if (CurrentDriver()->AsAudioCallbackDriver() && ticksPlayed) {
    mMixer.FinishMixing();
  }

  if (!allBlockedForever) {
    EnsureNextIteration();
  }
}
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2015-09-04 15:26:48 +03:00
|
|
|
bool
MediaStreamGraphImpl::UpdateMainThreadState()
{
  // End-of-iteration bookkeeping: stage main-thread updates and decide
  // whether the graph should keep running. Returns false when the graph
  // enters shutdown (final update sent), true to continue iterating.
  MonitorAutoLock lock(mMonitor);
  // Shut down when forced, when all work is finished and notified, or when
  // the graph is empty with no queued messages.
  bool finalUpdate = mForceShutDown ||
    (mProcessedTime >= mEndTime && AllFinishedStreamsNotified()) ||
    (IsEmpty() && mBackMessageQueue.IsEmpty());
  PrepareUpdatesToMainThreadState(finalUpdate);
  if (finalUpdate) {
    // Enter shutdown mode. The stable-state handler will detect this
    // and complete shutdown. Destroy any streams immediately.
    LOG(LogLevel::Debug,
        ("MediaStreamGraph %p waiting for main thread cleanup", this));
    // We'll shut down this graph object if it does not get restarted.
    mLifecycleState = LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP;
    // No need to Destroy streams here. The main-thread owner of each
    // stream is responsible for calling Destroy on them.
    return false;
  }

  CurrentDriver()->WaitForNextIteration();

  SwapMessageQueues();
  return true;
}
|
|
|
|
|
|
|
|
bool
MediaStreamGraphImpl::OneIteration(GraphTime aStateEnd)
{
  // One full graph iteration: run queued messages, compute blocking state up
  // to aStateEnd (clamped to mEndTime), process/play media, then update the
  // main thread. Returns false when the graph is shutting down.
  // Disable denormal handling for the duration of audio processing (denormals
  // are very slow on some CPUs).
  WebCore::DenormalDisabler disabler;

  // Process graph message from the main thread for this iteration.
  RunMessagesInQueue();

  GraphTime stateEnd = std::min(aStateEnd, mEndTime);
  UpdateGraph(stateEnd);

  mStateComputedTime = stateEnd;

  Process();

  GraphTime oldProcessedTime = mProcessedTime;
  mProcessedTime = stateEnd;

  UpdateCurrentTimeForStreams(oldProcessedTime);

  ProcessChunkMetadata(oldProcessedTime);

  // Process graph messages queued from RunMessageAfterProcessing() on this
  // thread during the iteration.
  RunMessagesInQueue();

  return UpdateMainThreadState();
}
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::ApplyStreamUpdate(StreamUpdate* aUpdate)
|
|
|
|
{
|
2014-08-26 19:01:33 +04:00
|
|
|
mMonitor.AssertCurrentThreadOwns();
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
MediaStream* stream = aUpdate->mStream;
|
2015-06-11 22:10:06 +03:00
|
|
|
if (!stream)
|
2012-04-30 07:11:26 +04:00
|
|
|
return;
|
|
|
|
stream->mMainThreadCurrentTime = aUpdate->mNextMainThreadCurrentTime;
|
|
|
|
stream->mMainThreadFinished = aUpdate->mNextMainThreadFinished;
|
2012-08-20 08:20:44 +04:00
|
|
|
|
2015-05-11 17:07:24 +03:00
|
|
|
if (stream->ShouldNotifyStreamFinished()) {
|
2015-05-11 17:07:38 +03:00
|
|
|
stream->NotifyMainThreadListeners();
|
2012-08-20 08:20:44 +04:00
|
|
|
}
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
MediaStreamGraphImpl::ForceShutDown(media::ShutdownTicket* aShutdownTicket)
{
  // Force the graph to shut down. When aShutdownTicket is non-null, a
  // watchdog timer is armed so a hung audio driver cannot block application
  // shutdown forever (see Notify()). Must be called on the main thread.
  NS_ASSERTION(NS_IsMainThread(), "Must be called on main thread");
  LOG(LogLevel::Debug, ("MediaStreamGraph %p ForceShutdown", this));

  MonitorAutoLock lock(mMonitor);
  if (aShutdownTicket) {
    MOZ_ASSERT(!mForceShutdownTicket);
    // Avoid waiting forever for a graph to shut down
    // synchronously. Reports are that some 3rd-party audio drivers
    // occasionally hang in shutdown (both for us and Chrome).
    mShutdownTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
    if (!mShutdownTimer) {
      return;
    }
    mShutdownTimer->InitWithCallback(this,
                                     MediaStreamGraph::AUDIO_CALLBACK_DRIVER_SHUTDOWN_TIMEOUT,
                                     nsITimer::TYPE_ONE_SHOT);
  }
  mForceShutDown = true;
  mForceShutdownTicket = aShutdownTicket;
  if (mLifecycleState == LIFECYCLE_THREAD_NOT_STARTED) {
    // We *could* have just sent this a message to start up, so don't
    // yank the rug out from under it. Tell it to startup and let it
    // shut down.
    RefPtr<GraphDriver> driver = CurrentDriver();
    // Drop the monitor while starting the driver to avoid lock-order issues.
    MonitorAutoUnlock unlock(mMonitor);
    driver->Start();
  }
  EnsureNextIterationLocked();
}
|
|
|
|
|
2016-10-02 20:51:40 +03:00
|
|
|
NS_IMETHODIMP
MediaStreamGraphImpl::Notify(nsITimer* aTimer)
{
  // Timer callback armed in ForceShutDown(): fires only if the graph failed
  // to shut down within AUDIO_CALLBACK_DRIVER_SHUTDOWN_TIMEOUT. Releases the
  // shutdown ticket so overall application shutdown can proceed.
  MonitorAutoLock lock(mMonitor);
  NS_ASSERTION(!mForceShutdownTicket, "MediaStreamGraph took too long to shut down!");
  // Sigh, graph took too long to shut down. Stop blocking system
  // shutdown and hope all is well.
  mForceShutdownTicket = nullptr;
  return NS_OK;
}
|
|
|
|
|
2017-07-26 21:18:20 +03:00
|
|
|
NS_IMETHODIMP
MediaStreamGraphImpl::GetName(nsACString& aName)
{
  // Returns a fixed human-readable name for this object (XPCOM GetName
  // convention — presumably the nsINamed interface; confirm against the
  // class declaration).
  aName.AssignLiteral("MediaStreamGraphImpl");
  return NS_OK;
}
|
2016-10-02 20:51:40 +03:00
|
|
|
|
2016-01-22 21:49:54 +03:00
|
|
|
/* static */ StaticRefPtr<nsIAsyncShutdownBlocker> gMediaStreamGraphShutdownBlocker;
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
namespace {
|
|
|
|
|
2016-10-02 20:51:40 +03:00
|
|
|
class MediaStreamGraphShutDownRunnable : public Runnable {
|
2012-04-30 07:11:26 +04:00
|
|
|
public:
|
2014-09-01 07:50:23 +04:00
|
|
|
explicit MediaStreamGraphShutDownRunnable(MediaStreamGraphImpl* aGraph)
|
2017-05-10 02:19:37 +03:00
|
|
|
: Runnable("MediaStreamGraphShutDownRunnable")
|
|
|
|
, mGraph(aGraph)
|
2014-04-25 20:03:04 +04:00
|
|
|
{}
|
2016-10-02 20:51:40 +03:00
|
|
|
NS_IMETHOD Run()
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
|
|
|
NS_ASSERTION(mGraph->mDetectedNotRunning,
|
|
|
|
"We should know the graph thread control loop isn't running!");
|
2013-05-03 09:02:55 +04:00
|
|
|
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug, ("Shutting down graph %p", mGraph.get()));
|
2014-08-26 19:04:38 +04:00
|
|
|
|
2014-09-28 20:07:24 +04:00
|
|
|
// We've asserted the graph isn't running. Use mDriver instead of CurrentDriver
|
|
|
|
// to avoid thread-safety checks
|
|
|
|
#if 0 // AudioCallbackDrivers are released asynchronously anyways
|
|
|
|
// XXX a better test would be have setting mDetectedNotRunning make sure
|
|
|
|
// any current callback has finished and block future ones -- or just
|
|
|
|
// handle it all in Shutdown()!
|
|
|
|
if (mGraph->mDriver->AsAudioCallbackDriver()) {
|
|
|
|
MOZ_ASSERT(!mGraph->mDriver->AsAudioCallbackDriver()->InCallback());
|
2014-08-26 19:01:35 +04:00
|
|
|
}
|
2014-09-28 20:07:24 +04:00
|
|
|
#endif
|
2014-08-26 19:01:35 +04:00
|
|
|
|
2016-01-22 10:39:42 +03:00
|
|
|
mGraph->mDriver->Shutdown(); // This will wait until it's shutdown since
|
|
|
|
// we'll start tearing down the graph after this
|
2013-05-08 09:16:35 +04:00
|
|
|
|
2016-10-02 20:51:40 +03:00
|
|
|
// Safe to access these without the monitor since the graph isn't running.
|
2016-01-22 21:49:54 +03:00
|
|
|
// We may be one of several graphs. Drop ticket to eventually unblock shutdown.
|
2016-10-02 20:51:40 +03:00
|
|
|
if (mGraph->mShutdownTimer && !mGraph->mForceShutdownTicket) {
|
2016-08-24 19:24:17 +03:00
|
|
|
MOZ_ASSERT(false,
|
|
|
|
"AudioCallbackDriver took too long to shut down and we let shutdown"
|
|
|
|
" continue - freezing and leaking");
|
|
|
|
|
|
|
|
// The timer fired, so we may be deeper in shutdown now. Block any further
|
|
|
|
// teardown and just leak, for safety.
|
|
|
|
return NS_OK;
|
|
|
|
}
|
2016-01-22 21:49:54 +03:00
|
|
|
mGraph->mForceShutdownTicket = nullptr;
|
|
|
|
|
|
|
|
// We can't block past the final LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION
|
|
|
|
// stage, since completion of that stage requires all streams to be freed,
|
|
|
|
// which requires shutdown to proceed.
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// mGraph's thread is not running so it's OK to do whatever here
|
|
|
|
if (mGraph->IsEmpty()) {
|
2014-02-11 04:04:58 +04:00
|
|
|
// mGraph is no longer needed, so delete it.
|
2014-04-13 22:08:10 +04:00
|
|
|
mGraph->Destroy();
|
2012-04-30 07:11:26 +04:00
|
|
|
} else {
|
2014-02-11 04:04:58 +04:00
|
|
|
// The graph is not empty. We must be in a forced shutdown, or a
|
|
|
|
// non-realtime graph that has finished processing. Some later
|
|
|
|
// AppendMessage will detect that the manager has been emptied, and
|
|
|
|
// delete it.
|
|
|
|
NS_ASSERTION(mGraph->mForceShutDown || !mGraph->mRealtime,
|
|
|
|
"Not in forced shutdown?");
|
2016-06-16 14:45:29 +03:00
|
|
|
for (MediaStream* stream : mGraph->AllStreams()) {
|
|
|
|
// Clean up all MediaSegments since we cannot release Images too
|
|
|
|
// late during shutdown.
|
|
|
|
if (SourceMediaStream* source = stream->AsSourceStream()) {
|
|
|
|
// Finishing a SourceStream prevents new data from being appended.
|
|
|
|
source->Finish();
|
|
|
|
}
|
|
|
|
stream->GetStreamTracks().Clear();
|
|
|
|
}
|
2013-05-03 09:02:55 +04:00
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
mGraph->mLifecycleState =
|
|
|
|
MediaStreamGraphImpl::LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION;
|
|
|
|
}
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
private:
|
2015-10-18 08:24:48 +03:00
|
|
|
RefPtr<MediaStreamGraphImpl> mGraph;
|
2012-04-30 07:11:26 +04:00
|
|
|
};
|
|
|
|
|
2016-04-26 03:23:21 +03:00
|
|
|
// Main-thread runnable that invokes MediaStreamGraphImpl::RunInStableState().
// aSourceIsMSG records whether it was posted from the graph (MSG) thread, so
// RunInStableState can reset its posted-event bookkeeping accordingly.
class MediaStreamGraphStableStateRunnable : public Runnable {
public:
  explicit MediaStreamGraphStableStateRunnable(MediaStreamGraphImpl* aGraph,
                                               bool aSourceIsMSG)
    : Runnable("MediaStreamGraphStableStateRunnable")
    , mGraph(aGraph)
    , mSourceIsMSG(aSourceIsMSG)
  {
  }
  NS_IMETHOD Run() override
  {
    if (mGraph) {
      mGraph->RunInStableState(mSourceIsMSG);
    }
    return NS_OK;
  }
private:
  RefPtr<MediaStreamGraphImpl> mGraph;
  // True when this runnable was posted from the graph thread.
  bool mSourceIsMSG;
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Control messages forwarded from main thread to graph manager thread
|
|
|
|
*/
|
|
|
|
// Control message that registers a newly created stream with the graph on
// the graph thread.
class CreateMessage : public ControlMessage {
public:
  explicit CreateMessage(MediaStream* aStream) : ControlMessage(aStream) {}
  void Run() override
  {
    mStream->GraphImpl()->AddStreamGraphThread(mStream);
  }
  void RunDuringShutdown() override
  {
    // Make sure to run this message during shutdown too, to make sure
    // that we balance the number of streams registered with the graph
    // as they're destroyed during shutdown.
    Run();
  }
};
|
|
|
|
|
2015-07-13 18:25:42 +03:00
|
|
|
} // namespace
|
2012-04-30 07:11:26 +04:00
|
|
|
|
|
|
|
void
MediaStreamGraphImpl::RunInStableState(bool aSourceIsMSG)
{
  NS_ASSERTION(NS_IsMainThread(), "Must be called on main thread");

  // Runnables queued for main-thread execution; they are run at the end of
  // this function, after the monitor has been released.
  nsTArray<nsCOMPtr<nsIRunnable> > runnables;
  // When we're doing a forced shutdown, pending control messages may be
  // run on the main thread via RunDuringShutdown. Those messages must
  // run without the graph monitor being held. So, we collect them here.
  nsTArray<UniquePtr<ControlMessage>> controlMessagesToRunDuringShutdown;

  {
    MonitorAutoLock lock(mMonitor);
    if (aSourceIsMSG) {
      // Dispatched from the graph side: clear the pending-event flag set by
      // EnsureStableStateEventPosted().
      MOZ_ASSERT(mPostedRunInStableStateEvent);
      mPostedRunInStableStateEvent = false;
    }

    // This should be kept in sync with the LifecycleState enum in
    // MediaStreamGraphImpl.h
    const char* LifecycleState_str[] = {
      "LIFECYCLE_THREAD_NOT_STARTED",
      "LIFECYCLE_RUNNING",
      "LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP",
      "LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN",
      "LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION"
    };

    if (mLifecycleState != LIFECYCLE_RUNNING) {
      LOG(LogLevel::Debug,
          ("Running %p in stable state. Current state: %s",
           this,
           LifecycleState_str[mLifecycleState]));
    }

    runnables.SwapElements(mUpdateRunnables);
    // Apply every pending stream update posted by the graph thread.
    for (uint32_t i = 0; i < mStreamUpdates.Length(); ++i) {
      StreamUpdate* update = &mStreamUpdates[i];
      if (update->mStream) {
        ApplyStreamUpdate(update);
      }
    }
    mStreamUpdates.Clear();

    if (mCurrentTaskMessageQueue.IsEmpty()) {
      if (mLifecycleState == LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP && IsEmpty()) {
        // Complete shutdown. First, ensure that this graph is no longer used.
        // A new graph graph will be created if one is needed.
        // Asynchronously clean up old graph. We don't want to do this
        // synchronously because it spins the event loop waiting for threads
        // to shut down, and we don't want to do that in a stable state handler.
        mLifecycleState = LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN;
        LOG(LogLevel::Debug,
            ("Sending MediaStreamGraphShutDownRunnable %p", this));
        nsCOMPtr<nsIRunnable> event = new MediaStreamGraphShutDownRunnable(this);
        mAbstractMainThread->Dispatch(event.forget());

        LOG(LogLevel::Debug, ("Disconnecting MediaStreamGraph %p", this));

        // Find the graph in the hash table and remove it.
        for (auto iter = gGraphs.Iter(); !iter.Done(); iter.Next()) {
          if (iter.UserData() == this) {
            iter.Remove();
            break;
          }
        }
      }
    } else {
      if (mLifecycleState <= LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP) {
        // Hand the accumulated main-thread messages over to the graph thread.
        MessageBlock* block = mBackMessageQueue.AppendElement();
        block->mMessages.SwapElements(mCurrentTaskMessageQueue);
        EnsureNextIterationLocked();
      }

      // If the MediaStreamGraph has more messages going to it, try to revive
      // it to process those messages. Don't do this if we're in a forced
      // shutdown or it's a non-realtime graph that has already terminated
      // processing.
      if (mLifecycleState == LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP &&
          mRealtime && !mForceShutDown) {
        mLifecycleState = LIFECYCLE_RUNNING;
        // Revive the MediaStreamGraph since we have more messages going to it.
        // Note that we need to put messages into its queue before reviving it,
        // or it might exit immediately.
        {
          LOG(LogLevel::Debug,
              ("Reviving a graph (%p) ! %s",
               this,
               CurrentDriver()->AsAudioCallbackDriver() ? "AudioDriver"
                                                        : "SystemDriver"));
          RefPtr<GraphDriver> driver = CurrentDriver();
          // Drop the monitor while calling into the driver to avoid deadlock.
          MonitorAutoUnlock unlock(mMonitor);
          driver->Revive();
        }
      }
    }

    // Don't start the thread for a non-realtime graph until it has been
    // explicitly started by StartNonRealtimeProcessing.
    if (mLifecycleState == LIFECYCLE_THREAD_NOT_STARTED &&
        (mRealtime || mNonRealtimeProcessing)) {
      mLifecycleState = LIFECYCLE_RUNNING;
      // Start the thread now. We couldn't start it earlier because
      // the graph might exit immediately on finding it has no streams. The
      // first message for a new graph must create a stream.
      {
        // We should exit the monitor for now, because starting a stream might
        // take locks, and we don't want to deadlock.
        LOG(LogLevel::Debug,
            ("Starting a graph (%p) ! %s",
             this,
             CurrentDriver()->AsAudioCallbackDriver() ? "AudioDriver"
                                                      : "SystemDriver"));
        RefPtr<GraphDriver> driver = CurrentDriver();
        MonitorAutoUnlock unlock(mMonitor);
        driver->Start();
        // It's not safe to Shutdown() a thread from StableState, and
        // releasing this may shutdown a SystemClockDriver thread.
        // Proxy the release to outside of StableState.
        NS_ReleaseOnMainThreadSystemGroup(
          "MediaStreamGraphImpl::CurrentDriver", driver.forget(),
          true); // always proxy
      }
    }

    if ((mForceShutDown || !mRealtime) &&
        mLifecycleState == LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP) {
      // Defer calls to RunDuringShutdown() to happen while mMonitor is not held.
      for (uint32_t i = 0; i < mBackMessageQueue.Length(); ++i) {
        MessageBlock& mb = mBackMessageQueue[i];
        controlMessagesToRunDuringShutdown.AppendElements(Move(mb.mMessages));
      }
      mBackMessageQueue.Clear();
      MOZ_ASSERT(mCurrentTaskMessageQueue.IsEmpty());
      // Stop MediaStreamGraph threads. Do not clear gGraph since
      // we have outstanding DOM objects that may need it.
      mLifecycleState = LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN;
      nsCOMPtr<nsIRunnable> event = new MediaStreamGraphShutDownRunnable(this);
      mAbstractMainThread->Dispatch(event.forget());
    }

    mDetectedNotRunning = mLifecycleState > LIFECYCLE_RUNNING;
  }

  // Make sure we get a new current time in the next event loop task
  if (!aSourceIsMSG) {
    // Dispatched from the main-thread side (EnsureRunInStableState).
    MOZ_ASSERT(mPostedRunInStableState);
    mPostedRunInStableState = false;
  }

  // Monitor is released now; safe to run the deferred shutdown messages.
  for (uint32_t i = 0; i < controlMessagesToRunDuringShutdown.Length(); ++i) {
    controlMessagesToRunDuringShutdown[i]->RunDuringShutdown();
  }

#ifdef DEBUG
  mCanRunMessagesSynchronously = mDetectedNotRunning &&
    mLifecycleState >= LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN;
#endif

  for (uint32_t i = 0; i < runnables.Length(); ++i) {
    runnables[i]->Run();
  }
}
|
|
|
|
|
2014-09-28 20:07:24 +04:00
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
MediaStreamGraphImpl::EnsureRunInStableState()
{
  NS_ASSERTION(NS_IsMainThread(), "main thread only");

  // Coalesce: only one stable-state runnable may be pending at a time.
  if (mPostedRunInStableState)
    return;
  mPostedRunInStableState = true;
  // 'false' marks the event as originating from the main thread, not the MSG.
  nsCOMPtr<nsIRunnable> event = new MediaStreamGraphStableStateRunnable(this, false);
  nsContentUtils::RunInStableState(event.forget());
}
|
|
|
|
|
|
|
|
void
MediaStreamGraphImpl::EnsureStableStateEventPosted()
{
  // Caller must hold the graph monitor; the flag below is monitor-protected.
  mMonitor.AssertCurrentThreadOwns();

  // Coalesce: only one stable-state event may be pending at a time.
  if (mPostedRunInStableStateEvent)
    return;
  mPostedRunInStableStateEvent = true;
  // 'true' marks the event as originating from the MSG side.
  nsCOMPtr<nsIRunnable> event = new MediaStreamGraphStableStateRunnable(this, true);
  mAbstractMainThread->Dispatch(event.forget());
}
|
|
|
|
|
|
|
|
void
MediaStreamGraphImpl::AppendMessage(UniquePtr<ControlMessage> aMessage)
{
  MOZ_ASSERT(NS_IsMainThread(), "main thread only");
  MOZ_ASSERT(!aMessage->GetStream() ||
             !aMessage->GetStream()->IsDestroyed(),
             "Stream already destroyed");

  if (mDetectedNotRunning &&
      mLifecycleState > LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP) {
    // The graph control loop is not running and main thread cleanup has
    // happened. From now on we can't append messages to mCurrentTaskMessageQueue,
    // because that will never be processed again, so just RunDuringShutdown
    // this message.
    // This should only happen during forced shutdown, or after a non-realtime
    // graph has finished processing.
#ifdef DEBUG
    MOZ_ASSERT(mCanRunMessagesSynchronously);
    mCanRunMessagesSynchronously = false;
#endif
    aMessage->RunDuringShutdown();
#ifdef DEBUG
    mCanRunMessagesSynchronously = true;
#endif
    if (IsEmpty() &&
        mLifecycleState >= LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION) {

      // Find the graph in the hash table and remove it.
      for (auto iter = gGraphs.Iter(); !iter.Done(); iter.Next()) {
        if (iter.UserData() == this) {
          iter.Remove();
          break;
        }
      }

      // The last stream is gone; tear the graph down.
      Destroy();
    }
    return;
  }

  // Normal path: queue for the graph thread and schedule a stable-state run.
  mCurrentTaskMessageQueue.AppendElement(Move(aMessage));
  EnsureRunInStableState();
}
|
|
|
|
|
2017-06-29 21:31:17 +03:00
|
|
|
void
MediaStreamGraphImpl::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable)
{
  // Forward the runnable to the graph's designated main thread.
  mAbstractMainThread->Dispatch(Move(aRunnable));
}
|
|
|
|
|
2017-06-29 21:30:57 +03:00
|
|
|
MediaStream::MediaStream()
  : mTracksStartTime(0)
  , mStartBlocking(GRAPH_TIME_MAX)
  , mSuspendedCount(0)
  , mFinished(false)
  , mNotifiedFinished(false)
  , mNotifiedBlocked(false)
  , mHasCurrentData(false)
  , mNotifiedHasCurrentData(false)
  , mMainThreadCurrentTime(0)
  , mMainThreadFinished(false)
  , mFinishedNotificationSent(false)
  , mMainThreadDestroyed(false)
  , mNrOfMainThreadUsers(0)
  , mGraph(nullptr) // set later via SetGraphImpl()
{
  MOZ_COUNT_CTOR(MediaStream);
}
|
|
|
|
|
2016-06-30 10:07:48 +03:00
|
|
|
MediaStream::~MediaStream()
{
  MOZ_COUNT_DTOR(MediaStream);
  // Destroy() must have run before the last reference is dropped.
  NS_ASSERTION(mMainThreadDestroyed, "Should have been destroyed already");
  NS_ASSERTION(mMainThreadListeners.IsEmpty(),
               "All main thread listeners should have been removed");
}
|
|
|
|
|
2014-04-13 22:08:10 +04:00
|
|
|
size_t
MediaStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = 0;

  // Not owned:
  // - mGraph - Not reported here
  // - mConsumers - elements
  // Future:
  // - mVideoOutputs - elements
  // - mLastPlayedVideoFrame
  // - mListeners - elements
  // - mAudioOutputStream - elements

  amount += mTracks.SizeOfExcludingThis(aMallocSizeOf);
  // Shallow measurement only: the arrays' buffers, not what elements point at.
  amount += mAudioOutputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
  amount += mVideoOutputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
  amount += mListeners.ShallowSizeOfExcludingThis(aMallocSizeOf);
  amount += mMainThreadListeners.ShallowSizeOfExcludingThis(aMallocSizeOf);
  amount += mDisabledTracks.ShallowSizeOfExcludingThis(aMallocSizeOf);
  amount += mConsumers.ShallowSizeOfExcludingThis(aMallocSizeOf);

  return amount;
}
|
|
|
|
|
|
|
|
size_t
MediaStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  // Measure the object itself, then add everything it owns.
  size_t total = aMallocSizeOf(this);
  total += SizeOfExcludingThis(aMallocSizeOf);
  return total;
}
|
|
|
|
|
2016-08-24 20:04:33 +03:00
|
|
|
void
|
|
|
|
MediaStream::IncrementSuspendCount()
|
|
|
|
{
|
|
|
|
++mSuspendedCount;
|
|
|
|
if (mSuspendedCount == 1) {
|
|
|
|
for (uint32_t i = 0; i < mConsumers.Length(); ++i) {
|
|
|
|
mConsumers[i]->Suspended();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStream::DecrementSuspendCount()
|
|
|
|
{
|
|
|
|
NS_ASSERTION(mSuspendedCount > 0, "Suspend count underrun");
|
|
|
|
--mSuspendedCount;
|
|
|
|
|
|
|
|
if (mSuspendedCount == 0) {
|
|
|
|
for (uint32_t i = 0; i < mConsumers.Length(); ++i) {
|
|
|
|
mConsumers[i]->Resumed();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
MediaStreamGraphImpl*
MediaStream::GraphImpl()
{
  // Raw, non-owning pointer to the owning graph; null after DestroyImpl().
  return mGraph;
}
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
MediaStreamGraph*
MediaStream::Graph()
{
  // Same pointer as GraphImpl(), exposed through the public base type.
  return mGraph;
}
|
|
|
|
|
|
|
|
void
MediaStream::SetGraphImpl(MediaStreamGraphImpl* aGraph)
{
  MOZ_ASSERT(!mGraph, "Should only be called once");
  mGraph = aGraph;
  // The stream's track storage ticks at the graph's sample rate.
  mTracks.InitGraphRate(aGraph->GraphRate());
}
|
|
|
|
|
2013-06-10 23:01:19 +04:00
|
|
|
void
|
|
|
|
MediaStream::SetGraphImpl(MediaStreamGraph* aGraph)
|
|
|
|
{
|
|
|
|
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(aGraph);
|
|
|
|
SetGraphImpl(graph);
|
|
|
|
}
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
StreamTime
MediaStream::GraphTimeToStreamTime(GraphTime aTime)
{
  // Only valid for times before this stream starts blocking.
  NS_ASSERTION(mStartBlocking == GraphImpl()->mStateComputedTime ||
               aTime <= mStartBlocking,
               "Incorrectly ignoring blocking!");
  return aTime - mTracksStartTime;
}
|
|
|
|
|
|
|
|
GraphTime
MediaStream::StreamTimeToGraphTime(StreamTime aTime)
{
  // Inverse of GraphTimeToStreamTime; same blocking precondition.
  NS_ASSERTION(mStartBlocking == GraphImpl()->mStateComputedTime ||
               aTime + mTracksStartTime <= mStartBlocking,
               "Incorrectly ignoring blocking!");
  return aTime + mTracksStartTime;
}
|
|
|
|
|
2013-02-04 14:04:26 +04:00
|
|
|
StreamTime
MediaStream::GraphTimeToStreamTimeWithBlocking(GraphTime aTime)
{
  // Delegates to the graph, which accounts for intervals where this stream
  // was blocked.
  return GraphImpl()->GraphTimeToStreamTimeWithBlocking(this, aTime);
}
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
void
MediaStream::FinishOnGraphThread()
{
  // Graph-thread-only: mark this stream as finished via the graph.
  GraphImpl()->FinishStream(this);
}
|
|
|
|
|
2016-01-26 05:49:01 +03:00
|
|
|
StreamTracks::Track*
MediaStream::FindTrack(TrackID aID)
{
  // Returns null when no track with aID exists.
  return mTracks.FindTrack(aID);
}
|
|
|
|
|
2016-01-26 05:49:01 +03:00
|
|
|
StreamTracks::Track*
MediaStream::EnsureTrack(TrackID aTrackId)
{
  StreamTracks::Track* track = mTracks.FindTrack(aTrackId);
  if (!track) {
    // Track does not exist yet: create it with an empty audio segment and
    // notify listeners of the creation before adding it.
    nsAutoPtr<MediaSegment> segment(new AudioSegment());
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      l->NotifyQueuedTrackChanges(Graph(), aTrackId, 0,
                                  TrackEventCommand::TRACK_EVENT_CREATED,
                                  *segment);
      // TODO If we ever need to ensure several tracks at once, we will have to
      // change this.
      l->NotifyFinishedTrackCreation(Graph());
    }
    track = &mTracks.AddTrack(aTrackId, 0, segment.forget());
  }
  return track;
}
|
|
|
|
|
2013-01-07 06:31:30 +04:00
|
|
|
void
MediaStream::RemoveAllListenersImpl()
{
  // Iterate backwards and steal each entry with forget() so a listener's
  // NotifyEvent cannot observe a half-cleared array entry.
  for (int32_t i = mListeners.Length() - 1; i >= 0; --i) {
    RefPtr<MediaStreamListener> listener = mListeners[i].forget();
    listener->NotifyEvent(GraphImpl(), MediaStreamGraphEvent::EVENT_REMOVED);
  }
  mListeners.Clear();
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
MediaStream::DestroyImpl()
{
  // Disconnect consumers in reverse order; Disconnect() may mutate mConsumers.
  for (int32_t i = mConsumers.Length() - 1; i >= 0; --i) {
    mConsumers[i]->Disconnect();
  }
  // After this, GraphImpl()/Graph() return null.
  mGraph = nullptr;
}
|
|
|
|
|
|
|
|
void
MediaStream::Destroy()
{
  NS_ASSERTION(mNrOfMainThreadUsers == 0,
               "Do not mix Destroy() and RegisterUser()/UnregisterUser()");
  // Keep this stream alive until we leave this method
  RefPtr<MediaStream> kungFuDeathGrip = this;

  // Graph-thread message that performs the actual teardown.
  class Message : public ControlMessage {
  public:
    explicit Message(MediaStream* aStream) : ControlMessage(aStream) {}
    void Run() override
    {
      mStream->RemoveAllListenersImpl();
      // DestroyImpl() nulls out mStream's graph pointer, so grab it first.
      auto graph = mStream->GraphImpl();
      mStream->DestroyImpl();
      graph->RemoveStreamGraphThread(mStream);
    }
    void RunDuringShutdown() override
    { Run(); }
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this));
  // Message::RunDuringShutdown may have removed this stream from the graph,
  // but our kungFuDeathGrip above will have kept this stream alive if
  // necessary.
  mMainThreadDestroyed = true;
}
|
|
|
|
|
2016-01-05 05:16:22 +03:00
|
|
|
void
MediaStream::RegisterUser()
{
  MOZ_ASSERT(NS_IsMainThread());
  // Main-thread refcount of users; the stream is destroyed when it hits 0
  // in UnregisterUser().
  ++mNrOfMainThreadUsers;
}
|
|
|
|
|
|
|
|
void
MediaStream::UnregisterUser()
{
  MOZ_ASSERT(NS_IsMainThread());

  --mNrOfMainThreadUsers;
  NS_ASSERTION(mNrOfMainThreadUsers >= 0, "Double-removal of main thread user");
  NS_ASSERTION(!IsDestroyed(), "Do not mix Destroy() and RegisterUser()/UnregisterUser()");
  // Last user gone: tear the stream down.
  if (mNrOfMainThreadUsers == 0) {
    Destroy();
  }
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
MediaStream::AddAudioOutput(void* aKey)
{
  // Forward the request to the graph thread via a control message.
  class Message : public ControlMessage {
  public:
    Message(MediaStream* aStream, void* aKey) : ControlMessage(aStream), mKey(aKey) {}
    void Run() override
    {
      mStream->AddAudioOutputImpl(mKey);
    }
    void* mKey;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey));
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStream::SetAudioOutputVolumeImpl(void* aKey, float aVolume)
|
|
|
|
{
|
2012-08-22 19:56:38 +04:00
|
|
|
for (uint32_t i = 0; i < mAudioOutputs.Length(); ++i) {
|
2012-04-30 07:11:26 +04:00
|
|
|
if (mAudioOutputs[i].mKey == aKey) {
|
|
|
|
mAudioOutputs[i].mVolume = aVolume;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
NS_ERROR("Audio output key not found");
|
|
|
|
}
|
|
|
|
|
|
|
|
void
MediaStream::SetAudioOutputVolume(void* aKey, float aVolume)
{
  // Forward the volume change to the graph thread via a control message.
  class Message : public ControlMessage {
  public:
    Message(MediaStream* aStream, void* aKey, float aVolume) :
      ControlMessage(aStream), mKey(aKey), mVolume(aVolume) {}
    void Run() override
    {
      mStream->SetAudioOutputVolumeImpl(mKey, mVolume);
    }
    void* mKey;
    float mVolume;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey, aVolume));
}
|
|
|
|
|
2015-10-29 08:19:51 +03:00
|
|
|
void
MediaStream::AddAudioOutputImpl(void* aKey)
{
  LOG(LogLevel::Info,
      ("MediaStream %p Adding AudioOutput for key %p", this, aKey));
  // No duplicate check here; callers are responsible for key uniqueness.
  mAudioOutputs.AppendElement(AudioOutput(aKey));
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
MediaStream::RemoveAudioOutputImpl(void* aKey)
{
  LOG(LogLevel::Info,
      ("MediaStream %p Removing AudioOutput for key %p", this, aKey));
  // Remove the first (and expected only) output registered under aKey.
  for (uint32_t i = 0; i < mAudioOutputs.Length(); ++i) {
    if (mAudioOutputs[i].mKey == aKey) {
      mAudioOutputs.RemoveElementAt(i);
      return;
    }
  }
  NS_ERROR("Audio output key not found");
}
|
|
|
|
|
|
|
|
void
MediaStream::RemoveAudioOutput(void* aKey)
{
  // Forward the removal to the graph thread via a control message.
  class Message : public ControlMessage {
  public:
    Message(MediaStream* aStream, void* aKey) :
      ControlMessage(aStream), mKey(aKey) {}
    void Run() override
    {
      mStream->RemoveAudioOutputImpl(mKey);
    }
    void* mKey;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey));
}
|
|
|
|
|
2015-10-29 08:19:51 +03:00
|
|
|
void
|
2016-05-27 09:33:50 +03:00
|
|
|
MediaStream::AddVideoOutputImpl(already_AddRefed<MediaStreamVideoSink> aSink,
|
|
|
|
TrackID aID)
|
2015-10-29 08:19:51 +03:00
|
|
|
{
|
2016-05-27 09:33:48 +03:00
|
|
|
RefPtr<MediaStreamVideoSink> sink = aSink;
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Info,
|
|
|
|
("MediaStream %p Adding MediaStreamVideoSink %p as output",
|
|
|
|
this,
|
|
|
|
sink.get()));
|
2016-05-27 09:33:50 +03:00
|
|
|
MOZ_ASSERT(aID != TRACK_NONE);
|
|
|
|
for (auto entry : mVideoOutputs) {
|
|
|
|
if (entry.mListener == sink &&
|
|
|
|
(entry.mTrackID == TRACK_ANY || entry.mTrackID == aID)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TrackBound<MediaStreamVideoSink>* l = mVideoOutputs.AppendElement();
|
|
|
|
l->mListener = sink;
|
|
|
|
l->mTrackID = aID;
|
2016-05-30 06:32:23 +03:00
|
|
|
|
|
|
|
AddDirectTrackListenerImpl(sink.forget(), aID);
|
2015-10-29 08:19:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2016-05-27 09:33:50 +03:00
|
|
|
MediaStream::RemoveVideoOutputImpl(MediaStreamVideoSink* aSink,
|
|
|
|
TrackID aID)
|
2015-10-29 08:19:51 +03:00
|
|
|
{
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(
|
|
|
|
LogLevel::Info,
|
|
|
|
("MediaStream %p Removing MediaStreamVideoSink %p as output", this, aSink));
|
2016-05-27 09:33:50 +03:00
|
|
|
MOZ_ASSERT(aID != TRACK_NONE);
|
|
|
|
|
2015-10-29 08:19:51 +03:00
|
|
|
// Ensure that any frames currently queued for playback by the compositor
|
|
|
|
// are removed.
|
2016-05-27 09:33:48 +03:00
|
|
|
aSink->ClearFrames();
|
2016-05-27 09:33:50 +03:00
|
|
|
for (size_t i = 0; i < mVideoOutputs.Length(); ++i) {
|
|
|
|
if (mVideoOutputs[i].mListener == aSink &&
|
|
|
|
(mVideoOutputs[i].mTrackID == TRACK_ANY ||
|
|
|
|
mVideoOutputs[i].mTrackID == aID)) {
|
|
|
|
mVideoOutputs.RemoveElementAt(i);
|
|
|
|
}
|
|
|
|
}
|
2016-05-30 06:32:23 +03:00
|
|
|
|
|
|
|
RemoveDirectTrackListenerImpl(aSink, aID);
|
2015-10-29 08:19:51 +03:00
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
MediaStream::AddVideoOutput(MediaStreamVideoSink* aSink, TrackID aID)
{
  // Forward the registration to the graph thread via a control message.
  class Message : public ControlMessage {
  public:
    Message(MediaStream* aStream, MediaStreamVideoSink* aSink, TrackID aID) :
      ControlMessage(aStream), mSink(aSink), mID(aID) {}
    void Run() override
    {
      mStream->AddVideoOutputImpl(mSink.forget(), mID);
    }
    RefPtr<MediaStreamVideoSink> mSink;
    TrackID mID;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aSink, aID));
}
|
|
|
|
|
|
|
|
void
MediaStream::RemoveVideoOutput(MediaStreamVideoSink* aSink, TrackID aID)
{
  // Forward the removal to the graph thread via a control message.
  class Message : public ControlMessage {
  public:
    Message(MediaStream* aStream, MediaStreamVideoSink* aSink, TrackID aID) :
      ControlMessage(aStream), mSink(aSink), mID(aID) {}
    void Run() override
    {
      mStream->RemoveVideoOutputImpl(mSink, mID);
    }
    RefPtr<MediaStreamVideoSink> mSink;
    TrackID mID;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aSink, aID));
}
|
|
|
|
|
|
|
|
void
|
2015-09-10 16:45:36 +03:00
|
|
|
MediaStream::Suspend()
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
2015-09-10 16:45:36 +03:00
|
|
|
explicit Message(MediaStream* aStream) :
|
|
|
|
ControlMessage(aStream) {}
|
2016-01-18 06:50:29 +03:00
|
|
|
void Run() override
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
2015-09-10 16:45:36 +03:00
|
|
|
mStream->GraphImpl()->IncrementSuspendCount(mStream);
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
|
|
|
};
|
2014-01-13 20:38:30 +04:00
|
|
|
|
|
|
|
// This can happen if this method has been called asynchronously, and the
|
|
|
|
// stream has been destroyed since then.
|
|
|
|
if (mMainThreadDestroyed) {
|
|
|
|
return;
|
|
|
|
}
|
2016-01-21 00:14:33 +03:00
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this));
|
2015-09-10 16:45:36 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStream::Resume()
|
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
|
|
|
explicit Message(MediaStream* aStream) :
|
|
|
|
ControlMessage(aStream) {}
|
2016-01-18 06:50:29 +03:00
|
|
|
void Run() override
|
2015-09-10 16:45:36 +03:00
|
|
|
{
|
|
|
|
mStream->GraphImpl()->DecrementSuspendCount(mStream);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// This can happen if this method has been called asynchronously, and the
|
|
|
|
// stream has been destroyed since then.
|
|
|
|
if (mMainThreadDestroyed) {
|
|
|
|
return;
|
|
|
|
}
|
2016-01-21 00:14:33 +03:00
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this));
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
|
2012-05-24 14:37:14 +04:00
|
|
|
// Graph-thread half of AddListener(). Takes ownership of aListener, then
// immediately replays this stream's current state (blocking state, existing
// tracks, finished/has-data flags) so the new listener is brought up to date
// with notifications it would otherwise have missed.
void
MediaStream::AddListenerImpl(already_AddRefed<MediaStreamListener> aListener)
{
  MediaStreamListener* listener = *mListeners.AppendElement() = aListener;
  listener->NotifyBlockingChanged(GraphImpl(),
    mNotifiedBlocked ? MediaStreamListener::BLOCKED : MediaStreamListener::UNBLOCKED);

  // Replay a "track created" (and possibly "ended") event for every track
  // that already exists on this stream.
  for (StreamTracks::TrackIter it(mTracks); !it.IsEnded(); it.Next()) {
    MediaStream* inputStream = nullptr;
    TrackID inputTrackID = TRACK_INVALID;
    if (ProcessedMediaStream* ps = AsProcessedStream()) {
      // The only ProcessedMediaStream where we should have listeners is
      // TrackUnionStream - it's what's used as owned stream in DOMMediaStream,
      // the only main-thread exposed stream type.
      // TrackUnionStream guarantees that each of its tracks has an input track.
      // Other types do not implement GetInputStreamFor() and will return null.
      inputStream = ps->GetInputStreamFor(it->GetID());
      MOZ_ASSERT(inputStream);
      inputTrackID = ps->GetInputTrackIDFor(it->GetID());
      MOZ_ASSERT(IsTrackIDExplicit(inputTrackID));
    }

    uint32_t flags = TrackEventCommand::TRACK_EVENT_CREATED;
    if (it->IsEnded()) {
      flags |= TrackEventCommand::TRACK_EVENT_ENDED;
    }
    // An empty clone carries the track's media type without any actual data.
    nsAutoPtr<MediaSegment> segment(it->GetSegment()->CreateEmptyClone());
    listener->NotifyQueuedTrackChanges(Graph(), it->GetID(), it->GetEnd(),
                                       static_cast<TrackEventCommand>(flags), *segment,
                                       inputStream, inputTrackID);
  }
  // Replay terminal notifications that already fired on this stream.
  if (mNotifiedFinished) {
    listener->NotifyEvent(GraphImpl(), MediaStreamGraphEvent::EVENT_FINISHED);
  }
  if (mNotifiedHasCurrentData) {
    listener->NotifyHasCurrentData(GraphImpl());
  }
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
|
|
|
|
MediaStream::AddListener(MediaStreamListener* aListener)
|
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
|
|
|
Message(MediaStream* aStream, MediaStreamListener* aListener) :
|
|
|
|
ControlMessage(aStream), mListener(aListener) {}
|
2016-01-18 06:50:29 +03:00
|
|
|
void Run() override
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
|
|
|
mStream->AddListenerImpl(mListener.forget());
|
|
|
|
}
|
2015-10-18 08:24:48 +03:00
|
|
|
RefPtr<MediaStreamListener> mListener;
|
2012-04-30 07:11:26 +04:00
|
|
|
};
|
2016-01-21 00:14:33 +03:00
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener));
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
|
|
|
|
2013-01-07 06:31:30 +04:00
|
|
|
void
|
|
|
|
MediaStream::RemoveListenerImpl(MediaStreamListener* aListener)
|
2014-06-22 22:21:00 +04:00
|
|
|
{
|
2013-01-07 06:31:30 +04:00
|
|
|
// wouldn't need this if we could do it in the opposite order
|
2015-10-18 08:24:48 +03:00
|
|
|
RefPtr<MediaStreamListener> listener(aListener);
|
2013-01-07 06:31:30 +04:00
|
|
|
mListeners.RemoveElement(aListener);
|
2016-06-30 10:07:48 +03:00
|
|
|
listener->NotifyEvent(GraphImpl(), MediaStreamGraphEvent::EVENT_REMOVED);
|
2013-01-07 06:31:30 +04:00
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
void
|
|
|
|
MediaStream::RemoveListener(MediaStreamListener* aListener)
|
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
|
|
|
Message(MediaStream* aStream, MediaStreamListener* aListener) :
|
|
|
|
ControlMessage(aStream), mListener(aListener) {}
|
2016-01-18 06:50:29 +03:00
|
|
|
void Run() override
|
2012-04-30 07:11:26 +04:00
|
|
|
{
|
|
|
|
mStream->RemoveListenerImpl(mListener);
|
|
|
|
}
|
2015-10-18 08:24:48 +03:00
|
|
|
RefPtr<MediaStreamListener> mListener;
|
2012-04-30 07:11:26 +04:00
|
|
|
};
|
2013-02-28 23:53:38 +04:00
|
|
|
// If the stream is destroyed the Listeners have or will be
|
|
|
|
// removed.
|
|
|
|
if (!IsDestroyed()) {
|
2016-01-21 00:14:33 +03:00
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener));
|
2013-02-28 23:53:38 +04:00
|
|
|
}
|
2012-04-30 07:11:26 +04:00
|
|
|
}
|
2013-10-25 03:07:29 +04:00
|
|
|
|
2016-03-03 19:27:59 +03:00
|
|
|
// Graph-thread half of AddTrackListener(). Binds the listener to aTrackID and
// replays the track's current principal handle and, if applicable, its ended
// state so the listener starts out in sync.
void
MediaStream::AddTrackListenerImpl(already_AddRefed<MediaStreamTrackListener> aListener,
                                  TrackID aTrackID)
{
  TrackBound<MediaStreamTrackListener>* l = mTrackListeners.AppendElement();
  l->mListener = aListener;
  l->mTrackID = aTrackID;

  // The binding is kept even if the track doesn't exist yet; we just can't
  // replay any state for it.
  StreamTracks::Track* track = FindTrack(aTrackID);
  if (!track) {
    return;
  }
  PrincipalHandle lastPrincipalHandle =
    track->GetSegment()->GetLastPrincipalHandle();
  l->mListener->NotifyPrincipalHandleChanged(Graph(), lastPrincipalHandle);
  // Only notify "ended" once the end of the track has actually been reached
  // by the graph's computed time, not merely been queued.
  if (track->IsEnded() &&
      track->GetEnd() <= GraphTimeToStreamTime(GraphImpl()->mStateComputedTime)) {
    l->mListener->NotifyEnded();
  }
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStream::AddTrackListener(MediaStreamTrackListener* aListener,
|
|
|
|
TrackID aTrackID)
|
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
|
|
|
Message(MediaStream* aStream, MediaStreamTrackListener* aListener,
|
|
|
|
TrackID aTrackID) :
|
|
|
|
ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
|
2017-02-23 13:08:26 +03:00
|
|
|
void Run() override
|
2016-03-03 19:27:59 +03:00
|
|
|
{
|
|
|
|
mStream->AddTrackListenerImpl(mListener.forget(), mTrackID);
|
|
|
|
}
|
|
|
|
RefPtr<MediaStreamTrackListener> mListener;
|
|
|
|
TrackID mTrackID;
|
|
|
|
};
|
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStream::RemoveTrackListenerImpl(MediaStreamTrackListener* aListener,
|
|
|
|
TrackID aTrackID)
|
|
|
|
{
|
|
|
|
for (size_t i = 0; i < mTrackListeners.Length(); ++i) {
|
|
|
|
if (mTrackListeners[i].mListener == aListener &&
|
|
|
|
mTrackListeners[i].mTrackID == aTrackID) {
|
|
|
|
mTrackListeners[i].mListener->NotifyRemoved();
|
|
|
|
mTrackListeners.RemoveElementAt(i);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStream::RemoveTrackListener(MediaStreamTrackListener* aListener,
|
|
|
|
TrackID aTrackID)
|
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
|
|
|
Message(MediaStream* aStream, MediaStreamTrackListener* aListener,
|
|
|
|
TrackID aTrackID) :
|
|
|
|
ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
|
2017-02-23 13:08:26 +03:00
|
|
|
void Run() override
|
2016-03-03 19:27:59 +03:00
|
|
|
{
|
|
|
|
mStream->RemoveTrackListenerImpl(mListener, mTrackID);
|
|
|
|
}
|
2017-02-23 13:08:57 +03:00
|
|
|
void RunDuringShutdown() override
|
|
|
|
{
|
|
|
|
// During shutdown we still want the listener's NotifyRemoved to be
|
|
|
|
// called, since not doing that might block shutdown of other modules.
|
|
|
|
Run();
|
|
|
|
}
|
2016-03-03 19:27:59 +03:00
|
|
|
RefPtr<MediaStreamTrackListener> mListener;
|
|
|
|
TrackID mTrackID;
|
|
|
|
};
|
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
|
|
|
|
}
|
|
|
|
|
2016-03-03 19:28:37 +03:00
|
|
|
void
|
2016-04-29 06:45:25 +03:00
|
|
|
MediaStream::AddDirectTrackListenerImpl(already_AddRefed<DirectMediaStreamTrackListener> aListener,
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID aTrackID)
|
|
|
|
{
|
|
|
|
// Base implementation, for streams that don't support direct track listeners.
|
2016-04-29 06:45:25 +03:00
|
|
|
RefPtr<DirectMediaStreamTrackListener> listener = aListener;
|
2016-03-03 19:28:37 +03:00
|
|
|
listener->NotifyDirectListenerInstalled(
|
2016-04-29 06:45:25 +03:00
|
|
|
DirectMediaStreamTrackListener::InstallationResult::STREAM_NOT_SUPPORTED);
|
2016-03-03 19:28:37 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2016-04-29 06:45:25 +03:00
|
|
|
MediaStream::AddDirectTrackListener(DirectMediaStreamTrackListener* aListener,
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID aTrackID)
|
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
2016-04-29 06:45:25 +03:00
|
|
|
Message(MediaStream* aStream, DirectMediaStreamTrackListener* aListener,
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID aTrackID) :
|
|
|
|
ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
|
2017-02-23 13:08:26 +03:00
|
|
|
void Run() override
|
2016-03-03 19:28:37 +03:00
|
|
|
{
|
|
|
|
mStream->AddDirectTrackListenerImpl(mListener.forget(), mTrackID);
|
|
|
|
}
|
2016-04-29 06:45:25 +03:00
|
|
|
RefPtr<DirectMediaStreamTrackListener> mListener;
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID mTrackID;
|
|
|
|
};
|
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
MediaStream::RemoveDirectTrackListenerImpl(DirectMediaStreamTrackListener* aListener,
                                           TrackID aTrackID)
{
  // Base implementation, the listener was never added so nothing to do.
  // NOTE(review): the RefPtr only pins the listener for the duration of this
  // call; presumably it mirrors the ownership dance of overriding
  // implementations — confirm against SourceMediaStream's override.
  RefPtr<DirectMediaStreamTrackListener> listener = aListener;
}
|
|
|
|
|
|
|
|
void
|
2016-04-29 06:45:25 +03:00
|
|
|
MediaStream::RemoveDirectTrackListener(DirectMediaStreamTrackListener* aListener,
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID aTrackID)
|
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
2016-04-29 06:45:25 +03:00
|
|
|
Message(MediaStream* aStream, DirectMediaStreamTrackListener* aListener,
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID aTrackID) :
|
|
|
|
ControlMessage(aStream), mListener(aListener), mTrackID(aTrackID) {}
|
2017-02-23 13:08:26 +03:00
|
|
|
void Run() override
|
2016-03-03 19:28:37 +03:00
|
|
|
{
|
|
|
|
mStream->RemoveDirectTrackListenerImpl(mListener, mTrackID);
|
|
|
|
}
|
2017-02-23 13:08:57 +03:00
|
|
|
void RunDuringShutdown() override
|
|
|
|
{
|
|
|
|
// During shutdown we still want the listener's
|
|
|
|
// NotifyDirectListenerUninstalled to be called, since not doing that
|
|
|
|
// might block shutdown of other modules.
|
|
|
|
Run();
|
|
|
|
}
|
2016-04-29 06:45:25 +03:00
|
|
|
RefPtr<DirectMediaStreamTrackListener> mListener;
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID mTrackID;
|
|
|
|
};
|
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aListener, aTrackID));
|
|
|
|
}
|
|
|
|
|
2013-10-25 03:07:29 +04:00
|
|
|
// Dispatches aRunnable back to the main thread once all currently pending
// stream-state updates have been processed by the graph. Main thread only.
void
MediaStream::RunAfterPendingUpdates(already_AddRefed<nsIRunnable> aRunnable)
{
  MOZ_ASSERT(NS_IsMainThread());
  MediaStreamGraphImpl* graph = GraphImpl();
  nsCOMPtr<nsIRunnable> runnable(aRunnable);

  // Special case when a non-realtime graph has not started, to ensure the
  // runnable will run in finite time.
  if (!(graph->mRealtime || graph->mNonRealtimeProcessing)) {
    runnable->Run();
    return;
  }

  class Message : public ControlMessage {
  public:
    Message(MediaStream* aStream, already_AddRefed<nsIRunnable> aRunnable)
      : ControlMessage(aStream)
      , mRunnable(aRunnable)
    {}
    void Run() override
    {
      // Hand the runnable back to the main thread once the graph has
      // forwarded the state updates of this iteration.
      mStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
        mRunnable.forget());
    }
    void RunDuringShutdown() override
    {
      // Don't run mRunnable now as it may call AppendMessage() which would
      // assume that there are no remaining controlMessagesToRunDuringShutdown.
      MOZ_ASSERT(NS_IsMainThread());
      mStream->GraphImpl()->Dispatch(mRunnable.forget());
    }
  private:
    nsCOMPtr<nsIRunnable> mRunnable;
  };

  graph->AppendMessage(MakeUnique<Message>(this, runnable.forget()));
}
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2013-05-30 08:44:43 +04:00
|
|
|
void
|
2016-08-15 15:19:42 +03:00
|
|
|
MediaStream::SetTrackEnabledImpl(TrackID aTrackID, DisabledTrackMode aMode)
|
2013-05-30 08:44:43 +04:00
|
|
|
{
|
2016-08-15 15:19:42 +03:00
|
|
|
if (aMode == DisabledTrackMode::ENABLED) {
|
|
|
|
for (int32_t i = mDisabledTracks.Length() - 1; i >= 0; --i) {
|
|
|
|
if (aTrackID == mDisabledTracks[i].mTrackID) {
|
|
|
|
mDisabledTracks.RemoveElementAt(i);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2013-05-30 08:44:43 +04:00
|
|
|
} else {
|
2016-08-15 15:19:42 +03:00
|
|
|
for (const DisabledTrack& t : mDisabledTracks) {
|
|
|
|
if (aTrackID == t.mTrackID) {
|
|
|
|
NS_ERROR("Changing disabled track mode for a track is not allowed");
|
|
|
|
return;
|
|
|
|
}
|
2013-05-30 08:44:43 +04:00
|
|
|
}
|
2016-08-15 15:19:42 +03:00
|
|
|
mDisabledTracks.AppendElement(Move(DisabledTrack(aTrackID, aMode)));
|
2013-05-30 08:44:43 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-15 15:19:42 +03:00
|
|
|
DisabledTrackMode
MediaStream::GetDisabledTrackMode(TrackID aTrackID)
{
  // Linear scan of the (small) disabled-track list; tracks absent from the
  // list are implicitly enabled.
  for (const DisabledTrack& entry : mDisabledTracks) {
    if (entry.mTrackID == aTrackID) {
      return entry.mMode;
    }
  }
  return DisabledTrackMode::ENABLED;
}
|
|
|
|
|
2013-05-30 08:44:43 +04:00
|
|
|
void
|
2016-08-15 15:19:42 +03:00
|
|
|
MediaStream::SetTrackEnabled(TrackID aTrackID, DisabledTrackMode aMode)
|
2013-05-30 08:44:43 +04:00
|
|
|
{
|
|
|
|
class Message : public ControlMessage {
|
|
|
|
public:
|
2016-08-15 15:19:42 +03:00
|
|
|
Message(MediaStream* aStream, TrackID aTrackID, DisabledTrackMode aMode) :
|
|
|
|
ControlMessage(aStream),
|
|
|
|
mTrackID(aTrackID),
|
|
|
|
mMode(aMode) {}
|
2016-01-18 06:50:29 +03:00
|
|
|
void Run() override
|
2013-05-30 08:44:43 +04:00
|
|
|
{
|
2016-08-15 15:19:42 +03:00
|
|
|
mStream->SetTrackEnabledImpl(mTrackID, mMode);
|
2013-05-30 08:44:43 +04:00
|
|
|
}
|
|
|
|
TrackID mTrackID;
|
2016-08-15 15:19:42 +03:00
|
|
|
DisabledTrackMode mMode;
|
2013-05-30 08:44:43 +04:00
|
|
|
};
|
2016-08-15 15:19:42 +03:00
|
|
|
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aTrackID, aMode));
|
2013-05-30 08:44:43 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2013-08-26 10:07:17 +04:00
|
|
|
MediaStream::ApplyTrackDisabling(TrackID aTrackID, MediaSegment* aSegment, MediaSegment* aRawSegment)
|
2013-05-30 08:44:43 +04:00
|
|
|
{
|
2016-08-15 15:19:42 +03:00
|
|
|
DisabledTrackMode mode = GetDisabledTrackMode(aTrackID);
|
|
|
|
if (mode == DisabledTrackMode::ENABLED) {
|
2013-05-30 08:44:43 +04:00
|
|
|
return;
|
|
|
|
}
|
2016-08-15 15:19:42 +03:00
|
|
|
if (mode == DisabledTrackMode::SILENCE_BLACK) {
|
|
|
|
aSegment->ReplaceWithDisabled();
|
|
|
|
if (aRawSegment) {
|
|
|
|
aRawSegment->ReplaceWithDisabled();
|
|
|
|
}
|
|
|
|
} else if (mode == DisabledTrackMode::SILENCE_FREEZE) {
|
|
|
|
aSegment->ReplaceWithNull();
|
|
|
|
if (aRawSegment) {
|
|
|
|
aRawSegment->ReplaceWithNull();
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
MOZ_CRASH("Unsupported mode");
|
2013-05-30 08:44:43 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-11 17:07:38 +03:00
|
|
|
// Registers a main-thread listener. If the "finished" notification was
// already sent, re-dispatches a notification runnable so the new listener
// still observes it. Main thread only.
void
MediaStream::AddMainThreadListener(MainThreadMediaStreamListener* aListener)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aListener);
  MOZ_ASSERT(!mMainThreadListeners.Contains(aListener));

  mMainThreadListeners.AppendElement(aListener);

  // If it is not yet time to send the notification, then finish here.
  if (!mFinishedNotificationSent) {
    return;
  }

  class NotifyRunnable final : public Runnable
  {
  public:
    explicit NotifyRunnable(MediaStream* aStream)
      : Runnable("MediaStream::NotifyRunnable")
      , mStream(aStream)
    {}

    NS_IMETHOD Run() override
    {
      MOZ_ASSERT(NS_IsMainThread());
      mStream->NotifyMainThreadListeners();
      return NS_OK;
    }

  private:
    ~NotifyRunnable() {}

    // Strong ref keeps the stream alive until the runnable fires.
    RefPtr<MediaStream> mStream;
  };

  nsCOMPtr<nsIRunnable> runnable = new NotifyRunnable(this);
  GraphImpl()->Dispatch(runnable.forget());
}
|
|
|
|
|
2017-06-29 21:30:57 +03:00
|
|
|
// Constructs a source stream with pull mode disabled and no pending updates.
// mMutex guards the state shared with producer threads.
SourceMediaStream::SourceMediaStream()
  : MediaStream()
  , mMutex("mozilla::media::SourceMediaStream")
  , mUpdateKnownTracksTime(0)
  , mPullEnabled(false)
  , mUpdateFinished(false)
  , mNeedsMixing(false)
{
}
|
|
|
|
|
2016-02-04 05:12:51 +03:00
|
|
|
nsresult
SourceMediaStream::OpenAudioInput(int aID,
                                  AudioDataListener *aListener)
{
  // Without a graph there is nothing to attach the input to.
  if (!GraphImpl()) {
    return NS_ERROR_FAILURE;
  }
  // Remember the listener so CloseAudioInput()/DestroyImpl() can detach it.
  mInputListener = aListener;
  return GraphImpl()->OpenAudioInput(aID, aListener);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
SourceMediaStream::CloseAudioInput()
|
|
|
|
{
|
|
|
|
// Destroy() may have run already and cleared this
|
|
|
|
if (GraphImpl() && mInputListener) {
|
|
|
|
GraphImpl()->CloseAudioInput(mInputListener);
|
|
|
|
}
|
|
|
|
mInputListener = nullptr;
|
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
// Graph-side teardown of a source stream: closes any audio input, detaches
// consumers, and then runs the base-class teardown under mMutex.
void
SourceMediaStream::DestroyImpl()
{
  CloseAudioInput();

  GraphImpl()->AssertOnGraphThreadOrNotRunning();
  for (int32_t i = mConsumers.Length() - 1; i >= 0; --i) {
    // Disconnect before we come under mMutex's lock since it can call back
    // through RemoveDirectTrackListenerImpl() and deadlock.
    mConsumers[i]->Disconnect();
  }

  // Hold mMutex while mGraph is reset so that other threads holding mMutex
  // can null-check know that the graph will not destroyed.
  MutexAutoLock lock(mMutex);
  MediaStream::DestroyImpl();
}
|
|
|
|
|
2012-07-20 23:36:03 +04:00
|
|
|
void
|
|
|
|
SourceMediaStream::SetPullEnabled(bool aEnabled)
|
|
|
|
{
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
mPullEnabled = aEnabled;
|
2014-07-25 01:09:22 +04:00
|
|
|
if (mPullEnabled && GraphImpl()) {
|
2014-09-28 20:07:24 +04:00
|
|
|
GraphImpl()->EnsureNextIteration();
|
2012-07-20 23:36:03 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-23 10:01:15 +04:00
|
|
|
void
|
2014-09-18 09:20:43 +04:00
|
|
|
SourceMediaStream::AddTrackInternal(TrackID aID, TrackRate aRate, StreamTime aStart,
|
2015-02-19 20:04:26 +03:00
|
|
|
MediaSegment* aSegment, uint32_t aFlags)
|
2012-05-23 10:01:15 +04:00
|
|
|
{
|
|
|
|
MutexAutoLock lock(mMutex);
|
2015-02-19 20:04:26 +03:00
|
|
|
nsTArray<TrackData> *track_data = (aFlags & ADDTRACK_QUEUED) ?
|
|
|
|
&mPendingTracks : &mUpdateTracks;
|
|
|
|
TrackData* data = track_data->AppendElement();
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
|
|
|
("AddTrackInternal: %lu/%lu",
|
|
|
|
(long)mPendingTracks.Length(),
|
|
|
|
(long)mUpdateTracks.Length()));
|
2012-05-23 10:01:15 +04:00
|
|
|
data->mID = aID;
|
2014-03-24 14:06:05 +04:00
|
|
|
data->mInputRate = aRate;
|
2015-10-20 13:01:08 +03:00
|
|
|
data->mResamplerChannelCount = 0;
|
2012-05-23 10:01:15 +04:00
|
|
|
data->mStart = aStart;
|
2014-12-30 04:54:01 +03:00
|
|
|
data->mEndOfFlushedData = aStart;
|
2012-05-23 10:01:15 +04:00
|
|
|
data->mCommands = TRACK_CREATE;
|
|
|
|
data->mData = aSegment;
|
2016-04-15 16:52:59 +03:00
|
|
|
ResampleAudioToGraphSampleRate(data, aSegment);
|
2015-02-19 20:04:26 +03:00
|
|
|
if (!(aFlags & ADDTRACK_QUEUED) && GraphImpl()) {
|
|
|
|
GraphImpl()->EnsureNextIteration();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-30 10:07:48 +03:00
|
|
|
void
|
|
|
|
SourceMediaStream::AddAudioTrack(TrackID aID, TrackRate aRate, StreamTime aStart,
|
|
|
|
AudioSegment* aSegment, uint32_t aFlags)
|
|
|
|
{
|
|
|
|
AddTrackInternal(aID, aRate, aStart, aSegment, aFlags);
|
|
|
|
}
|
|
|
|
|
2015-02-19 20:04:26 +03:00
|
|
|
void
|
|
|
|
SourceMediaStream::FinishAddTracks()
|
|
|
|
{
|
|
|
|
MutexAutoLock lock(mMutex);
|
2015-08-11 18:29:46 +03:00
|
|
|
mUpdateTracks.AppendElements(Move(mPendingTracks));
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
|
|
|
("FinishAddTracks: %lu/%lu",
|
|
|
|
(long)mPendingTracks.Length(),
|
|
|
|
(long)mUpdateTracks.Length()));
|
2014-09-18 03:50:02 +04:00
|
|
|
if (GraphImpl()) {
|
|
|
|
GraphImpl()->EnsureNextIteration();
|
2012-05-23 10:01:15 +04:00
|
|
|
}
|
2012-04-30 07:11:40 +04:00
|
|
|
}
|
|
|
|
|
2014-03-24 14:06:05 +04:00
|
|
|
// Resamples an audio segment from the track's input rate to the graph's rate
// in place. Non-audio segments and already-matching rates are left untouched.
void
SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment)
{
  if (aSegment->GetType() != MediaSegment::AUDIO ||
      aTrackData->mInputRate == GraphImpl()->GraphRate()) {
    return;
  }
  AudioSegment* segment = static_cast<AudioSegment*>(aSegment);
  int channels = segment->ChannelCount();

  // If this segment is just silence, we delay instantiating the resampler. We
  // also need to recreate the resampler if the channel count changes.
  if (channels && aTrackData->mResamplerChannelCount != channels) {
    SpeexResamplerState* state = speex_resampler_init(channels,
                                                      aTrackData->mInputRate,
                                                      GraphImpl()->GraphRate(),
                                                      SPEEX_RESAMPLER_QUALITY_MIN,
                                                      nullptr);
    if (!state) {
      // Allocation failure: leave the segment at its input rate.
      return;
    }
    aTrackData->mResampler.own(state);
    aTrackData->mResamplerChannelCount = channels;
  }
  segment->ResampleChunks(aTrackData->mResampler, aTrackData->mInputRate, GraphImpl()->GraphRate());
}
|
|
|
|
|
2016-07-25 05:01:26 +03:00
|
|
|
// Advances this stream's notion of time: shifts the tracks' start time by the
// blocked duration and forgets media that has already been consumed.
void
SourceMediaStream::AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
                                                         GraphTime aBlockedTime)
{
  MutexAutoLock lock(mMutex);
  mTracksStartTime += aBlockedTime;
  // Keep the wall-clock anchor in sync with the media-time shift.
  mStreamTracksStartTimeStamp += TimeDuration::FromSeconds(GraphImpl()->MediaTimeToSeconds(aBlockedTime));
  mTracks.ForgetUpTo(aCurrentTime - mTracksStartTime);
}
|
|
|
|
|
2013-02-25 13:25:07 +04:00
|
|
|
bool
|
2013-08-24 17:53:11 +04:00
|
|
|
SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment)
|
2012-04-30 07:11:40 +04:00
|
|
|
{
|
2012-05-23 10:01:15 +04:00
|
|
|
MutexAutoLock lock(mMutex);
|
2012-10-25 03:21:32 +04:00
|
|
|
// ::EndAllTrackAndFinished() can end these before the sources notice
|
2013-02-25 13:25:07 +04:00
|
|
|
bool appended = false;
|
2014-07-25 01:23:59 +04:00
|
|
|
auto graph = GraphImpl();
|
|
|
|
if (!mFinished && graph) {
|
2012-10-25 03:21:32 +04:00
|
|
|
TrackData *track = FindDataForTrack(aID);
|
|
|
|
if (track) {
|
2013-08-24 17:53:01 +04:00
|
|
|
// Data goes into mData, and on the next iteration of the MSG moves
|
|
|
|
// into the track's segment after NotifyQueuedTrackChanges(). This adds
|
|
|
|
// 0-10ms of delay before data gets to direct listeners.
|
|
|
|
// Indirect listeners (via subsequent TrackUnion nodes) are synced to
|
|
|
|
// playout time, and so can be delayed by buffering.
|
|
|
|
|
2013-08-26 10:07:17 +04:00
|
|
|
// Apply track disabling before notifying any consumers directly
|
|
|
|
// or inserting into the graph
|
|
|
|
ApplyTrackDisabling(aID, aSegment, aRawSegment);
|
|
|
|
|
2014-03-24 14:06:05 +04:00
|
|
|
ResampleAudioToGraphSampleRate(track, aSegment);
|
|
|
|
|
2013-08-24 17:53:11 +04:00
|
|
|
// Must notify first, since AppendFrom() will empty out aSegment
|
|
|
|
NotifyDirectConsumers(track, aRawSegment ? aRawSegment : aSegment);
|
|
|
|
track->mData->AppendFrom(aSegment); // note: aSegment is now dead
|
2013-02-25 13:25:07 +04:00
|
|
|
appended = true;
|
2014-09-28 20:07:24 +04:00
|
|
|
GraphImpl()->EnsureNextIteration();
|
2012-10-25 03:21:32 +04:00
|
|
|
} else {
|
2013-02-25 13:25:07 +04:00
|
|
|
aSegment->Clear();
|
2013-02-27 16:49:26 +04:00
|
|
|
}
|
2012-05-23 10:01:15 +04:00
|
|
|
}
|
2013-02-25 13:25:07 +04:00
|
|
|
return appended;
|
2012-04-30 07:11:40 +04:00
|
|
|
}
|
|
|
|
|
2013-08-24 17:53:11 +04:00
|
|
|
// Pushes freshly-appended data straight to direct track listeners bound to
// aTrack, bypassing the graph's buffering. Caller must hold mMutex.
void
SourceMediaStream::NotifyDirectConsumers(TrackData *aTrack,
                                         MediaSegment *aSegment)
{
  mMutex.AssertCurrentThreadOwns();
  MOZ_ASSERT(aTrack);

  for (const TrackBound<DirectMediaStreamTrackListener>& source
         : mDirectTrackListeners) {
    if (aTrack->mID != source.mTrackID) {
      continue;
    }
    StreamTime offset = 0; // FIX! need a separate StreamTime.... or the end of the internal buffer
    source.mListener->NotifyRealtimeTrackDataAndApplyTrackDisabling(Graph(), offset, *aSegment);
  }
}
|
|
|
|
|
2014-08-17 10:09:21 +04:00
|
|
|
// These handle notifying all the listeners of an event
|
|
|
|
void
|
2016-06-30 10:07:48 +03:00
|
|
|
SourceMediaStream::NotifyListenersEventImpl(MediaStreamGraphEvent aEvent)
|
2014-08-17 10:09:21 +04:00
|
|
|
{
|
|
|
|
for (uint32_t j = 0; j < mListeners.Length(); ++j) {
|
|
|
|
MediaStreamListener* l = mListeners[j];
|
|
|
|
l->NotifyEvent(GraphImpl(), aEvent);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Main-thread entry point: queue a control message so that aNewEvent is
// delivered to the listeners on the graph thread (see
// NotifyListenersEventImpl).
void
SourceMediaStream::NotifyListenersEvent(MediaStreamGraphEvent aNewEvent)
{
  class Message : public ControlMessage {
  public:
    Message(SourceMediaStream* aStream, MediaStreamGraphEvent aEvent) :
      ControlMessage(aStream), mEvent(aEvent) {}
    void Run() override
    {
      mStream->AsSourceStream()->NotifyListenersEventImpl(mEvent);
    }
    MediaStreamGraphEvent mEvent;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aNewEvent));
}
|
|
|
|
|
2016-03-03 19:28:37 +03:00
|
|
|
void
|
2016-04-29 06:45:25 +03:00
|
|
|
SourceMediaStream::AddDirectTrackListenerImpl(already_AddRefed<DirectMediaStreamTrackListener> aListener,
|
2016-03-03 19:28:37 +03:00
|
|
|
TrackID aTrackID)
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
|
2017-05-24 19:54:40 +03:00
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
2016-04-29 06:45:25 +03:00
|
|
|
RefPtr<DirectMediaStreamTrackListener> listener = aListener;
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
|
|
|
("Adding direct track listener %p bound to track %d to source stream %p",
|
|
|
|
listener.get(),
|
|
|
|
aTrackID,
|
|
|
|
this));
|
2016-05-10 12:02:15 +03:00
|
|
|
|
2017-05-24 19:54:40 +03:00
|
|
|
StreamTracks::Track* track = FindTrack(aTrackID);
|
2016-05-30 06:32:23 +03:00
|
|
|
|
2016-09-28 17:12:20 +03:00
|
|
|
if (!track) {
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Warning,
|
|
|
|
("Couldn't find source track for direct track listener %p",
|
|
|
|
listener.get()));
|
2016-03-03 19:28:37 +03:00
|
|
|
listener->NotifyDirectListenerInstalled(
|
2016-04-29 06:45:25 +03:00
|
|
|
DirectMediaStreamTrackListener::InstallationResult::TRACK_NOT_FOUND_AT_SOURCE);
|
2016-03-03 19:28:37 +03:00
|
|
|
return;
|
|
|
|
}
|
2017-05-24 19:54:40 +03:00
|
|
|
|
|
|
|
bool isAudio = track->GetType() == MediaSegment::AUDIO;
|
|
|
|
bool isVideo = track->GetType() == MediaSegment::VIDEO;
|
2016-05-10 12:02:15 +03:00
|
|
|
if (!isAudio && !isVideo) {
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(
|
|
|
|
LogLevel::Warning,
|
|
|
|
("Source track for direct track listener %p is unknown", listener.get()));
|
2017-04-26 15:02:09 +03:00
|
|
|
MOZ_ASSERT(false);
|
2016-03-03 19:28:37 +03:00
|
|
|
return;
|
|
|
|
}
|
2017-05-24 19:54:40 +03:00
|
|
|
|
|
|
|
for (auto entry : mDirectTrackListeners) {
|
|
|
|
if (entry.mListener == listener &&
|
|
|
|
(entry.mTrackID == TRACK_ANY || entry.mTrackID == aTrackID)) {
|
|
|
|
listener->NotifyDirectListenerInstalled(
|
|
|
|
DirectMediaStreamTrackListener::InstallationResult::ALREADY_EXISTS);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TrackBound<DirectMediaStreamTrackListener>* sourceListener =
|
|
|
|
mDirectTrackListeners.AppendElement();
|
|
|
|
sourceListener->mListener = listener;
|
|
|
|
sourceListener->mTrackID = aTrackID;
|
|
|
|
|
2017-05-23 17:00:42 +03:00
|
|
|
LOG(LogLevel::Debug, ("Added direct track listener %p", listener.get()));
|
2016-03-03 19:28:37 +03:00
|
|
|
listener->NotifyDirectListenerInstalled(
|
2016-04-29 06:45:25 +03:00
|
|
|
DirectMediaStreamTrackListener::InstallationResult::SUCCESS);
|
2016-11-17 11:59:49 +03:00
|
|
|
|
2017-05-24 19:54:40 +03:00
|
|
|
// Pass buffered data to the listener
|
|
|
|
AudioSegment bufferedAudio;
|
|
|
|
VideoSegment bufferedVideo;
|
|
|
|
MediaSegment& bufferedData =
|
|
|
|
isAudio ? static_cast<MediaSegment&>(bufferedAudio)
|
|
|
|
: static_cast<MediaSegment&>(bufferedVideo);
|
|
|
|
|
|
|
|
MediaSegment& trackSegment = *track->GetSegment();
|
|
|
|
if (mTracks.GetForgottenDuration() < trackSegment.GetDuration()) {
|
|
|
|
bufferedData.AppendSlice(trackSegment,
|
|
|
|
mTracks.GetForgottenDuration(),
|
|
|
|
trackSegment.GetDuration());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (TrackData* updateData = FindDataForTrack(aTrackID)) {
|
|
|
|
bufferedData.AppendSlice(*updateData->mData, 0, updateData->mData->GetDuration());
|
|
|
|
}
|
|
|
|
|
2016-11-17 11:59:49 +03:00
|
|
|
if (bufferedData.GetDuration() != 0) {
|
|
|
|
listener->NotifyRealtimeTrackData(Graph(), 0, bufferedData);
|
|
|
|
}
|
2016-03-03 19:28:37 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Uninstall a direct listener previously added for aTrackID. The listener
// is notified via NotifyDirectListenerUninstalled() before removal.
void
SourceMediaStream::RemoveDirectTrackListenerImpl(DirectMediaStreamTrackListener* aListener,
                                                 TrackID aTrackID)
{
  MutexAutoLock lock(mMutex);
  // Iterate backwards so RemoveElementAt() does not shift unvisited entries.
  for (int32_t i = mDirectTrackListeners.Length() - 1; i >= 0; --i) {
    const TrackBound<DirectMediaStreamTrackListener>& source =
      mDirectTrackListeners[i];
    if (source.mListener == aListener && source.mTrackID == aTrackID) {
      aListener->NotifyDirectListenerUninstalled();
      mDirectTrackListeners.RemoveElementAt(i);
    }
  }
}
|
|
|
|
|
2014-12-30 04:54:01 +03:00
|
|
|
// Returns the stream time just past all data appended so far for track aID:
// flushed duration plus whatever is still sitting in the update segment.
// Reports an error and returns 0 if the track is unknown.
StreamTime
SourceMediaStream::GetEndOfAppendedData(TrackID aID)
{
  MutexAutoLock lock(mMutex);
  if (TrackData* track = FindDataForTrack(aID)) {
    return track->mEndOfFlushedData + track->mData->GetDuration();
  }
  NS_ERROR("Track not found");
  return 0;
}
|
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
void
|
|
|
|
SourceMediaStream::EndTrack(TrackID aID)
|
|
|
|
{
|
2012-05-23 10:01:15 +04:00
|
|
|
MutexAutoLock lock(mMutex);
|
2015-01-28 07:40:51 +03:00
|
|
|
TrackData *track = FindDataForTrack(aID);
|
|
|
|
if (track) {
|
2016-06-30 10:07:48 +03:00
|
|
|
track->mCommands |= TrackEventCommand::TRACK_EVENT_ENDED;
|
2012-05-23 10:01:15 +04:00
|
|
|
}
|
2014-07-25 01:09:22 +04:00
|
|
|
if (auto graph = GraphImpl()) {
|
2014-09-28 20:07:24 +04:00
|
|
|
graph->EnsureNextIteration();
|
2012-04-30 07:11:40 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Declare that all tracks that will ever start before aKnownTime have
// already been created. Must advance monotonically.
void
SourceMediaStream::AdvanceKnownTracksTime(StreamTime aKnownTime)
{
  MutexAutoLock lock(mMutex);
  MOZ_ASSERT(aKnownTime >= mUpdateKnownTracksTime);
  mUpdateKnownTracksTime = aKnownTime;
  // Graph may already be gone during shutdown.
  if (auto graph = GraphImpl()) {
    graph->EnsureNextIteration();
  }
}
|
|
|
|
|
|
|
|
// Flag this source stream as finished; the graph picks the flag up on its
// next iteration. Caller must already hold mMutex.
void
SourceMediaStream::FinishWithLockHeld()
{
  mMutex.AssertCurrentThreadOwns();
  mUpdateFinished = true;
  // Graph may already be gone during shutdown.
  if (auto graph = GraphImpl()) {
    graph->EnsureNextIteration();
  }
}
|
|
|
|
|
2016-03-18 16:21:51 +03:00
|
|
|
// Enable/disable track aTrackID. Direct listeners bound to the track are
// told about enabled<->disabled transitions (under mMutex) before the base
// class records the new mode.
void
SourceMediaStream::SetTrackEnabledImpl(TrackID aTrackID, DisabledTrackMode aMode)
{
  {
    MutexAutoLock lock(mMutex);
    for (TrackBound<DirectMediaStreamTrackListener>& l: mDirectTrackListeners) {
      if (l.mTrackID != aTrackID) {
        continue;
      }
      // Compare against the mode currently in effect; only notify the
      // listener when the enabled/disabled state actually flips.
      DisabledTrackMode oldMode = GetDisabledTrackMode(aTrackID);
      bool oldEnabled = oldMode == DisabledTrackMode::ENABLED;
      if (!oldEnabled && aMode == DisabledTrackMode::ENABLED) {
        LOG(LogLevel::Debug,
            ("SourceMediaStream %p track %d setting "
             "direct listener enabled",
             this,
             aTrackID));
        l.mListener->DecreaseDisabled(oldMode);
      } else if (oldEnabled && aMode != DisabledTrackMode::ENABLED) {
        LOG(LogLevel::Debug,
            ("SourceMediaStream %p track %d setting "
             "direct listener disabled",
             this,
             aTrackID));
        l.mListener->IncreaseDisabled(aMode);
      }
    }
  }
  // Must run after listener notification: the base call updates the stored
  // disabled mode that GetDisabledTrackMode() above reads.
  MediaStream::SetTrackEnabledImpl(aTrackID, aMode);
}
|
|
|
|
|
2012-10-25 03:21:32 +04:00
|
|
|
void
|
|
|
|
SourceMediaStream::EndAllTrackAndFinish()
|
|
|
|
{
|
2013-04-08 16:03:33 +04:00
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
for (uint32_t i = 0; i < mUpdateTracks.Length(); ++i) {
|
|
|
|
SourceMediaStream::TrackData* data = &mUpdateTracks[i];
|
2016-06-30 10:07:48 +03:00
|
|
|
data->mCommands |= TrackEventCommand::TRACK_EVENT_ENDED;
|
2012-10-25 03:21:32 +04:00
|
|
|
}
|
2015-02-19 20:04:26 +03:00
|
|
|
mPendingTracks.Clear();
|
2012-10-25 03:21:32 +04:00
|
|
|
FinishWithLockHeld();
|
2014-07-14 09:47:56 +04:00
|
|
|
// we will call NotifyEvent() to let GetUserMedia know
|
2012-10-25 03:21:32 +04:00
|
|
|
}
|
|
|
|
|
2016-06-30 10:07:48 +03:00
|
|
|
// Nothing beyond base-class teardown; defined out-of-line intentionally.
SourceMediaStream::~SourceMediaStream()
{
}
|
|
|
|
|
2014-03-24 14:06:06 +04:00
|
|
|
// Flag this stream's audio as needing mixing (see NeedsMixing()).
void
SourceMediaStream::RegisterForAudioMixing()
{
  MutexAutoLock lock(mMutex);
  mNeedsMixing = true;
}
|
|
|
|
|
|
|
|
// Thread-safe read of the flag set by RegisterForAudioMixing().
bool
SourceMediaStream::NeedsMixing()
{
  MutexAutoLock lock(mMutex);
  return mNeedsMixing;
}
|
|
|
|
|
2016-06-22 18:44:46 +03:00
|
|
|
bool
|
|
|
|
SourceMediaStream::HasPendingAudioTrack()
|
|
|
|
{
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
bool audioTrackPresent = false;
|
|
|
|
|
|
|
|
for (auto& data : mPendingTracks) {
|
|
|
|
if (data.mData->GetType() == MediaSegment::AUDIO) {
|
|
|
|
audioTrackPresent = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return audioTrackPresent;
|
|
|
|
}
|
|
|
|
|
2017-06-30 07:01:17 +03:00
|
|
|
// Create a fresh AudioCallbackDriver wired to aListener for input, and ask
// the current driver to hand over at the next graph iteration. Only valid
// while the graph is running. Always returns true.
bool
SourceMediaStream::OpenNewAudioCallbackDriver(AudioDataListener * aListener)
{
  MOZ_ASSERT(GraphImpl()->mLifecycleState ==
             MediaStreamGraphImpl::LifecycleState::LIFECYCLE_RUNNING);
  AudioCallbackDriver* nextDriver = new AudioCallbackDriver(GraphImpl());
  nextDriver->SetInputListener(aListener);
  {
    // SwitchAtNextIteration requires the graph monitor.
    MonitorAutoLock lock(GraphImpl()->GetMonitor());
    GraphImpl()->CurrentDriver()->SwitchAtNextIteration(nextDriver);
  }

  return true;
}
|
|
|
|
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
// Hook this port into the graph: register with both endpoints and account
// for it in the graph's port count. Runs via the control message queued by
// ProcessedMediaStream::AllocateInputPort.
void
MediaInputPort::Init()
{
  LOG(LogLevel::Debug,
      ("Adding MediaInputPort %p (from %p to %p) to the graph",
       this,
       mSource,
       mDest));
  mSource->AddConsumer(this);
  mDest->AddInput(this);
  // mPortCount decremented via MediaInputPort::Destroy's message
  ++mDest->GraphImpl()->mPortCount;
}
|
|
|
|
|
|
|
|
// Detach this port from both endpoints. Idempotent: a second call sees
// mSource == nullptr and returns immediately. Graph thread only.
void
MediaInputPort::Disconnect()
{
  GraphImpl()->AssertOnGraphThreadOrNotRunning();
  NS_ASSERTION(!mSource == !mDest,
               "mSource must either both be null or both non-null");
  if (!mSource)
    return;

  mSource->RemoveConsumer(this);
  mDest->RemoveInput(this);
  mSource = nullptr;
  mDest = nullptr;

  // Topology changed; the graph must recompute stream ordering.
  GraphImpl()->SetStreamOrderDirty();
}
|
|
|
|
|
|
|
|
// Compute the next interval starting at aTime during which the destination
// can consume input, and whether the source is blocked for that interval.
// Returns {GRAPH_TIME_MAX, GRAPH_TIME_MAX, false} when the destination is
// already blocked at aTime.
MediaInputPort::InputInterval
MediaInputPort::GetNextInputInterval(GraphTime aTime)
{
  InputInterval result = { GRAPH_TIME_MAX, GRAPH_TIME_MAX, false };
  if (aTime >= mDest->mStartBlocking) {
    return result;
  }
  result.mStart = aTime;
  result.mEnd = mDest->mStartBlocking;
  result.mInputIsBlocked = aTime >= mSource->mStartBlocking;
  if (!result.mInputIsBlocked) {
    // A flowing interval ends as soon as either side starts blocking.
    result.mEnd = std::min(result.mEnd, mSource->mStartBlocking);
  }
  return result;
}
|
|
|
|
|
2016-08-24 20:04:33 +03:00
|
|
|
// Forward suspension of this port to its destination stream.
void
MediaInputPort::Suspended()
{
  mDest->InputSuspended(this);
}
|
|
|
|
|
|
|
|
// Forward resumption of this port to its destination stream.
void
MediaInputPort::Resumed()
{
  mDest->InputResumed(this);
}
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
// Tear the port down asynchronously on the graph thread. The graph's
// implicit reference (taken in AllocateInputPort's message) is dropped via
// the raw NS_RELEASE here; mPort is deliberately a raw pointer so the
// message itself does not add a second owner.
void
MediaInputPort::Destroy()
{
  class Message : public ControlMessage {
  public:
    explicit Message(MediaInputPort* aPort)
      : ControlMessage(nullptr), mPort(aPort) {}
    void Run() override
    {
      mPort->Disconnect();
      // Balances the increment done in MediaInputPort::Init().
      --mPort->GraphImpl()->mPortCount;
      mPort->SetGraphImpl(nullptr);
      NS_RELEASE(mPort);
    }
    void RunDuringShutdown() override
    {
      Run();
    }
    MediaInputPort* mPort;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this));
}
|
|
|
|
|
|
|
|
// The graph this port belongs to, as the concrete impl type.
MediaStreamGraphImpl*
MediaInputPort::GraphImpl()
{
  return mGraph;
}
|
|
|
|
|
|
|
|
// Same graph as GraphImpl(), exposed through the public base type.
MediaStreamGraph*
MediaInputPort::Graph()
{
  return mGraph;
}
|
|
|
|
|
|
|
|
// Attach to (or, with nullptr, detach from) a graph. The assert enforces a
// single set-then-clear lifecycle.
void
MediaInputPort::SetGraphImpl(MediaStreamGraphImpl* aGraph)
{
  MOZ_ASSERT(!mGraph || !aGraph, "Should only be set once");
  mGraph = aGraph;
}
|
|
|
|
|
2015-09-30 04:31:53 +03:00
|
|
|
// Graph-thread half of BlockSourceTrackId(): record that aTrackId from the
// source is blocked with the given mode.
void
MediaInputPort::BlockSourceTrackIdImpl(TrackID aTrackId, BlockingMode aBlockingMode)
{
  mBlockedTracks.AppendElement(Pair<TrackID, BlockingMode>(aTrackId, aBlockingMode));
}
|
|
|
|
|
2016-03-16 18:00:34 +03:00
|
|
|
// Main-thread entry point: queue a message that blocks aTrackId on the
// graph thread, and return a pledge that resolves (with true) on the main
// thread once the block has taken effect.
already_AddRefed<Pledge<bool>>
MediaInputPort::BlockSourceTrackId(TrackID aTrackId, BlockingMode aBlockingMode)
{
  class Message : public ControlMessage {
  public:
    Message(MediaInputPort* aPort,
            TrackID aTrackId,
            BlockingMode aBlockingMode,
            already_AddRefed<nsIRunnable> aRunnable)
      : ControlMessage(aPort->GetDestination())
      , mPort(aPort)
      , mTrackId(aTrackId)
      , mBlockingMode(aBlockingMode)
      , mRunnable(aRunnable)
    {
    }
    void Run() override
    {
      mPort->BlockSourceTrackIdImpl(mTrackId, mBlockingMode);
      if (mRunnable) {
        // Resolve the pledge on the main thread only after the state
        // change has been reflected to the main thread.
        mStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
          mRunnable.forget());
      }
    }
    void RunDuringShutdown() override
    {
      Run();
    }
    RefPtr<MediaInputPort> mPort;
    TrackID mTrackId;
    BlockingMode mBlockingMode;
    nsCOMPtr<nsIRunnable> mRunnable;
  };

  MOZ_ASSERT(IsTrackIDExplicit(aTrackId),
             "Only explicit TrackID is allowed");

  auto pledge = MakeRefPtr<Pledge<bool>>();
  nsCOMPtr<nsIRunnable> runnable = NewRunnableFrom([pledge]() {
    MOZ_ASSERT(NS_IsMainThread());
    pledge->Resolve(true);
    return NS_OK;
  });
  GraphImpl()->AppendMessage(
    MakeUnique<Message>(this, aTrackId, aBlockingMode, runnable.forget()));
  return pledge.forget();
}
|
|
|
|
|
2012-11-23 02:25:05 +04:00
|
|
|
// Create a MediaInputPort connecting aStream (source) to this stream
// (destination), optionally mapping aTrackID to aDestTrackID and
// pre-blocking aBlockedTracks. The port is initialized asynchronously on
// the graph thread; the returned reference is the main thread's.
already_AddRefed<MediaInputPort>
ProcessedMediaStream::AllocateInputPort(MediaStream* aStream, TrackID aTrackID,
                                        TrackID aDestTrackID,
                                        uint16_t aInputNumber, uint16_t aOutputNumber,
                                        nsTArray<TrackID>* aBlockedTracks)
{
  // This method creates two references to the MediaInputPort: one for
  // the main thread, and one for the MediaStreamGraph.
  class Message : public ControlMessage {
  public:
    explicit Message(MediaInputPort* aPort)
      : ControlMessage(aPort->GetDestination()),
        mPort(aPort) {}
    void Run() override
    {
      mPort->Init();
      // The graph holds its reference implicitly
      mPort->GraphImpl()->SetStreamOrderDirty();
      Unused << mPort.forget();
    }
    void RunDuringShutdown() override
    {
      Run();
    }
    RefPtr<MediaInputPort> mPort;
  };

  MOZ_ASSERT(aStream->GraphImpl() == GraphImpl());
  MOZ_ASSERT(aTrackID == TRACK_ANY || IsTrackIDExplicit(aTrackID),
             "Only TRACK_ANY and explicit ID are allowed for source track");
  MOZ_ASSERT(aDestTrackID == TRACK_ANY || IsTrackIDExplicit(aDestTrackID),
             "Only TRACK_ANY and explicit ID are allowed for destination track");
  MOZ_ASSERT(aTrackID != TRACK_ANY || aDestTrackID == TRACK_ANY,
             "Generic MediaInputPort cannot produce a single destination track");
  RefPtr<MediaInputPort> port = new MediaInputPort(
    aStream, aTrackID, this, aDestTrackID, aInputNumber, aOutputNumber);
  if (aBlockedTracks) {
    // Safe to call the Impl directly: the port is not in the graph yet.
    for (TrackID trackID : *aBlockedTracks) {
      port->BlockSourceTrackIdImpl(trackID, BlockingMode::CREATION);
    }
  }
  port->SetGraphImpl(GraphImpl());
  GraphImpl()->AppendMessage(MakeUnique<Message>(port));
  return port.forget();
}
|
|
|
|
|
|
|
|
// Request that the graph finish this stream, via a control message on the
// graph thread.
void
ProcessedMediaStream::Finish()
{
  class Message : public ControlMessage {
  public:
    explicit Message(ProcessedMediaStream* aStream)
      : ControlMessage(aStream) {}
    void Run() override
    {
      mStream->GraphImpl()->FinishStream(mStream);
    }
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this));
}
|
|
|
|
|
|
|
|
// Set the autofinish flag on the graph thread via a control message.
void
ProcessedMediaStream::SetAutofinish(bool aAutofinish)
{
  class Message : public ControlMessage {
  public:
    Message(ProcessedMediaStream* aStream, bool aAutofinish)
      : ControlMessage(aStream), mAutofinish(aAutofinish) {}
    void Run() override
    {
      static_cast<ProcessedMediaStream*>(mStream)->SetAutofinishImpl(mAutofinish);
    }
    bool mAutofinish;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aAutofinish));
}
|
|
|
|
|
|
|
|
// Disconnect every input port (active and suspended) before base-class
// destruction. Backwards iteration because Disconnect() removes the port
// from the array being walked.
void
ProcessedMediaStream::DestroyImpl()
{
  for (int32_t i = mInputs.Length() - 1; i >= 0; --i) {
    mInputs[i]->Disconnect();
  }

  for (int32_t i = mSuspendedInputs.Length() - 1; i >= 0; --i) {
    mSuspendedInputs[i]->Disconnect();
  }

  MediaStream::DestroyImpl();
  // The stream order is only important if there are connections, in which
  // case MediaInputPort::Disconnect() called SetStreamOrderDirty().
  // MediaStreamGraphImpl::RemoveStreamGraphThread() will also call
  // SetStreamOrderDirty(), for other reasons.
}
|
2012-04-30 07:11:26 +04:00
|
|
|
|
2015-08-25 11:17:31 +03:00
|
|
|
// Construct a graph. aDriverRequested selects the clock source: an audio
// callback driver, a system-clock driver (realtime without audio), or an
// offline driver for non-realtime rendering. mSelfRef keeps the graph
// alive until Destroy() clears it.
MediaStreamGraphImpl::MediaStreamGraphImpl(GraphDriverType aDriverRequested,
                                           TrackRate aSampleRate,
                                           AbstractThread* aMainThread)
  : MediaStreamGraph(aSampleRate)
  , mPortCount(0)
  , mInputWanted(false)
  , mInputDeviceID(-1)
  , mOutputWanted(true)
  , mOutputDeviceID(-1)
  , mNeedAnotherIteration(false)
  , mGraphDriverAsleep(false)
  , mMonitor("MediaStreamGraphImpl")
  , mLifecycleState(LIFECYCLE_THREAD_NOT_STARTED)
  , mEndTime(GRAPH_TIME_MAX)
  , mForceShutDown(false)
  , mPostedRunInStableStateEvent(false)
  , mDetectedNotRunning(false)
  , mPostedRunInStableState(false)
  , mRealtime(aDriverRequested != OFFLINE_THREAD_DRIVER)
  , mNonRealtimeProcessing(false)
  , mStreamOrderDirty(false)
  , mLatencyLog(AsyncLatencyLogger::Get())
  , mAbstractMainThread(aMainThread)
#ifdef MOZ_WEBRTC
  , mFarendObserverRef(nullptr)
#endif
  , mSelfRef(this)
#ifdef DEBUG
  , mCanRunMessagesSynchronously(false)
#endif
{
  if (mRealtime) {
    if (aDriverRequested == AUDIO_THREAD_DRIVER) {
      AudioCallbackDriver* driver = new AudioCallbackDriver(this);
      mDriver = driver;
    } else {
      mDriver = new SystemClockDriver(this);
    }
  } else {
    mDriver = new OfflineClockDriver(this, MEDIA_GRAPH_TARGET_PERIOD_MS);
  }

  mLastMainThreadUpdate = TimeStamp::Now();

  // Weak registration: unregistered in Destroy() before mSelfRef is cleared.
  RegisterWeakAsyncMemoryReporter(this);
}
|
|
|
|
|
2017-06-29 21:30:57 +03:00
|
|
|
// The AbstractThread this graph dispatches main-thread work to; always
// non-null once the graph is constructed.
AbstractThread*
MediaStreamGraph::AbstractMainThread()
{
  auto* impl = static_cast<MediaStreamGraphImpl*>(this);
  MOZ_ASSERT(impl->mAbstractMainThread);
  return impl->mAbstractMainThread;
}
|
|
|
|
|
2014-04-13 22:08:10 +04:00
|
|
|
// Final teardown: drop the memory-reporter registration, then release the
// self-reference taken in the constructor, which destroys this object.
void
MediaStreamGraphImpl::Destroy()
{
  // First unregister from memory reporting.
  UnregisterWeakMemoryReporter(this);

  // Clear the self reference which will destroy this instance.
  mSelfRef = nullptr;
}
|
|
|
|
|
2017-06-24 02:18:34 +03:00
|
|
|
static
|
2017-08-10 04:00:08 +03:00
|
|
|
uint32_t WindowToHash(nsPIDOMWindowInner* aWindow)
|
2017-06-24 02:18:34 +03:00
|
|
|
{
|
|
|
|
uint32_t hashkey = 0;
|
|
|
|
|
|
|
|
hashkey = AddToHash(hashkey, aWindow);
|
|
|
|
|
|
|
|
return hashkey;
|
|
|
|
}
|
|
|
|
|
2012-04-30 07:11:26 +04:00
|
|
|
// Return the realtime graph for aWindow, creating it on first use. Also
// lazily installs a global async-shutdown blocker that force-shuts-down
// every live graph at profile-before-change. Main thread only.
MediaStreamGraph*
MediaStreamGraph::GetInstance(MediaStreamGraph::GraphDriverType aGraphDriverRequested,
                              nsPIDOMWindowInner* aWindow)
{
  NS_ASSERTION(NS_IsMainThread(), "Main thread only");

  MediaStreamGraphImpl* graph = nullptr;

  // We hash the nsPIDOMWindowInner to form a key to the global
  // MediaStreamGraph hashtable. Effectively, this means there is a graph per
  // document.

  uint32_t hashkey = WindowToHash(aWindow);

  if (!gGraphs.Get(hashkey, &graph)) {
    if (!gMediaStreamGraphShutdownBlocker) {

      class Blocker : public media::ShutdownBlocker
      {
      public:
        Blocker()
        : media::ShutdownBlocker(NS_LITERAL_STRING(
            "MediaStreamGraph shutdown: blocking on msg thread"))
        {}

        NS_IMETHOD
        BlockShutdown(nsIAsyncShutdownClient* aProfileBeforeChange) override
        {
          // Distribute the global async shutdown blocker in a ticket. If there
          // are zero graphs then shutdown is unblocked when we go out of scope.
          auto ticket = MakeRefPtr<media::ShutdownTicket>(
            gMediaStreamGraphShutdownBlocker.get());
          gMediaStreamGraphShutdownBlocker = nullptr;

          for (auto iter = gGraphs.Iter(); !iter.Done(); iter.Next()) {
            iter.UserData()->ForceShutDown(ticket);
          }
          return NS_OK;
        }
      };

      gMediaStreamGraphShutdownBlocker = new Blocker();
      nsCOMPtr<nsIAsyncShutdownClient> barrier = media::GetShutdownBarrier();
      nsresult rv = barrier->
          AddBlocker(gMediaStreamGraphShutdownBlocker,
                     NS_LITERAL_STRING(__FILE__), __LINE__,
                     NS_LITERAL_STRING("MediaStreamGraph shutdown"));
      MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
    }

    AbstractThread* mainThread;
    if (aWindow) {
      nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(aWindow);
      mainThread = parentObject->AbstractMainThreadFor(TaskCategory::Other);
    } else {
      // Uncommon case, only for some old configuration of webspeech.
      mainThread = AbstractThread::MainThread();
    }
    graph = new MediaStreamGraphImpl(aGraphDriverRequested,
                                     CubebUtils::PreferredSampleRate(),
                                     mainThread);

    gGraphs.Put(hashkey, graph);

    LOG(LogLevel::Debug,
        ("Starting up MediaStreamGraph %p for window %p", graph, aWindow));
  }

  return graph;
}
|
|
|
|
|
2013-05-08 15:44:07 +04:00
|
|
|
// Create a fresh offline (non-realtime) graph at aSampleRate, tied to
// aWindow's main thread. Unlike GetInstance(), each call creates a new
// graph; it is not cached. Main thread only.
MediaStreamGraph*
MediaStreamGraph::CreateNonRealtimeInstance(TrackRate aSampleRate,
                                            nsPIDOMWindowInner* aWindow)
{
  NS_ASSERTION(NS_IsMainThread(), "Main thread only");

  nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(aWindow);
  // Guard against a null window: fall back to the plain main thread, the
  // same way GetInstance() does, instead of dereferencing a null
  // parentObject.
  AbstractThread* mainThread =
    parentObject ? parentObject->AbstractMainThreadFor(TaskCategory::Other)
                 : AbstractThread::MainThread();
  MediaStreamGraphImpl* graph = new MediaStreamGraphImpl(
    OFFLINE_THREAD_DRIVER,
    aSampleRate,
    mainThread);

  LOG(LogLevel::Debug, ("Starting up Offline MediaStreamGraph %p", graph));

  return graph;
}
|
|
|
|
|
|
|
|
// Shut down a graph created by CreateNonRealtimeInstance(). If processing
// never started, the graph is started with zero ticks so the shutdown
// sequence can run. Main thread only.
void
MediaStreamGraph::DestroyNonRealtimeInstance(MediaStreamGraph* aGraph)
{
  NS_ASSERTION(NS_IsMainThread(), "Main thread only");
  MOZ_ASSERT(aGraph->IsNonRealtime(), "Should not destroy the global graph here");

  MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(aGraph);

  if (!graph->mNonRealtimeProcessing) {
    // Start the graph, but don't produce anything
    graph->StartNonRealtimeProcessing(0);
  }
  graph->ForceShutDown(nullptr);
}
|
|
|
|
|
2017-07-26 21:18:20 +03:00
|
|
|
// XPCOM boilerplate: AddRef/Release/QueryInterface for the listed
// interfaces.
NS_IMPL_ISUPPORTS(MediaStreamGraphImpl, nsIMemoryReporter, nsITimerCallback,
                  nsINamed)
|
2014-04-13 22:08:10 +04:00
|
|
|
|
|
|
|
// nsIMemoryReporter entry point. Sizes must be gathered on the graph
// thread, so this normally bounces through a control message; shutdown and
// not-yet-started graphs are handled inline so the report always completes.
NS_IMETHODIMP
MediaStreamGraphImpl::CollectReports(nsIHandleReportCallback* aHandleReport,
                                     nsISupports* aData, bool aAnonymize)
{
  if (mLifecycleState >= LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN) {
    // Shutting down, nothing to report.
    FinishCollectReports(aHandleReport, aData, nsTArray<AudioNodeSizes>());
    return NS_OK;
  }

  class Message final : public ControlMessage {
  public:
    Message(MediaStreamGraphImpl *aGraph,
            nsIHandleReportCallback* aHandleReport,
            nsISupports *aHandlerData)
      : ControlMessage(nullptr)
      , mGraph(aGraph)
      , mHandleReport(aHandleReport)
      , mHandlerData(aHandlerData) {}
    void Run() override
    {
      mGraph->CollectSizesForMemoryReport(mHandleReport.forget(),
                                          mHandlerData.forget());
    }
    void RunDuringShutdown() override
    {
      // Run this message during shutdown too, so that endReports is called.
      Run();
    }
    MediaStreamGraphImpl *mGraph;
    // nsMemoryReporterManager keeps the callback and data alive only if it
    // does not time out.
    nsCOMPtr<nsIHandleReportCallback> mHandleReport;
    nsCOMPtr<nsISupports> mHandlerData;
  };

  // When a non-realtime graph has not started, there is no thread yet, so
  // collect sizes on this thread.
  if (!(mRealtime || mNonRealtimeProcessing)) {
    CollectSizesForMemoryReport(do_AddRef(aHandleReport), do_AddRef(aData));
    return NS_OK;
  }

  AppendMessage(MakeUnique<Message>(this, aHandleReport, aData));

  return NS_OK;
}
|
|
|
|
|
|
|
|
/**
 * Gathers per-AudioNode memory usage for every AudioNodeStream in the graph,
 * then dispatches the results to the main thread where FinishCollectReports
 * delivers them to the memory reporter manager.
 *
 * Normally runs on the graph thread (via the ControlMessage in
 * CollectReports); for a non-realtime graph that has not started it is called
 * directly on the reporting thread.
 *
 * @param aHandleReport reporter callback, reference transferred in.
 * @param aHandlerData  opaque handler data, reference transferred in.
 */
void
MediaStreamGraphImpl::CollectSizesForMemoryReport(
  already_AddRefed<nsIHandleReportCallback> aHandleReport,
  already_AddRefed<nsISupports> aHandlerData)
{
  // Ferries the collected sizes (plus the reporter callback/data) back to
  // the main thread.
  class FinishCollectRunnable final : public Runnable
  {
  public:
    explicit FinishCollectRunnable(
      already_AddRefed<nsIHandleReportCallback> aHandleReport,
      already_AddRefed<nsISupports> aHandlerData)
      : mozilla::Runnable("FinishCollectRunnable")
      , mHandleReport(aHandleReport)
      , mHandlerData(aHandlerData)
    {}

    NS_IMETHOD Run() override
    {
      MediaStreamGraphImpl::FinishCollectReports(mHandleReport, mHandlerData,
                                                 Move(mAudioStreamSizes));
      return NS_OK;
    }

    // Filled in by the collecting thread before dispatch; one entry per
    // AudioNodeStream.
    nsTArray<AudioNodeSizes> mAudioStreamSizes;

  private:
    ~FinishCollectRunnable() {}

    // Avoiding nsCOMPtr because NSCAP_ASSERT_NO_QUERY_NEEDED in its
    // constructor modifies the ref-count, which cannot be done off main
    // thread.
    RefPtr<nsIHandleReportCallback> mHandleReport;
    RefPtr<nsISupports> mHandlerData;
  };

  RefPtr<FinishCollectRunnable> runnable =
    new FinishCollectRunnable(Move(aHandleReport), Move(aHandlerData));

  auto audioStreamSizes = &runnable->mAudioStreamSizes;

  // Only AudioNodeStreams contribute to the Web Audio memory reports.
  for (MediaStream* s : AllStreams()) {
    AudioNodeStream* stream = s->AsAudioNodeStream();
    if (stream) {
      AudioNodeSizes* usage = audioStreamSizes->AppendElement();
      stream->SizeOfAudioNodesIncludingThis(MallocSizeOf, *usage);
    }
  }

  mAbstractMainThread->Dispatch(runnable.forget());
}
|
|
|
|
|
|
|
|
/**
 * Main-thread tail of the memory-report flow: emits one report per AudioNode
 * engine/stream (plus PannerNode HRTF databases) and then tells the reporter
 * manager the asynchronous report is complete.
 *
 * @param aHandleReport     reporter callback.
 * @param aData             opaque handler data passed to each callback.
 * @param aAudioStreamSizes per-node sizes collected on the graph thread.
 */
void
MediaStreamGraphImpl::
FinishCollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
                     const nsTArray<AudioNodeSizes>& aAudioStreamSizes)
{
  MOZ_ASSERT(NS_IsMainThread());

  nsCOMPtr<nsIMemoryReporterManager> manager =
    do_GetService("@mozilla.org/memory-reporter-manager;1");

  // Without the manager there is nobody to EndReport() to; bail out.
  if (!manager)
    return;

#define REPORT(_path, _amount, _desc) \
  aHandleReport->Callback(EmptyCString(), _path, KIND_HEAP, UNITS_BYTES, \
                          _amount, NS_LITERAL_CSTRING(_desc), aData);

  for (size_t i = 0; i < aAudioStreamSizes.Length(); i++) {
    const AudioNodeSizes& usage = aAudioStreamSizes[i];
    // mNodeType may be null if the stream's node type was never recorded.
    const char* const nodeType =
      usage.mNodeType ? usage.mNodeType : "<unknown>";

    nsPrintfCString enginePath("explicit/webaudio/audio-node/%s/engine-objects",
                               nodeType);
    REPORT(enginePath, usage.mEngine,
           "Memory used by AudioNode engine objects (Web Audio).");

    nsPrintfCString streamPath("explicit/webaudio/audio-node/%s/stream-objects",
                               nodeType);
    REPORT(streamPath, usage.mStream,
           "Memory used by AudioNode stream objects (Web Audio).");

  }

  // HRTF databases are shared across PannerNodes, so report them once.
  size_t hrtfLoaders = WebCore::HRTFDatabaseLoader::sizeOfLoaders(MallocSizeOf);
  if (hrtfLoaders) {
    REPORT(NS_LITERAL_CSTRING(
             "explicit/webaudio/audio-node/PannerNode/hrtf-databases"),
           hrtfLoaders,
           "Memory used by PannerNode databases (Web Audio).");
  }

#undef REPORT

  // Signals the manager that this asynchronous reporter is done.
  manager->EndReport();
}
|
|
|
|
|
2012-04-30 07:11:40 +04:00
|
|
|
// Creates a new SourceMediaStream and registers it with this graph.
// The graph takes over ownership via AddStream.
SourceMediaStream*
MediaStreamGraph::CreateSourceStream()
{
  SourceMediaStream* source = new SourceMediaStream();
  AddStream(source);
  return source;
}
|
|
|
|
|
2012-07-31 16:17:21 +04:00
|
|
|
// Creates a new TrackUnionStream and registers it with this graph.
// Returned as the ProcessedMediaStream base type.
ProcessedMediaStream*
MediaStreamGraph::CreateTrackUnionStream()
{
  TrackUnionStream* unionStream = new TrackUnionStream();
  AddStream(unionStream);
  return unionStream;
}
|
|
|
|
|
2015-07-24 15:28:16 +03:00
|
|
|
// Creates a new AudioCaptureStream for the given track and registers it
// with this graph.
ProcessedMediaStream*
MediaStreamGraph::CreateAudioCaptureStream(TrackID aTrackId)
{
  AudioCaptureStream* capture = new AudioCaptureStream(aTrackId);
  AddStream(capture);
  return capture;
}
|
|
|
|
|
2015-08-12 02:46:56 +03:00
|
|
|
void
|
2015-10-22 12:14:46 +03:00
|
|
|
MediaStreamGraph::AddStream(MediaStream* aStream)
|
2015-08-12 02:46:56 +03:00
|
|
|
{
|
|
|
|
NS_ADDREF(aStream);
|
|
|
|
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
|
|
|
|
aStream->SetGraphImpl(graph);
|
2016-01-21 00:14:33 +03:00
|
|
|
graph->AppendMessage(MakeUnique<CreateMessage>(aStream));
|
2015-08-12 02:46:56 +03:00
|
|
|
}
|
|
|
|
|
2016-04-26 03:23:21 +03:00
|
|
|
// Main-thread runnable that re-enters MediaStreamGraph::NotifyWhenGraphStarted
// for the given stream (used to re-poll until the graph's audio driver is
// actually running).
class GraphStartedRunnable final : public Runnable
{
public:
  GraphStartedRunnable(AudioNodeStream* aStream, MediaStreamGraph* aGraph)
    : Runnable("GraphStartedRunnable")
    , mStream(aStream)
    , mGraph(aGraph)
  { }

  NS_IMETHOD Run() override {
    mGraph->NotifyWhenGraphStarted(mStream);
    return NS_OK;
  }

private:
  RefPtr<AudioNodeStream> mStream;
  // NOTE(review): raw pointer — assumes the graph outlives this runnable;
  // confirm against the dispatch sites.
  MediaStreamGraph* mGraph;
};
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraph::NotifyWhenGraphStarted(AudioNodeStream* aStream)
|
|
|
|
{
|
2015-04-29 12:02:55 +03:00
|
|
|
MOZ_ASSERT(NS_IsMainThread());
|
|
|
|
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
class GraphStartedNotificationControlMessage : public ControlMessage
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
explicit GraphStartedNotificationControlMessage(AudioNodeStream* aStream)
|
|
|
|
: ControlMessage(aStream)
|
|
|
|
{
|
|
|
|
}
|
2016-01-18 06:50:29 +03:00
|
|
|
void Run() override
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
{
|
|
|
|
// This runs on the graph thread, so when this runs, and the current
|
|
|
|
// driver is an AudioCallbackDriver, we know the audio hardware is
|
|
|
|
// started. If not, we are going to switch soon, keep reposting this
|
|
|
|
// ControlMessage.
|
|
|
|
MediaStreamGraphImpl* graphImpl = mStream->GraphImpl();
|
|
|
|
if (graphImpl->CurrentDriver()->AsAudioCallbackDriver()) {
|
|
|
|
nsCOMPtr<nsIRunnable> event = new dom::StateChangeTask(
|
|
|
|
mStream->AsAudioNodeStream(), nullptr, AudioContextState::Running);
|
2017-06-29 21:31:17 +03:00
|
|
|
graphImpl->Dispatch(event.forget());
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
} else {
|
|
|
|
nsCOMPtr<nsIRunnable> event = new GraphStartedRunnable(
|
|
|
|
mStream->AsAudioNodeStream(), mStream->Graph());
|
2017-06-29 21:31:17 +03:00
|
|
|
graphImpl->Dispatch(event.forget());
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
}
|
2016-01-18 06:50:29 +03:00
|
|
|
void RunDuringShutdown() override
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
{
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2015-04-29 12:02:55 +03:00
|
|
|
if (!aStream->IsDestroyed()) {
|
|
|
|
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
|
2016-01-21 00:14:33 +03:00
|
|
|
graphImpl->AppendMessage(MakeUnique<GraphStartedNotificationControlMessage>(aStream));
|
2015-04-29 12:02:55 +03:00
|
|
|
}
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2015-09-03 14:54:00 +03:00
|
|
|
MediaStreamGraphImpl::IncrementSuspendCount(MediaStream* aStream)
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
{
|
2015-09-03 14:54:00 +03:00
|
|
|
if (!aStream->IsSuspended()) {
|
|
|
|
MOZ_ASSERT(mStreams.Contains(aStream));
|
|
|
|
mStreams.RemoveElement(aStream);
|
|
|
|
mSuspendedStreams.AppendElement(aStream);
|
|
|
|
SetStreamOrderDirty();
|
|
|
|
}
|
|
|
|
aStream->IncrementSuspendCount();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::DecrementSuspendCount(MediaStream* aStream)
|
|
|
|
{
|
|
|
|
bool wasSuspended = aStream->IsSuspended();
|
|
|
|
aStream->DecrementSuspendCount();
|
|
|
|
if (wasSuspended && !aStream->IsSuspended()) {
|
|
|
|
MOZ_ASSERT(mSuspendedStreams.Contains(aStream));
|
|
|
|
mSuspendedStreams.RemoveElement(aStream);
|
|
|
|
mStreams.AppendElement(aStream);
|
|
|
|
ProcessedMediaStream* ps = aStream->AsProcessedStream();
|
2015-09-08 07:18:15 +03:00
|
|
|
if (ps) {
|
|
|
|
ps->mCycleMarker = NOT_VISITED;
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
2015-09-03 14:54:00 +03:00
|
|
|
SetStreamOrderDirty();
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2015-09-16 07:15:21 +03:00
|
|
|
MediaStreamGraphImpl::SuspendOrResumeStreams(AudioContextOperation aAudioContextOperation,
|
|
|
|
const nsTArray<MediaStream*>& aStreamSet)
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
{
|
|
|
|
// For our purpose, Suspend and Close are equivalent: we want to remove the
|
|
|
|
// streams from the set of streams that are going to be processed.
|
2015-09-16 07:15:21 +03:00
|
|
|
for (MediaStream* stream : aStreamSet) {
|
|
|
|
if (aAudioContextOperation == AudioContextOperation::Resume) {
|
2015-09-03 14:54:00 +03:00
|
|
|
DecrementSuspendCount(stream);
|
2015-09-16 07:15:21 +03:00
|
|
|
} else {
|
2015-09-03 14:54:00 +03:00
|
|
|
IncrementSuspendCount(stream);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
}
|
2017-02-06 18:22:36 +03:00
|
|
|
LOG(LogLevel::Debug,
|
|
|
|
("Moving streams between suspended and running"
|
2017-07-26 23:03:57 +03:00
|
|
|
"state: mStreams: %zu, mSuspendedStreams: %zu",
|
2017-02-06 18:22:36 +03:00
|
|
|
mStreams.Length(),
|
|
|
|
mSuspendedStreams.Length()));
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
#ifdef DEBUG
|
|
|
|
// The intersection of the two arrays should be null.
|
|
|
|
for (uint32_t i = 0; i < mStreams.Length(); i++) {
|
|
|
|
for (uint32_t j = 0; j < mSuspendedStreams.Length(); j++) {
|
|
|
|
MOZ_ASSERT(
|
|
|
|
mStreams[i] != mSuspendedStreams[j],
|
|
|
|
"The suspended stream set and running stream set are not disjoint.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::AudioContextOperationCompleted(MediaStream* aStream,
|
|
|
|
void* aPromise,
|
|
|
|
AudioContextOperation aOperation)
|
|
|
|
{
|
|
|
|
// This can be called from the thread created to do cubeb operation, or the
|
|
|
|
// MSG thread. The pointers passed back here are refcounted, so are still
|
|
|
|
// alive.
|
|
|
|
MonitorAutoLock lock(mMonitor);
|
|
|
|
|
|
|
|
AudioContextState state;
|
|
|
|
switch (aOperation) {
|
2015-09-24 23:49:03 +03:00
|
|
|
case AudioContextOperation::Suspend:
|
|
|
|
state = AudioContextState::Suspended;
|
|
|
|
break;
|
|
|
|
case AudioContextOperation::Resume:
|
|
|
|
state = AudioContextState::Running;
|
|
|
|
break;
|
|
|
|
case AudioContextOperation::Close:
|
|
|
|
state = AudioContextState::Closed;
|
|
|
|
break;
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
default: MOZ_CRASH("Not handled.");
|
|
|
|
}
|
|
|
|
|
|
|
|
nsCOMPtr<nsIRunnable> event = new dom::StateChangeTask(
|
|
|
|
aStream->AsAudioNodeStream(), aPromise, state);
|
2017-06-29 21:31:17 +03:00
|
|
|
mAbstractMainThread->Dispatch(event.forget());
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2015-09-16 07:15:21 +03:00
|
|
|
MediaStreamGraphImpl::ApplyAudioContextOperationImpl(
|
|
|
|
MediaStream* aDestinationStream, const nsTArray<MediaStream*>& aStreams,
|
|
|
|
AudioContextOperation aOperation, void* aPromise)
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
{
|
|
|
|
MOZ_ASSERT(CurrentDriver()->OnThread());
|
|
|
|
|
2015-09-16 07:15:21 +03:00
|
|
|
SuspendOrResumeStreams(aOperation, aStreams);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
|
2015-12-01 13:48:02 +03:00
|
|
|
bool switching = false;
|
|
|
|
GraphDriver* nextDriver = nullptr;
|
|
|
|
{
|
|
|
|
MonitorAutoLock lock(mMonitor);
|
|
|
|
switching = CurrentDriver()->Switching();
|
|
|
|
if (switching) {
|
|
|
|
nextDriver = CurrentDriver()->NextDriver();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
// If we have suspended the last AudioContext, and we don't have other
|
|
|
|
// streams that have audio, this graph will automatically switch to a
|
|
|
|
// SystemCallbackDriver, because it can't find a MediaStream that has an audio
|
2015-07-13 20:16:53 +03:00
|
|
|
// track. When resuming, force switching to an AudioCallbackDriver (if we're
|
|
|
|
// not already switching). It would have happened at the next iteration
|
|
|
|
// anyways, but doing this now save some time.
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
if (aOperation == AudioContextOperation::Resume) {
|
|
|
|
if (!CurrentDriver()->AsAudioCallbackDriver()) {
|
2015-07-13 20:16:53 +03:00
|
|
|
AudioCallbackDriver* driver;
|
2015-12-01 13:48:02 +03:00
|
|
|
if (switching) {
|
|
|
|
MOZ_ASSERT(nextDriver->AsAudioCallbackDriver());
|
|
|
|
driver = nextDriver->AsAudioCallbackDriver();
|
2015-07-13 20:16:53 +03:00
|
|
|
} else {
|
|
|
|
driver = new AudioCallbackDriver(this);
|
2015-12-01 13:48:02 +03:00
|
|
|
MonitorAutoLock lock(mMonitor);
|
2015-07-13 20:16:53 +03:00
|
|
|
CurrentDriver()->SwitchAtNextIteration(driver);
|
|
|
|
}
|
2015-09-16 07:15:21 +03:00
|
|
|
driver->EnqueueStreamAndPromiseForOperation(aDestinationStream,
|
|
|
|
aPromise, aOperation);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
} else {
|
|
|
|
// We are resuming a context, but we are already using an
|
|
|
|
// AudioCallbackDriver, we can resolve the promise now.
|
2015-09-16 07:15:21 +03:00
|
|
|
AudioContextOperationCompleted(aDestinationStream, aPromise, aOperation);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// Close, suspend: check if we are going to switch to a
|
|
|
|
// SystemAudioCallbackDriver, and pass the promise to the AudioCallbackDriver
|
|
|
|
// if that's the case, so it can notify the content.
|
|
|
|
// This is the same logic as in UpdateStreamOrder, but it's simpler to have it
|
|
|
|
// here as well so we don't have to store the Promise(s) on the Graph.
|
|
|
|
if (aOperation != AudioContextOperation::Resume) {
|
2016-03-08 20:11:08 +03:00
|
|
|
bool shouldAEC = false;
|
|
|
|
bool audioTrackPresent = AudioTrackPresent(shouldAEC);
|
|
|
|
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
if (!audioTrackPresent && CurrentDriver()->AsAudioCallbackDriver()) {
|
|
|
|
CurrentDriver()->AsAudioCallbackDriver()->
|
2015-09-16 07:15:21 +03:00
|
|
|
EnqueueStreamAndPromiseForOperation(aDestinationStream, aPromise,
|
|
|
|
aOperation);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
|
2015-07-13 20:16:53 +03:00
|
|
|
SystemClockDriver* driver;
|
2015-12-01 13:48:02 +03:00
|
|
|
if (nextDriver) {
|
|
|
|
MOZ_ASSERT(!nextDriver->AsAudioCallbackDriver());
|
2015-07-13 20:16:53 +03:00
|
|
|
} else {
|
|
|
|
driver = new SystemClockDriver(this);
|
2015-12-01 13:48:02 +03:00
|
|
|
MonitorAutoLock lock(mMonitor);
|
2015-07-13 20:16:53 +03:00
|
|
|
CurrentDriver()->SwitchAtNextIteration(driver);
|
|
|
|
}
|
2015-07-24 16:09:21 +03:00
|
|
|
// We are closing or suspending an AudioContext, but we just got resumed.
|
|
|
|
// Queue the operation on the next driver so that the ordering is
|
|
|
|
// preserved.
|
2015-12-01 13:48:02 +03:00
|
|
|
} else if (!audioTrackPresent && switching) {
|
2017-08-30 16:18:10 +03:00
|
|
|
MOZ_ASSERT(nextDriver->AsAudioCallbackDriver() ||
|
|
|
|
nextDriver->AsSystemClockDriver()->IsFallback());
|
|
|
|
if (nextDriver->AsAudioCallbackDriver()) {
|
|
|
|
nextDriver->AsAudioCallbackDriver()->
|
|
|
|
EnqueueStreamAndPromiseForOperation(aDestinationStream, aPromise,
|
|
|
|
aOperation);
|
|
|
|
} else {
|
|
|
|
// If this is not an AudioCallbackDriver, this means we failed opening an
|
|
|
|
// AudioCallbackDriver in the past, and we're constantly trying to re-open
|
|
|
|
// an new audio stream, but are running this graph that has an audio track
|
|
|
|
// off a SystemClockDriver for now to keep things moving. This is the
|
|
|
|
// case where we're trying to switch an an system driver (because suspend
|
|
|
|
// or close have been called on an AudioContext, or we've closed the
|
|
|
|
// page), but we're already running one. We can just resolve the promise
|
|
|
|
// now: we're already running off a system thread.
|
|
|
|
AudioContextOperationCompleted(aDestinationStream, aPromise, aOperation);
|
|
|
|
}
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
} else {
|
|
|
|
// We are closing or suspending an AudioContext, but something else is
|
|
|
|
// using the audio stream, we can resolve the promise now.
|
2015-09-16 07:15:21 +03:00
|
|
|
AudioContextOperationCompleted(aDestinationStream, aPromise, aOperation);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2015-09-16 07:15:21 +03:00
|
|
|
MediaStreamGraph::ApplyAudioContextOperation(MediaStream* aDestinationStream,
|
|
|
|
const nsTArray<MediaStream*>& aStreams,
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
AudioContextOperation aOperation,
|
|
|
|
void* aPromise)
|
|
|
|
{
|
|
|
|
class AudioContextOperationControlMessage : public ControlMessage
|
|
|
|
{
|
|
|
|
public:
|
2015-09-16 07:15:21 +03:00
|
|
|
AudioContextOperationControlMessage(MediaStream* aDestinationStream,
|
|
|
|
const nsTArray<MediaStream*>& aStreams,
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
AudioContextOperation aOperation,
|
|
|
|
void* aPromise)
|
2015-09-16 07:15:21 +03:00
|
|
|
: ControlMessage(aDestinationStream)
|
|
|
|
, mStreams(aStreams)
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
, mAudioContextOperation(aOperation)
|
|
|
|
, mPromise(aPromise)
|
|
|
|
{
|
|
|
|
}
|
2016-01-18 06:50:29 +03:00
|
|
|
void Run() override
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
{
|
2015-09-16 07:15:21 +03:00
|
|
|
mStream->GraphImpl()->ApplyAudioContextOperationImpl(mStream,
|
|
|
|
mStreams, mAudioContextOperation, mPromise);
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
2016-01-18 06:50:29 +03:00
|
|
|
void RunDuringShutdown() override
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
{
|
2017-03-20 18:25:19 +03:00
|
|
|
MOZ_ASSERT(mAudioContextOperation == AudioContextOperation::Close,
|
|
|
|
"We should be reviving the graph?");
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2015-09-16 07:15:21 +03:00
|
|
|
// We don't need strong references here for the same reason ControlMessage
|
|
|
|
// doesn't.
|
|
|
|
nsTArray<MediaStream*> mStreams;
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback starts running; this allows authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has completed (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operations have to be done on suspended streams, so we now use a double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
AudioContextOperation mAudioContextOperation;
|
|
|
|
void* mPromise;
|
|
|
|
};
|
|
|
|
|
|
|
|
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
|
|
|
|
graphImpl->AppendMessage(
|
2016-01-21 00:14:33 +03:00
|
|
|
MakeUnique<AudioContextOperationControlMessage>(aDestinationStream, aStreams,
|
|
|
|
aOperation, aPromise));
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback starts running; this allows authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has completed (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operations have to be done on suspended streams, so we now use a double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
}
|
|
|
|
|
2013-09-10 09:05:22 +04:00
|
|
|
bool
|
|
|
|
MediaStreamGraph::IsNonRealtime() const
|
|
|
|
{
|
2017-06-01 18:51:15 +03:00
|
|
|
return !static_cast<const MediaStreamGraphImpl*>(this)->mRealtime;
|
2013-09-10 09:05:22 +04:00
|
|
|
}
|
|
|
|
|
2013-05-17 03:30:41 +04:00
|
|
|
void
|
2014-11-19 13:21:38 +03:00
|
|
|
MediaStreamGraph::StartNonRealtimeProcessing(uint32_t aTicksToProcess)
|
2013-05-17 03:30:41 +04:00
|
|
|
{
|
|
|
|
NS_ASSERTION(NS_IsMainThread(), "main thread only");
|
|
|
|
|
|
|
|
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
|
|
|
|
NS_ASSERTION(!graph->mRealtime, "non-realtime only");
|
|
|
|
|
|
|
|
if (graph->mNonRealtimeProcessing)
|
|
|
|
return;
|
2014-04-25 18:09:30 +04:00
|
|
|
|
2015-07-31 14:43:55 +03:00
|
|
|
graph->mEndTime =
|
2015-08-13 07:23:17 +03:00
|
|
|
graph->RoundUpToNextAudioBlock(graph->mStateComputedTime +
|
2015-07-31 14:43:55 +03:00
|
|
|
aTicksToProcess - 1);
|
2013-05-17 03:30:41 +04:00
|
|
|
graph->mNonRealtimeProcessing = true;
|
|
|
|
graph->EnsureRunInStableState();
|
|
|
|
}
|
|
|
|
|
2013-09-13 20:12:07 +04:00
|
|
|
void
|
|
|
|
ProcessedMediaStream::AddInput(MediaInputPort* aPort)
|
|
|
|
{
|
2016-08-24 20:04:33 +03:00
|
|
|
MediaStream* s = aPort->GetSource();
|
|
|
|
if (!s->IsSuspended()) {
|
|
|
|
mInputs.AppendElement(aPort);
|
|
|
|
} else {
|
|
|
|
mSuspendedInputs.AppendElement(aPort);
|
|
|
|
}
|
|
|
|
GraphImpl()->SetStreamOrderDirty();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ProcessedMediaStream::InputSuspended(MediaInputPort* aPort)
|
|
|
|
{
|
|
|
|
GraphImpl()->AssertOnGraphThreadOrNotRunning();
|
|
|
|
mInputs.RemoveElement(aPort);
|
|
|
|
mSuspendedInputs.AppendElement(aPort);
|
|
|
|
GraphImpl()->SetStreamOrderDirty();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ProcessedMediaStream::InputResumed(MediaInputPort* aPort)
|
|
|
|
{
|
|
|
|
GraphImpl()->AssertOnGraphThreadOrNotRunning();
|
|
|
|
mSuspendedInputs.RemoveElement(aPort);
|
2013-09-13 20:12:07 +04:00
|
|
|
mInputs.AppendElement(aPort);
|
|
|
|
GraphImpl()->SetStreamOrderDirty();
|
|
|
|
}
|
|
|
|
|
2015-07-24 15:28:16 +03:00
|
|
|
void
|
|
|
|
MediaStreamGraph::RegisterCaptureStreamForWindow(
|
|
|
|
uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(NS_IsMainThread());
|
|
|
|
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
|
|
|
|
graphImpl->RegisterCaptureStreamForWindow(aWindowId, aCaptureStream);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::RegisterCaptureStreamForWindow(
|
|
|
|
uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(NS_IsMainThread());
|
|
|
|
WindowAndStream winAndStream;
|
|
|
|
winAndStream.mWindowId = aWindowId;
|
|
|
|
winAndStream.mCaptureStreamSink = aCaptureStream;
|
|
|
|
mWindowCaptureStreams.AppendElement(winAndStream);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraph::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(NS_IsMainThread());
|
|
|
|
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
|
|
|
|
graphImpl->UnregisterCaptureStreamForWindow(aWindowId);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaStreamGraphImpl::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(NS_IsMainThread());
|
2016-09-22 11:29:50 +03:00
|
|
|
for (int32_t i = mWindowCaptureStreams.Length() - 1; i >= 0; i--) {
|
2015-07-24 15:28:16 +03:00
|
|
|
if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
|
|
|
|
mWindowCaptureStreams.RemoveElementAt(i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Connect aMediaStream to the capture sink registered for aWindowId, by
 * delegating to aMediaStream's own graph implementation. Returns the new
 * input port, or nullptr if no sink is registered for that window.
 */
already_AddRefed<MediaInputPort>
MediaStreamGraph::ConnectToCaptureStream(uint64_t aWindowId,
                                         MediaStream* aMediaStream)
{
  MediaStreamGraphImpl* graphImpl = aMediaStream->GraphImpl();
  return graphImpl->ConnectToCaptureStream(aWindowId, aMediaStream);
}
|
|
|
|
|
|
|
|
/**
 * Find the capture sink registered for aWindowId and allocate an input
 * port feeding aMediaStream into it. Returns nullptr when the window has
 * no registered capture sink. Main-thread only. Only the first matching
 * entry is used.
 */
already_AddRefed<MediaInputPort>
MediaStreamGraphImpl::ConnectToCaptureStream(uint64_t aWindowId,
                                             MediaStream* aMediaStream)
{
  MOZ_ASSERT(NS_IsMainThread());
  for (auto& windowAndStream : mWindowCaptureStreams) {
    if (windowAndStream.mWindowId != aWindowId) {
      continue;
    }
    ProcessedMediaStream* sink = windowAndStream.mCaptureStreamSink;
    return sink->AllocateInputPort(aMediaStream);
  }
  return nullptr;
}
|
|
|
|
|
2016-12-08 11:00:12 +03:00
|
|
|
void
|
2017-06-29 21:30:57 +03:00
|
|
|
MediaStreamGraph::DispatchToMainThreadAfterStreamStateUpdate(
|
|
|
|
already_AddRefed<nsIRunnable> aRunnable)
|
2016-12-08 11:00:12 +03:00
|
|
|
{
|
|
|
|
AssertOnGraphThreadOrNotRunning();
|
|
|
|
*mPendingUpdateRunnables.AppendElement() =
|
2017-06-29 21:30:57 +03:00
|
|
|
AbstractMainThread()->CreateDirectTaskDrainer(Move(aRunnable));
|
2016-12-08 11:00:12 +03:00
|
|
|
}
|
|
|
|
|
2015-07-13 18:25:42 +03:00
|
|
|
} // namespace mozilla
|