2014-10-11 17:02:59 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
|
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#include "MediaStreamGraphImpl.h"
|
2016-06-30 10:07:48 +03:00
|
|
|
#include "MediaStreamListener.h"
|
2014-10-11 17:02:59 +04:00
|
|
|
#include "mozilla/MathAlgorithms.h"
|
|
|
|
#include "mozilla/unused.h"
|
|
|
|
|
|
|
|
#include "AudioSegment.h"
|
|
|
|
#include "VideoSegment.h"
|
|
|
|
#include "nsContentUtils.h"
|
|
|
|
#include "nsIAppShell.h"
|
|
|
|
#include "nsIObserver.h"
|
|
|
|
#include "nsPrintfCString.h"
|
|
|
|
#include "nsServiceManagerUtils.h"
|
|
|
|
#include "nsWidgetsCID.h"
|
|
|
|
#include "prerror.h"
|
2015-05-19 21:15:34 +03:00
|
|
|
#include "mozilla/Logging.h"
|
2014-10-11 17:02:59 +04:00
|
|
|
#include "mozilla/Attributes.h"
|
|
|
|
#include "TrackUnionStream.h"
|
|
|
|
#include "ImageContainer.h"
|
|
|
|
#include "AudioChannelService.h"
|
|
|
|
#include "AudioNodeEngine.h"
|
|
|
|
#include "AudioNodeStream.h"
|
|
|
|
#include "AudioNodeExternalInputStream.h"
|
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a couple words, the behavior we want:
- Closed context cannot have new nodes created, but can do decodeAudioData,
and create buffers, and such.
- OfflineAudioContexts don't support those methods, transitions happen at
startRendering and at the end of processing. onstatechange is used to make
this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback start running, this allow authors to know how long
the audio stack takes to start running.
- MediaStreams that feed in/go out of a suspended graph should respectively
not buffer at the graph input, and output silence
- suspended context should not be doing much on the CPU, and we should try
to pause audio streams if we can (this behaviour is the main reason we need
this in the first place, for saving battery on mobile, and CPU on all
platforms)
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, to be able to operate
on all the streams of a given AudioContext on the Graph thread without
having to go and lock everytime to touch the AudioContext. This happens in
the AudioNodeStream ctor. IDs are of course constant for the lifetime of the
node.
- When an AudioContext goes into suspended mode, streams for this
AudioContext are moved out of the mStreams array to a second array,
mSuspendedStreams. Streams in mSuspendedStream are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStream/Stream with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not a new behaviour (this is what the MSG does since
the refactoring), but is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherd
down other threads and to the GraphDriver, if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. Then, the driver
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread,
and resolve it, casting back to Promise* after asserting we're back on the
main thread. This way, we can send them back on the main thread once an
operation has complete (suspending an audio stream, starting it again on
resume(), etc.), without having to do bookkeeping between suspend calls and
their result. Promises are not thread safe, so we can't move them around
AddRef-ed.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author goes suspend()/resume()/suspend() in the same script.
- Some operation have to be done on suspended stream, so we now use double
for-loop around mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp.
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStream that have one of their input stream suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream on a suspended graph, calling
suspend/resume when a graph has just been close()d).
2015-02-27 20:22:05 +03:00
|
|
|
#include "webaudio/MediaStreamAudioDestinationNode.h"
|
2014-10-11 17:02:59 +04:00
|
|
|
#include <algorithm>
|
|
|
|
#include "DOMMediaStream.h"
|
|
|
|
#include "GeckoProfiler.h"
|
|
|
|
#ifdef MOZ_WEBRTC
|
|
|
|
#include "AudioOutputObserver.h"
|
|
|
|
#endif
|
|
|
|
|
|
|
|
using namespace mozilla::layers;
|
|
|
|
using namespace mozilla::dom;
|
|
|
|
using namespace mozilla::gfx;
|
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
|
2014-10-25 21:24:36 +04:00
|
|
|
#ifdef STREAM_LOG
|
|
|
|
#undef STREAM_LOG
|
|
|
|
#endif
|
|
|
|
|
2015-11-15 16:49:01 +03:00
|
|
|
LazyLogModule gTrackUnionStreamLog("TrackUnionStream");
|
2015-05-21 23:22:04 +03:00
|
|
|
#define STREAM_LOG(type, msg) MOZ_LOG(gTrackUnionStreamLog, type, msg)
|
2014-10-11 17:02:59 +04:00
|
|
|
|
2016-06-29 13:27:13 +03:00
|
|
|
// Constructs an empty union stream. Output track IDs are handed out by
// AddTrack() starting from mNextAvailableTrackID, which begins at 1.
TrackUnionStream::TrackUnionStream()
  : ProcessedMediaStream()
  , mNextAvailableTrackID(1)
{
}
|
|
|
|
|
|
|
|
// Disconnects aPort from this stream: ends and drops every track-map entry
// fed by that port, then delegates to the base class to detach the port.
void TrackUnionStream::RemoveInput(MediaInputPort* aPort)
{
  STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p removing input %p", this, aPort));
  // Iterate backwards so RemoveElementAt() cannot shift indices of entries
  // we have not visited yet.
  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (mTrackMap[i].mInputPort != aPort) {
      continue;
    }
    STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p removing trackmap entry %d", this, i));
    EndTrack(i);
    mTrackMap.RemoveElementAt(i);
  }
  ProcessedMediaStream::RemoveInput(aPort);
}
|
|
|
|
// Graph-thread processing step for the interval [aFrom, aTo): copies data
// from every input stream's tracks into the matching output tracks, creates
// output tracks for newly appearing input tracks, ends tracks whose inputs
// have finished, and finishes the whole stream once all inputs are done.
// aFlags: only ALLOW_FINISH is consulted here, gating FinishOnGraphThread().
void TrackUnionStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  if (IsFinishedOnGraphThread()) {
    return;
  }
  // Per-track-map-entry scratch state for this iteration. Entries default to
  // "finished" / "no matching input"; the loops below overwrite them for
  // entries that still have a live input track.
  AutoTArray<bool,8> mappedTracksFinished;
  AutoTArray<bool,8> mappedTracksWithMatchingInputTracks;
  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
    mappedTracksFinished.AppendElement(true);
    mappedTracksWithMatchingInputTracks.AppendElement(false);
  }
  // With no inputs at all we are neither "all finished" nor "all have data".
  bool allFinished = !mInputs.IsEmpty();
  bool allHaveCurrentData = !mInputs.IsEmpty();
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    MediaStream* stream = mInputs[i]->GetSource();
    if (!stream->IsFinishedOnGraphThread()) {
      // XXX we really should check whether 'stream' has finished within time aTo,
      // not just that it's finishing when all its queued data eventually runs
      // out.
      allFinished = false;
    }
    if (!stream->HasCurrentData()) {
      allHaveCurrentData = false;
    }
    bool trackAdded = false;
    for (StreamTracks::TrackIter tracks(stream->GetStreamTracks());
         !tracks.IsEnded(); tracks.Next()) {
      // Look for an existing map entry binding this (port, input track) pair.
      bool found = false;
      for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
        TrackMapEntry* map = &mTrackMap[j];
        if (map->mInputPort == mInputs[i] && map->mInputTrackID == tracks->GetID()) {
          bool trackFinished = false;
          StreamTracks::Track* outputTrack = mTracks.FindTrack(map->mOutputTrackID);
          found = true;
          // A missing/ended output track, or a port that no longer passes this
          // track through, marks the mapped track as finished for this pass.
          if (!outputTrack || outputTrack->IsEnded() ||
              !mInputs[i]->PassTrackThrough(tracks->GetID())) {
            trackFinished = true;
          } else {
            CopyTrackData(tracks.get(), j, aFrom, aTo, &trackFinished);
          }
          mappedTracksFinished[j] = trackFinished;
          mappedTracksWithMatchingInputTracks[j] = true;
          break;
        }
      }
      // Unmapped input track: create an output track for it if the port
      // allows creation, then copy its data for this interval.
      if (!found && mInputs[i]->AllowCreationOf(tracks->GetID())) {
        bool trackFinished = false;
        trackAdded = true;
        uint32_t mapIndex = AddTrack(mInputs[i], tracks.get(), aFrom);
        CopyTrackData(tracks.get(), mapIndex, aFrom, aTo, &trackFinished);
        mappedTracksFinished.AppendElement(trackFinished);
        mappedTracksWithMatchingInputTracks.AppendElement(true);
      }
    }
    if (trackAdded) {
      for (MediaStreamListener* l : mListeners) {
        l->NotifyFinishedTrackCreation(Graph());
      }
    }
  }
  // Backwards so RemoveElementAt() keeps unvisited indices stable; note the
  // scratch arrays may be longer than mTrackMap if tracks were added above,
  // but indices < mTrackMap.Length() line up one-to-one.
  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (mappedTracksFinished[i]) {
      EndTrack(i);
    } else {
      allFinished = false;
    }
    if (!mappedTracksWithMatchingInputTracks[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }
  if (allFinished && mAutofinish && (aFlags & ALLOW_FINISH)) {
    // All streams have finished and won't add any more tracks, and
    // all our tracks have actually finished and been removed from our map,
    // so we're finished now.
    FinishOnGraphThread();
  } else {
    mTracks.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking(aTo));
  }
  if (allHaveCurrentData) {
    // We can make progress if we're not blocked
    mHasCurrentData = true;
  }
}
|
|
|
|
|
2016-01-26 05:49:01 +03:00
|
|
|
// Creates an output track mirroring aTrack (arriving via aPort), starting at
// graph time aFrom, and records the binding in mTrackMap.
// Output TrackID selection, in priority order:
//   1. the port's explicit destination id, if set (asserted available);
//   2. the input track's own id, if still free and above the allocator cursor;
//   3. a freshly allocated id from mNextAvailableTrackID.
// Returns the index of the new entry in mTrackMap.
uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort, StreamTracks::Track* aTrack,
                                    GraphTime aFrom)
{
  STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p adding track %d for "
                                 "input stream %p track %d, desired id %d",
                                 this, aTrack->GetID(), aPort->GetSource(),
                                 aTrack->GetID(),
                                 aPort->GetDestinationTrackId()));

  TrackID id;
  if (IsTrackIDExplicit(id = aPort->GetDestinationTrackId())) {
    MOZ_ASSERT(id >= mNextAvailableTrackID &&
               mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex,
               "Desired destination id taken. Only provide a destination ID "
               "if you can assure its availability, or we may not be able "
               "to bind to the correct DOM-side track.");
#ifdef DEBUG
    // Walk the input ports registered before aPort; mixing explicit-mapping
    // ports with TRACK_ANY ports risks colliding on the same output id.
    for (size_t i = 0; mInputs[i] != aPort; ++i) {
      MOZ_ASSERT(mInputs[i]->GetSourceTrackId() != TRACK_ANY,
                 "You are adding a MediaInputPort with a track mapping "
                 "while there already exist generic MediaInputPorts for this "
                 "destination stream. This can lead to TrackID collisions!");
    }
#endif
    mUsedTracks.InsertElementSorted(id);
  } else if ((id = aTrack->GetID()) &&
             id > mNextAvailableTrackID &&
             mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex) {
    // Input id available. Mark it used in mUsedTracks.
    mUsedTracks.InsertElementSorted(id);
  } else {
    // No desired destination id and Input id taken, allocate a new one.
    id = mNextAvailableTrackID;

    // Update mNextAvailableTrackID and prune any mUsedTracks members it now
    // covers.
    while (1) {
      if (!mUsedTracks.RemoveElementSorted(++mNextAvailableTrackID)) {
        // Not in use. We're done.
        break;
      }
    }
  }

  // Round up the track start time so the track, if anything, starts a
  // little later than the true time. This means we'll have enough
  // samples in our input stream to go just beyond the destination time.
  StreamTime outputStart = GraphTimeToStreamTimeWithBlocking(aFrom);

  // An empty clone of the input's segment type (audio/video) is passed to
  // listeners for the CREATED notification, then padded with null data up to
  // outputStart and handed to mTracks as the new track's initial segment.
  nsAutoPtr<MediaSegment> segment;
  segment = aTrack->GetSegment()->CreateEmptyClone();
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                TrackEventCommand::TRACK_EVENT_CREATED,
                                *segment,
                                aPort->GetSource(), aTrack->GetID());
  }
  segment->AppendNullData(outputStart);
  StreamTracks::Track* track =
    &mTracks.AddTrack(id, outputStart, segment.forget());
  STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p added track %d for input stream %p track %d, start ticks %lld",
                               this, track->GetID(), aPort->GetSource(), aTrack->GetID(),
                               (long long)outputStart));

  TrackMapEntry* map = mTrackMap.AppendElement();
  map->mEndOfConsumedInputTicks = 0;
  map->mEndOfLastInputIntervalInInputStream = -1;
  map->mEndOfLastInputIntervalInOutputStream = -1;
  map->mInputPort = aPort;
  map->mInputTrackID = aTrack->GetID();
  map->mOutputTrackID = track->GetID();
  map->mSegment = aTrack->GetSegment()->CreateEmptyClone();

  // Hand any direct listeners that were waiting for this output track over to
  // the source stream's input track. Backwards iteration keeps indices valid
  // across RemoveElementAt().
  for (int32_t i = mPendingDirectTrackListeners.Length() - 1; i >= 0; --i) {
    TrackBound<DirectMediaStreamTrackListener>& bound =
      mPendingDirectTrackListeners[i];
    if (bound.mTrackID != map->mOutputTrackID) {
      continue;
    }
    MediaStream* source = map->mInputPort->GetSource();
    map->mOwnedDirectListeners.AppendElement(bound.mListener);
    if (mDisabledTrackIDs.Contains(bound.mTrackID)) {
      bound.mListener->IncreaseDisabled();
    }
    STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p adding direct listener "
                                 "%p for track %d. Forwarding to input "
                                 "stream %p track %d.",
                                 this, bound.mListener.get(), bound.mTrackID,
                                 source, map->mInputTrackID));
    source->AddDirectTrackListenerImpl(bound.mListener.forget(),
                                       map->mInputTrackID);
    mPendingDirectTrackListeners.RemoveElementAt(i);
  }

  return mTrackMap.Length() - 1;
}
|
2014-09-18 09:13:15 +04:00
|
|
|
|
2014-10-11 17:02:59 +04:00
|
|
|
// Ends the output track bound to mTrackMap[aIndex]: notifies stream listeners
// with a TRACK_EVENT_ENDED change, notifies per-track listeners via
// NotifyEnded(), then marks the output track itself as ended. No-op if the
// output track is missing or already ended.
void TrackUnionStream::EndTrack(uint32_t aIndex)
{
  const TrackMapEntry& entry = mTrackMap[aIndex];
  StreamTracks::Track* outputTrack = mTracks.FindTrack(entry.mOutputTrackID);
  if (!outputTrack || outputTrack->IsEnded()) {
    return;
  }
  STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p ending track %d", this, outputTrack->GetID()));
  for (MediaStreamListener* listener : mListeners) {
    // Each listener gets the current duration as the event offset and a
    // fresh empty segment of the track's media type.
    StreamTime duration = outputTrack->GetSegment()->GetDuration();
    nsAutoPtr<MediaSegment> emptySegment;
    emptySegment = outputTrack->GetSegment()->CreateEmptyClone();
    listener->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(), duration,
                                       TrackEventCommand::TRACK_EVENT_ENDED,
                                       *emptySegment,
                                       entry.mInputPort->GetSource(),
                                       entry.mInputTrackID);
  }
  for (TrackBound<MediaStreamTrackListener>& bound : mTrackListeners) {
    if (bound.mTrackID == outputTrack->GetID()) {
      bound.mListener->NotifyEnded();
    }
  }
  outputTrack->SetEnded();
}
|
|
|
|
|
2016-01-26 05:49:01 +03:00
|
|
|
// Copies media from aInputTrack into the output track bound at
// mTrackMap[aMapIndex] for the graph-time interval [aFrom, aTo), walking the
// port's input intervals one at a time. Blocked input, muted cycles, and
// suspended sources all contribute null (silent/blank) data instead of real
// samples. Sets *aOutputTrackFinished to true when the input track has ended
// and all of its data up to this interval has been consumed.
void TrackUnionStream::CopyTrackData(StreamTracks::Track* aInputTrack,
                                     uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
                                     bool* aOutputTrackFinished)
{
  TrackMapEntry* map = &mTrackMap[aMapIndex];
  StreamTracks::Track* outputTrack = mTracks.FindTrack(map->mOutputTrackID);
  MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");

  // map->mSegment is the per-entry staging segment: filled, reported to
  // listeners, then drained into the output track each interval.
  MediaSegment* segment = map->mSegment;
  MediaStream* source = map->mInputPort->GetSource();

  GraphTime next;
  *aOutputTrackFinished = false;
  for (GraphTime t = aFrom; t < aTo; t = next) {
    MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
    interval.mEnd = std::min(interval.mEnd, aTo);
    StreamTime inputEnd = source->GraphTimeToStreamTimeWithBlocking(interval.mEnd);
    StreamTime inputTrackEndPoint = STREAM_TIME_MAX;

    // Input track has ended and we will consume its remaining data within
    // this interval: clamp the copy to its end and report the track finished.
    if (aInputTrack->IsEnded() &&
        aInputTrack->GetEnd() <= inputEnd) {
      inputTrackEndPoint = aInputTrack->GetEnd();
      *aOutputTrackFinished = true;
    }

    // Empty interval: nothing further to consume in [t, aTo).
    if (interval.mStart >= interval.mEnd) {
      break;
    }
    StreamTime ticks = interval.mEnd - interval.mStart;
    next = interval.mEnd;

    StreamTime outputStart = outputTrack->GetEnd();

    if (interval.mInputIsBlocked) {
      // Maybe the input track ended?
      segment->AppendNullData(ticks);
      STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                 this, (long long)ticks, outputTrack->GetID()));
    } else if (InMutedCycle()) {
      // Part of a muted cycle in the graph: output silence/blank frames.
      segment->AppendNullData(ticks);
    } else {
      if (source->IsSuspended()) {
        // Suspended source (e.g. a suspended AudioContext): substitute null
        // data for the whole processing interval.
        // NOTE(review): this appends aTo - aFrom ticks rather than `ticks`
        // for the current interval — presumably intentional for suspended
        // sources, but worth confirming against interval accounting.
        segment->AppendNullData(aTo - aFrom);
      } else {
        MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTimeWithBlocking(interval.mStart),
                   "Samples missing");
        StreamTime inputStart = source->GraphTimeToStreamTimeWithBlocking(interval.mStart);
        // Copy the input's samples for this interval, clamped to the input
        // track's end point when it has ended.
        segment->AppendSlice(*aInputTrack->GetSegment(),
                             std::min(inputTrackEndPoint, inputStart),
                             std::min(inputTrackEndPoint, inputEnd));
      }
    }
    ApplyTrackDisabling(outputTrack->GetID(), segment);
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      // Separate Audio and Video.
      if (segment->GetType() == MediaSegment::AUDIO) {
        l->NotifyQueuedAudioData(Graph(), outputTrack->GetID(),
                                 outputStart,
                                 *static_cast<AudioSegment*>(segment),
                                 map->mInputPort->GetSource(),
                                 map->mInputTrackID);
      } else {
        // This part will be removed in bug 1201363.
        l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
                                    outputStart, TrackEventCommand::TRACK_EVENT_NONE, *segment,
                                    map->mInputPort->GetSource(),
                                    map->mInputTrackID);
      }
    }
    for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
      if (b.mTrackID != outputTrack->GetID()) {
        continue;
      }
      b.mListener->NotifyQueuedChanges(Graph(), outputStart, *segment);
    }
    // Drain the staging segment into the output track; this also empties
    // `segment` for the next interval.
    outputTrack->GetSegment()->AppendFrom(segment);
  }
}
|
2016-03-03 19:28:37 +03:00
|
|
|
|
2016-03-18 16:21:51 +03:00
|
|
|
void
|
|
|
|
TrackUnionStream::SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled) {
|
|
|
|
for (TrackMapEntry& entry : mTrackMap) {
|
|
|
|
if (entry.mOutputTrackID == aTrackID) {
|
|
|
|
STREAM_LOG(LogLevel::Info, ("TrackUnionStream %p track %d was explicitly %s",
|
|
|
|
this, aTrackID, aEnabled ? "enabled" : "disabled"));
|
2016-04-29 06:45:25 +03:00
|
|
|
for (DirectMediaStreamTrackListener* listener : entry.mOwnedDirectListeners) {
|
2016-03-18 16:21:51 +03:00
|
|
|
bool oldEnabled = !mDisabledTrackIDs.Contains(aTrackID);
|
|
|
|
if (!oldEnabled && aEnabled) {
|
|
|
|
STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p track %d setting "
|
|
|
|
"direct listener enabled",
|
|
|
|
this, aTrackID));
|
|
|
|
listener->DecreaseDisabled();
|
|
|
|
} else if (oldEnabled && !aEnabled) {
|
|
|
|
STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p track %d setting "
|
|
|
|
"direct listener disabled",
|
|
|
|
this, aTrackID));
|
|
|
|
listener->IncreaseDisabled();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
MediaStream::SetTrackEnabledImpl(aTrackID, aEnabled);
|
|
|
|
}
|
|
|
|
|
2016-05-24 14:23:50 +03:00
|
|
|
MediaStream*
TrackUnionStream::GetInputStreamFor(TrackID aTrackID)
{
  // Scan the track map for the entry whose output is |aTrackID| and that
  // still has a live input port; report that port's source stream.
  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
    TrackMapEntry& entry = mTrackMap[i];
    if (entry.mOutputTrackID != aTrackID || !entry.mInputPort) {
      continue;
    }
    return entry.mInputPort->GetSource();
  }

  // No mapped (and connected) input for this track.
  return nullptr;
}
|
|
|
|
|
|
|
|
TrackID
TrackUnionStream::GetInputTrackIDFor(TrackID aTrackID)
{
  // Look up which input track feeds output track |aTrackID|.
  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
    if (mTrackMap[i].mOutputTrackID == aTrackID) {
      return mTrackMap[i].mInputTrackID;
    }
  }

  // Unknown output track.
  return TRACK_NONE;
}
|
|
|
|
|
2016-03-03 19:28:37 +03:00
|
|
|
void
TrackUnionStream::AddDirectTrackListenerImpl(already_AddRefed<DirectMediaStreamTrackListener> aListener,
                                             TrackID aTrackID)
{
  // Take ownership of the passed-in reference so we can use it locally and
  // decide below who finally gets it (the input stream, or the pending list).
  RefPtr<DirectMediaStreamTrackListener> listener = aListener;

  // If the output track is already mapped to an input, forward the listener
  // to the input stream's track.
  for (TrackMapEntry& entry : mTrackMap) {
    if (entry.mOutputTrackID == aTrackID) {
      MediaStream* source = entry.mInputPort->GetSource();
      STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p adding direct listener "
                                   "%p for track %d. Forwarding to input "
                                   "stream %p track %d.",
                                   this, listener.get(), aTrackID, source,
                                   entry.mInputTrackID));
      // Keep our own reference so we can manage the listener's disabled-count
      // and remove it from the input stream later.
      entry.mOwnedDirectListeners.AppendElement(listener);
      // Sync the listener's disabled-count with this track's current state
      // (balanced by DecreaseDisabled() in SetTrackEnabledImpl /
      // RemoveDirectTrackListenerImpl).
      if (mDisabledTrackIDs.Contains(aTrackID)) {
        listener->IncreaseDisabled();
      }
      // Hand the addref'ed reference to the input stream.
      source->AddDirectTrackListenerImpl(listener.forget(),
                                         entry.mInputTrackID);
      return;
    }
  }

  // No input mapped for this track yet; park the listener until one appears.
  TrackBound<DirectMediaStreamTrackListener>* bound =
    mPendingDirectTrackListeners.AppendElement();
  bound->mListener = listener.forget();
  bound->mTrackID = aTrackID;
}
|
|
|
|
|
|
|
|
void
TrackUnionStream::RemoveDirectTrackListenerImpl(DirectMediaStreamTrackListener* aListener,
                                                TrackID aTrackID)
{
  for (TrackMapEntry& entry : mTrackMap) {
    // OutputTrackID is unique to this stream so we only need to do this once.
    if (entry.mOutputTrackID != aTrackID) {
      continue;
    }
    // Drop our owning reference to the listener, first undoing any
    // disabled-count we applied in AddDirectTrackListenerImpl /
    // SetTrackEnabledImpl.
    for (size_t i = 0; i < entry.mOwnedDirectListeners.Length(); ++i) {
      if (entry.mOwnedDirectListeners[i] == aListener) {
        STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p removing direct "
                                     "listener %p for track %d, forwarding "
                                     "to input stream %p track %d",
                                     this, aListener, aTrackID,
                                     entry.mInputPort->GetSource(),
                                     entry.mInputTrackID));
        if (mDisabledTrackIDs.Contains(aTrackID)) {
          // Reset the listener's state.
          aListener->DecreaseDisabled();
        }
        entry.mOwnedDirectListeners.RemoveElementAt(i);
        break;
      }
    }
    // Forward to the input
    MediaStream* source = entry.mInputPort->GetSource();
    source->RemoveDirectTrackListenerImpl(aListener, entry.mInputTrackID);
    return;
  }

  // The track was never mapped to an input; the listener may still be
  // waiting in the pending list.
  for (size_t i = 0; i < mPendingDirectTrackListeners.Length(); ++i) {
    TrackBound<DirectMediaStreamTrackListener>& bound =
      mPendingDirectTrackListeners[i];
    if (bound.mListener == aListener && bound.mTrackID == aTrackID) {
      mPendingDirectTrackListeners.RemoveElementAt(i);
      return;
    }
  }
}
|
2015-07-13 18:25:42 +03:00
|
|
|
} // namespace mozilla
|