/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioNodeStream.h"

#include "MediaStreamGraphImpl.h"
#include "AudioNodeEngine.h"
#include "ThreeDPoint.h"
#include "AudioChannelFormat.h"
#include "AudioParamTimeline.h"
#include "AudioContext.h"

using namespace mozilla::dom;

namespace mozilla {

/**
 * An AudioNodeStream produces a single audio track with ID
 * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
 * for regular audio contexts, and the rate requested by the web content
 * for offline audio contexts.
 * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
 * Note: AUDIO_TRACK must be a different value than MEDIA_STREAM_DEST_TRACK_ID.
 */

AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
                                 Flags aFlags,
                                 TrackRate aSampleRate,
                                 AudioContext::AudioContextId aContextId)
  : ProcessedMediaStream(nullptr),
    mEngine(aEngine),
    mSampleRate(aSampleRate),
    mAudioContextId(aContextId),
    mFlags(aFlags),
    mNumberOfInputChannels(2),
    mMarkAsFinishedAfterThisBlock(false),
    mAudioParamStream(false),
    mPassThrough(false)
{
  MOZ_ASSERT(NS_IsMainThread());
  mChannelCountMode = ChannelCountMode::Max;
  mChannelInterpretation = ChannelInterpretation::Speakers;
  // AudioNodes are always producing data
  mHasCurrentData = true;
  mLastChunks.SetLength(std::max(uint16_t(1), mEngine->OutputCount()));
  MOZ_COUNT_CTOR(AudioNodeStream);
}

AudioNodeStream::~AudioNodeStream()
{
  MOZ_COUNT_DTOR(AudioNodeStream);
}
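
// A typical caller constructs an engine for a node and hands it to Create().
// A sketch only: "MyNodeEngine" is a hypothetical AudioNodeEngine subclass,
// and EXTERNAL_OUTPUT is the flag used later in this file (exact scoping of
// the Flags enum is per AudioNodeStream.h):
//
//   nsRefPtr<AudioNodeStream> stream =
//     AudioNodeStream::Create(graph, new MyNodeEngine(node),
//                             AudioNodeStream::EXTERNAL_OUTPUT);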

/* static */ already_AddRefed<AudioNodeStream>
AudioNodeStream::Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
                        Flags aFlags)
{
  MOZ_ASSERT(NS_IsMainThread());

  // MediaRecorders use an AudioNodeStream, but no AudioNode
  AudioNode* node = aEngine->NodeMainThread();
  MOZ_ASSERT(!node || aGraph->GraphRate() == node->Context()->SampleRate());

  dom::AudioContext::AudioContextId contextIdForStream =
    node ? node->Context()->Id() : NO_AUDIO_CONTEXT;
  nsRefPtr<AudioNodeStream> stream =
    new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate(),
                        contextIdForStream);
  if (aEngine->HasNode()) {
    stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
                                           aEngine->NodeMainThread()->ChannelCountModeValue(),
                                           aEngine->NodeMainThread()->ChannelInterpretationValue());
  }
  aGraph->AddStream(stream);
  return stream.forget();
}

size_t
AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = 0;

  // Not reported:
  // - mEngine

  amount += ProcessedMediaStream::SizeOfExcludingThis(aMallocSizeOf);
  amount += mLastChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
  for (size_t i = 0; i < mLastChunks.Length(); i++) {
    // NB: This is currently unshared only as there are instances of
    // double reporting in DMD otherwise.
    amount += mLastChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf);
  }

  return amount;
}

size_t
AudioNodeStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

void
AudioNodeStream::SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                               AudioNodeSizes& aUsage) const
{
  // Explicitly separate out the stream memory.
  aUsage.mStream = SizeOfIncludingThis(aMallocSizeOf);

  if (mEngine) {
    // This will fill out the rest of |aUsage|.
    mEngine->SizeOfIncludingThis(aMallocSizeOf, aUsage);
  }
}
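
// The Set*Parameter() methods below all follow the same pattern: parameters
// are set from the main thread, but the engine runs on the graph thread, so
// each setter wraps its value in a ControlMessage whose Run() applies the
// value on the graph thread. A new setter would look like this (a sketch;
// "SetFooParameter" is a hypothetical engine method):
//
//   class Message final : public ControlMessage {
//   public:
//     Message(AudioNodeStream* aStream, float aFoo)
//       : ControlMessage(aStream), mFoo(aFoo) {}
//     virtual void Run() override
//     {
//       static_cast<AudioNodeStream*>(mStream)->Engine()->SetFooParameter(mFoo);
//     }
//     float mFoo;
//   };
//   GraphImpl()->AppendMessage(new Message(this, aFoo));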

void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                        double aStreamTime)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* mRelativeToStream;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex,
                                         aContext->DestinationStream(),
                                         aContext->DOMTimeToStreamTime(aStreamTime)));
}

void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                            double aStreamTime)
{
  StreamTime ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
  mEngine->SetStreamTimeParameter(aIndex, ticks);
}

void
AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetDoubleParameter(mIndex, mValue);
    }
    double mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetInt32Parameter(mIndex, mValue);
    }
    int32_t mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioParamTimeline& aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioParamTimeline& aValue)
      : ControlMessage(aStream),
        mValue(aValue),
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue, mSampleRate);
    }
    AudioParamTimeline mValue;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}

void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
      : ControlMessage(aStream), mBuffer(aBuffer)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetBuffer(mBuffer.forget());
    }
    nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  };

  GraphImpl()->AppendMessage(new Message(this, aBuffer));
}

void
AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            nsTArray<float>& aData)
      : ControlMessage(aStream)
    {
      mData.SwapElements(aData);
    }
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
    }
    nsTArray<float> mData;
  };

  GraphImpl()->AppendMessage(new Message(this, aData));
}

void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                         mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };

  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
                                         aChannelCountMode,
                                         aChannelInterpretation));
}

void
AudioNodeStream::SetPassThrough(bool aPassThrough)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, bool aPassThrough)
      : ControlMessage(aStream), mPassThrough(aPassThrough)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->mPassThrough = mPassThrough;
    }
    bool mPassThrough;
  };

  GraphImpl()->AppendMessage(new Message(this, aPassThrough));
}

void
AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                                ChannelCountMode aChannelCountMode,
                                                ChannelInterpretation aChannelInterpretation)
{
  // Make sure that we're not clobbering any significant bits by fitting these
  // values in 16 bits.
  MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
  MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);

  mNumberOfInputChannels = aNumberOfChannels;
  mChannelCountMode = aChannelCountMode;
  mChannelInterpretation = aChannelInterpretation;
}
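
// How the three ChannelCountMode values play out, with
// mNumberOfInputChannels == 2:
// - inputs mixing to 4 channels: Explicit -> 2, Clamped_max -> min(4, 2) == 2,
//   Max -> 4
// - inputs mixing to 1 channel:  Explicit -> 2, Clamped_max -> min(1, 2) == 1,
//   Max -> 1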

uint32_t
AudioNodeStream::ComputedNumberOfChannels(uint32_t aInputChannelCount)
{
  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the channel count we've calculated from inputs, and just use
    // mNumberOfInputChannels.
    return mNumberOfInputChannels;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    return std::min(aInputChannelCount, mNumberOfInputChannels);
  default:
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    return aInputChannelCount;
  }
}
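
// ObtainInputBlock() gathers the last output chunk of every stream connected
// to input port aPortIndex, computes the output channel count (the superset
// of the inputs' channel counts, run through ComputedNumberOfChannels()),
// and then accumulates each input chunk into aTmpChunk, up- or down-mixing
// as needed. Null chunks and AudioParam streams are skipped.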

void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}

void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
                                      AudioChunk* aBlock,
                                      nsTArray<float>* aDownmixBuffer)
{
  nsAutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = aBlock->ChannelFloatsForWrite(c);
    if (inputData) {
      if (aInputIndex == 0) {
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
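
// UpMixDownMixChunk() adjusts a chunk's channel list to aOutputChannelCount.
// With ChannelInterpretation::Speakers, mixing follows the speaker-layout
// rules (AudioChannelsUpMix/AudioChannelsDownMix); with the discrete
// interpretation, missing channels are padded with silence and extra
// channels are simply dropped.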

void
AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const float*>& aOutputChannels,
                                   nsTArray<float>& aDownmixBuffer)
{
  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};

  for (uint32_t i = 0; i < aChunk->mChannelData.Length(); i++) {
    aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
  }
  if (aOutputChannels.Length() < aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, SilentChannel::ZeroChannel<float>());
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining aOutputChannels with zeros
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(silenceChannel);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the extra aOutputChannels
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
                                       aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
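
// ProcessInput() drives one block of processing: it gathers an input chunk
// per input port, hands the chunks to the engine (ProcessBlock() for simple
// one-in/one-out engines, ProcessBlocksOnPorts() otherwise, or a straight
// copy when the stream is in pass-through mode), and leaves the results in
// mLastChunks for downstream streams to read.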

// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  if (!mFinished) {
    EnsureTrack(AUDIO_TRACK);
  }
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  uint16_t outputCount = mLastChunks.Length();
  MOZ_ASSERT(outputCount == std::max(uint16_t(1), mEngine->OutputCount()));

  // Consider this stream blocked if it has already finished output. Normally
  // mBlocked would reflect this, but due to rounding errors our audio track may
  // appear to extend slightly beyond aFrom, so we might not be blocked yet.
  bool blocked = mFinished || mBlocked.GetAt(aFrom);
  // If the stream has finished at this time, it will be blocked.
  if (blocked || InMutedCycle()) {
    mInputChunks.Clear();
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    mInputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(mInputChunks[i], i);
    }
    bool finished = false;
    if (mPassThrough) {
      MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port");
      mLastChunks[0] = mInputChunks[0];
    } else {
      if (maxInputs <= 1 && outputCount <= 1) {
        mEngine->ProcessBlock(this, mInputChunks[0], &mLastChunks[0], &finished);
      } else {
        mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
      }
    }
    for (auto& chunk : mInputChunks) {
      // If the buffer is shared then it won't be reused, so release the
      // reference now. Keep the channel data array to save a free/alloc
      // pair.
      chunk.ReleaseBufferIfShared();
    }
    for (uint16_t i = 0; i < outputCount; ++i) {
      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                   "Invalid WebAudio chunk size");
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }

    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
      for (uint32_t i = 0; i < outputCount; ++i) {
        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  if (!blocked) {
    // Don't output anything while blocked
    AdvanceOutputSegment();
    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
      // This stream was finished the last time that we looked at it, and all
      // of the depending streams have finished their output as well, so now
      // it's time to mark this stream as finished.
      FinishOutput();
    }
  }
}

void
AudioNodeStream::ProduceOutputBeforeInput(GraphTime aFrom)
{
  MOZ_ASSERT(mEngine->AsDelayNodeEngine());
  MOZ_ASSERT(mEngine->OutputCount() == 1,
             "DelayNodeEngine output count should be 1");
  MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles");
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // Consider this stream blocked if it has already finished output. Normally
  // mBlocked would reflect this, but due to rounding errors our audio track may
  // appear to extend slightly beyond aFrom, so we might not be blocked yet.
  bool blocked = mFinished || mBlocked.GetAt(aFrom);
  // If the stream has finished at this time, it will be blocked.
  if (blocked) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    mEngine->ProduceBlockBeforeInput(&mLastChunks[0]);
    NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                 "Invalid WebAudio chunk size");
    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
      mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  }
}

void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  AudioSegment* segment = track->Get<AudioSegment>();

  if (mFlags & EXTERNAL_OUTPUT) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}

StreamTime
AudioNodeStream::GetCurrentPosition()
{
  NS_ASSERTION(!mFinished, "Don't create another track after finishing");
  return EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->GetDuration();
}

void
AudioNodeStream::FinishOutput()
{
  if (IsFinishedOnGraphThread()) {
    return;
  }

  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  track->SetEnded();
  FinishOnGraphThread();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
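
// FractionalTicksFromDestinationTime() converts a time in seconds at the
// destination stream into a (fractional) tick count at this stream. The
// integer part of the destination tick count is mapped through graph time,
// and the fractional part is carried over separately because StreamTime
// (an integer tick count) cannot represent it.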

double
AudioNodeStream::FractionalTicksFromDestinationTime(AudioNodeStream* aDestination,
                                                    double aSeconds)
{
  MOZ_ASSERT(aDestination->SampleRate() == SampleRate());
  MOZ_ASSERT(SampleRate() == GraphRate());

  double destinationSeconds = std::max(0.0, aSeconds);
  double destinationFractionalTicks = destinationSeconds * SampleRate();
  MOZ_ASSERT(destinationFractionalTicks < STREAM_TIME_MAX);
  StreamTime destinationStreamTime = destinationFractionalTicks; // round down
  // MediaTime does not have the resolution of double
  double offset = destinationFractionalTicks - destinationStreamTime;

  GraphTime graphTime =
    aDestination->StreamTimeToGraphTime(destinationStreamTime);
  StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
  double thisFractionalTicks = thisStreamTime + offset;
  MOZ_ASSERT(thisFractionalTicks >= 0.0);
  return thisFractionalTicks;
}

StreamTime
AudioNodeStream::TicksFromDestinationTime(MediaStream* aDestination,
                                          double aSeconds)
{
  AudioNodeStream* destination = aDestination->AsAudioNodeStream();
  MOZ_ASSERT(destination);

  double thisSeconds =
    FractionalTicksFromDestinationTime(destination, aSeconds);
  // Round to nearest; truncating thisSeconds + 0.5 rounds correctly because
  // FractionalTicksFromDestinationTime() never returns a negative value.
  StreamTime ticks = thisSeconds + 0.5;
  return ticks;
}

double
AudioNodeStream::DestinationTimeFromTicks(AudioNodeStream* aDestination,
                                          StreamTime aPosition)
{
  MOZ_ASSERT(SampleRate() == aDestination->SampleRate());
  GraphTime graphTime = StreamTimeToGraphTime(aPosition);
  StreamTime destinationTime = aDestination->GraphTimeToStreamTimeOptimistic(graphTime);
  return StreamTimeToSeconds(destinationTime);
}

} // namespace mozilla