2013-05-07 15:59:16 +04:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
|
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#include "AudioNodeStream.h"
|
|
|
|
|
|
|
|
#include "MediaStreamGraphImpl.h"
|
|
|
|
#include "AudioNodeEngine.h"
|
|
|
|
#include "ThreeDPoint.h"
|
2013-09-06 00:25:17 +04:00
|
|
|
#include "AudioChannelFormat.h"
|
|
|
|
#include "AudioParamTimeline.h"
|
2014-01-15 15:08:20 +04:00
|
|
|
#include "AudioContext.h"
|
2015-09-16 07:31:12 +03:00
|
|
|
#include "nsMathUtils.h"
|
2013-05-07 15:59:16 +04:00
|
|
|
|
|
|
|
using namespace mozilla::dom;
|
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
|
|
|
|
/**
 * An AudioNodeStream produces a single audio track with ID
 * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
 * for regular audio contexts, and the rate requested by the web content
 * for offline audio contexts.
 * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
 * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID
 */
|
|
|
|
|
2014-11-20 22:41:18 +03:00
|
|
|
AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
                                 Flags aFlags,
                                 TrackRate aSampleRate)
  : ProcessedMediaStream(nullptr),
    mEngine(aEngine),
    mSampleRate(aSampleRate),
    mFlags(aFlags),
    mNumberOfInputChannels(2),
    mIsActive(aEngine->IsActive()),
    mMarkAsFinishedAfterThisBlock(false),
    mAudioParamStream(false),
    mPassThrough(false)
{
  // Constructed on the main thread; the stream is later handed to the graph
  // via MediaStreamGraph::AddStream() (see Create()).
  MOZ_ASSERT(NS_IsMainThread());
  mChannelCountMode = ChannelCountMode::Max;
  mChannelInterpretation = ChannelInterpretation::Speakers;
  // AudioNodes are always producing data
  mHasCurrentData = true;
  // Engines with zero outputs still get one chunk slot so processing code
  // always has somewhere to write.
  mLastChunks.SetLength(std::max(uint16_t(1), mEngine->OutputCount()));
  MOZ_COUNT_CTOR(AudioNodeStream);
}
|
|
|
|
|
2013-05-07 15:59:16 +04:00
|
|
|
AudioNodeStream::~AudioNodeStream()
{
  // All active inputs should have been removed before destruction.
  MOZ_ASSERT(mActiveInputCount == 0);
  MOZ_COUNT_DTOR(AudioNodeStream);
}
|
|
|
|
|
2015-09-10 06:13:45 +03:00
|
|
|
void
AudioNodeStream::DestroyImpl()
{
  // These are graph thread objects, so clean up on graph thread.
  mInputChunks.Clear();
  mLastChunks.Clear();

  // Let the base class finish tearing the stream down.
  ProcessedMediaStream::DestroyImpl();
}
|
|
|
|
|
2015-08-12 02:26:24 +03:00
|
|
|
/* static */ already_AddRefed<AudioNodeStream>
AudioNodeStream::Create(AudioContext* aCtx, AudioNodeEngine* aEngine,
                        Flags aFlags, MediaStreamGraph* aGraph)
{
  MOZ_ASSERT(NS_IsMainThread());

  // MediaRecorders use an AudioNodeStream, but no AudioNode
  AudioNode* node = aEngine->NodeMainThread();
  // Prefer an explicitly supplied graph; fall back to the context's graph.
  MediaStreamGraph* graph = aGraph ? aGraph : aCtx->Graph();
  MOZ_ASSERT(graph->GraphRate() == aCtx->SampleRate());

  nsRefPtr<AudioNodeStream> stream =
    new AudioNodeStream(aEngine, aFlags, graph->GraphRate());
  if (node) {
    // Copy the node's current channel mixing settings onto the new stream.
    stream->SetChannelMixingParametersImpl(node->ChannelCount(),
                                           node->ChannelCountModeValue(),
                                           node->ChannelInterpretationValue());
  }
  // Streams created while the context is suspended must start suspended too.
  graph->AddStream(stream,
    aCtx->ShouldSuspendNewStream() ? MediaStreamGraph::ADD_STREAM_SUSPENDED : 0);
  return stream.forget();
}
|
|
|
|
|
2014-04-13 22:08:10 +04:00
|
|
|
size_t
AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  // Not reported:
  // - mEngine
  size_t total = ProcessedMediaStream::SizeOfExcludingThis(aMallocSizeOf);
  total += mLastChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
  for (const auto& chunk : mLastChunks) {
    // NB: This is currently unshared only as there are instances of
    // double reporting in DMD otherwise.
    total += chunk.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
  }

  return total;
}
|
|
|
|
|
|
|
|
size_t
AudioNodeStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  // Shallow size of this object plus everything it owns.
  size_t self = aMallocSizeOf(this);
  return self + SizeOfExcludingThis(aMallocSizeOf);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
AudioNodeStream::SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
|
|
|
|
AudioNodeSizes& aUsage) const
|
|
|
|
{
|
|
|
|
// Explicitly separate out the stream memory.
|
|
|
|
aUsage.mStream = SizeOfIncludingThis(aMallocSizeOf);
|
|
|
|
|
|
|
|
if (mEngine) {
|
|
|
|
// This will fill out the rest of |aUsage|.
|
|
|
|
mEngine->SizeOfIncludingThis(aMallocSizeOf, aUsage);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-07 15:59:16 +04:00
|
|
|
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                        double aStreamTime)
{
  // Engine state may only be touched on the graph thread, so marshal the
  // value over via a ControlMessage. The DOM-time conversion happens here,
  // on the main thread.
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* mRelativeToStream;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex,
                                         aContext->DestinationStream(),
                                         aContext->DOMTimeToStreamTime(aStreamTime)));
}
|
|
|
|
|
|
|
|
void
|
|
|
|
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
|
|
|
|
double aStreamTime)
|
|
|
|
{
|
2014-09-18 09:20:43 +04:00
|
|
|
StreamTime ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
|
2013-05-07 15:59:16 +04:00
|
|
|
mEngine->SetStreamTimeParameter(aIndex, ticks);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
{
  // Marshal the new value to the graph thread; engine parameters may only
  // be mutated there.
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetDoubleParameter(mIndex, mValue);
    }
    double mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
  // Same marshaling pattern as SetDoubleParameter, for int32 values.
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetInt32Parameter(mIndex, mValue);
    }
    int32_t mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioParamTimeline& aValue)
{
  // The timeline is copied into the message. The sample rate is captured on
  // the main thread so the engine can interpret event times consistently.
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioParamTimeline& aValue)
      : ControlMessage(aStream),
        mValue(aValue),
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue, mSampleRate);
    }
    AudioParamTimeline mValue;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  // Same marshaling pattern as the scalar setters, for 3D point values
  // (e.g. panner position/orientation).
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
{
  // Ownership of the thread-shared buffer transfers into the message on the
  // main thread, then to the engine on the graph thread via forget().
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
      : ControlMessage(aStream), mBuffer(aBuffer)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetBuffer(mBuffer.forget());
    }
    nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  };

  GraphImpl()->AppendMessage(new Message(this, aBuffer));
}
|
|
|
|
|
2013-05-14 08:12:30 +04:00
|
|
|
void
AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
{
  // The caller's array is swapped (not copied) into the message, so aData
  // is left empty on return.
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            nsTArray<float>& aData)
      : ControlMessage(aStream)
    {
      mData.SwapElements(aData);
    }
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
    }
    nsTArray<float> mData;
  };

  GraphImpl()->AppendMessage(new Message(this, aData));
}
|
|
|
|
|
2013-05-07 15:59:16 +04:00
|
|
|
void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
  // Marshal the three mixing parameters to the graph thread, where the
  // *Impl variant updates the stream's member state.
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
        SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                       mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };

  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
                                         aChannelCountMode,
                                         aChannelInterpretation));
}
|
|
|
|
|
2014-08-19 04:12:50 +04:00
|
|
|
void
AudioNodeStream::SetPassThrough(bool aPassThrough)
{
  // mPassThrough is read on the graph thread (see ProcessInput), so flip it
  // there via a ControlMessage.
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, bool aPassThrough)
      : ControlMessage(aStream), mPassThrough(aPassThrough)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->mPassThrough = mPassThrough;
    }
    bool mPassThrough;
  };

  GraphImpl()->AppendMessage(new Message(this, aPassThrough));
}
|
|
|
|
|
2013-05-07 15:59:16 +04:00
|
|
|
void
AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                                ChannelCountMode aChannelCountMode,
                                                ChannelInterpretation aChannelInterpretation)
{
  // Make sure that we're not clobbering any significant bits by fitting these
  // values in 16 bits.
  MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
  MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);

  mNumberOfInputChannels = aNumberOfChannels;
  mChannelCountMode = aChannelCountMode;
  mChannelInterpretation = aChannelInterpretation;
}
|
|
|
|
|
2013-07-24 14:11:35 +04:00
|
|
|
uint32_t
AudioNodeStream::ComputedNumberOfChannels(uint32_t aInputChannelCount)
{
  // Apply this node's channelCountMode policy to the channel count computed
  // from the inputs.
  if (mChannelCountMode == ChannelCountMode::Explicit) {
    // Disregard the channel count we've calculated from inputs, and just use
    // mNumberOfInputChannels.
    return mNumberOfInputChannels;
  }
  if (mChannelCountMode == ChannelCountMode::Clamped_max) {
    // Clamp the computed output channel count to mNumberOfInputChannels.
    return std::min(aInputChannelCount, mNumberOfInputChannels);
  }
  // ChannelCountMode::Max (and any unexpected value): pass through unchanged.
  return aInputChannelCount;
}
|
|
|
|
|
2013-05-07 15:59:16 +04:00
|
|
|
void
AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk,
                                  uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<const AudioBlock*,250> inputChunks;
  // Gather the most recent output chunk of every input connected to this
  // port, computing the superset channel count as we go.
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      // AudioParam streams influence ordering only; they carry no audio here.
      continue;
    }

    const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
  }

  // Apply this node's channelCount/channelCountMode policy.
  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) {
    // No usable input: produce silence.
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->ChannelCount() == outputChannelCount) {
    // A single input already in the right shape: share it without mixing.
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  aTmpChunk.AllocateChannels(outputChannelCount);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  // Up/down-mix each input chunk and sum them all into aTmpChunk.
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex,
                                      const AudioBlock& aChunk,
                                      AudioBlock* aBlock,
                                      nsTArray<float>* aDownmixBuffer)
{
  // Reshape aChunk's channels to aBlock's channel count, then add (scaled by
  // the chunk's volume) into aBlock.
  nsAutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
  UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = aBlock->ChannelFloatsForWrite(c);
    if (inputData) {
      // The first chunk overwrites the output; later chunks accumulate.
      if (aInputIndex == 0) {
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      // Null channel data: only the first chunk needs to clear the output;
      // adding nothing afterwards is a no-op.
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
|
2013-05-07 15:59:16 +04:00
|
|
|
|
2013-07-24 14:11:35 +04:00
|
|
|
void
AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const float*>& aOutputChannels,
                                   nsTArray<float>& aDownmixBuffer)
{
  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};

  // Start with pointers to the chunk's own channel data.
  for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
    aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
  }
  if (aOutputChannels.Length() < aOutputChannelCount) {
    // Too few channels: up-mix.
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, SilentChannel::ZeroChannel<float>());
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining aOutputChannels by zeros
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(silenceChannel);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    // Too many channels: down-mix.
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      // Speaker down-mix writes into aDownmixBuffer, which must outlive the
      // pointers we hand back in aOutputChannels.
      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the remaining aOutputChannels
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
                                       aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
|
|
|
|
|
|
|
|
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  uint16_t outputCount = mLastChunks.Length();
  MOZ_ASSERT(outputCount == std::max(uint16_t(1), mEngine->OutputCount()));

  if (!mIsActive) {
    // mLastChunks are already null.
#ifdef DEBUG
    for (const auto& chunk : mLastChunks) {
      MOZ_ASSERT(chunk.IsNull());
    }
#endif
  } else if (InMutedCycle()) {
    // In a muted cycle every output is silence; cached inputs are not needed.
    mInputChunks.Clear();
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    mInputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(mInputChunks[i], i);
    }
    bool finished = false;
    if (mPassThrough) {
      // Pass-through: copy input straight to output, bypassing the engine.
      MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port");
      mLastChunks[0] = mInputChunks[0];
    } else {
      if (maxInputs <= 1 && outputCount <= 1) {
        mEngine->ProcessBlock(this, aFrom,
                              mInputChunks[0], &mLastChunks[0], &finished);
      } else {
        mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
      }
    }
    for (uint16_t i = 0; i < outputCount; ++i) {
      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                   "Invalid WebAudio chunk size");
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
      CheckForInactive();
    }

    // A disabled track outputs silence regardless of what was produced.
    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
      for (uint32_t i = 0; i < outputCount; ++i) {
        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  if (!mFinished) {
    // Don't output anything while finished
    AdvanceOutputSegment();
    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
      // This stream was finished the last time that we looked at it, and all
      // of the depending streams have finished their output as well, so now
      // it's time to mark this stream as finished.
      FinishOutput();
    }
  }
}
|
|
|
|
|
2014-07-17 04:55:55 +04:00
|
|
|
void
AudioNodeStream::ProduceOutputBeforeInput(GraphTime aFrom)
{
  // Only the DelayNode engine produces output before reading input; this is
  // what allows it to break cycles in the graph.
  MOZ_ASSERT(mEngine->AsDelayNodeEngine());
  MOZ_ASSERT(mEngine->OutputCount() == 1,
             "DelayNodeEngine output count should be 1");
  MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles");
  MOZ_ASSERT(mLastChunks.Length() == 1);

  if (!mIsActive) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    mEngine->ProduceBlockBeforeInput(aFrom, &mLastChunks[0]);
    NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                 "Invalid WebAudio chunk size");
    // Disabled tracks output silence.
    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
      mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  }
}
|
|
|
|
|
2013-07-24 14:11:35 +04:00
|
|
|
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  AudioSegment* segment = track->Get<AudioSegment>();

  // Only streams with external consumers keep real data in the segment;
  // everything else appends null data of the same duration.
  if (mFlags & EXTERNAL_OUTPUT && !mLastChunks[0].IsNull()) {
    segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk());
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  // Notify listeners with a copy of this block's output.
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0].AsAudioChunk();
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::FinishOutput()
{
  // Idempotent: nothing to do if we already finished on the graph thread.
  if (IsFinishedOnGraphThread()) {
    return;
  }

  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  track->SetEnded();
  FinishOnGraphThread();

  // Tell listeners the audio track has ended.
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
|
|
|
|
|
2014-02-27 02:45:04 +04:00
|
|
|
double
AudioNodeStream::FractionalTicksFromDestinationTime(AudioNodeStream* aDestination,
                                                    double aSeconds)
{
  MOZ_ASSERT(aDestination->SampleRate() == SampleRate());
  MOZ_ASSERT(SampleRate() == GraphRate());

  // Negative times clamp to the start of the destination's timeline.
  double destinationSeconds = std::max(0.0, aSeconds);
  double destinationFractionalTicks = destinationSeconds * SampleRate();
  MOZ_ASSERT(destinationFractionalTicks < STREAM_TIME_MAX);
  StreamTime destinationStreamTime = destinationFractionalTicks; // round down
  // MediaTime does not have the resolution of double
  double offset = destinationFractionalTicks - destinationStreamTime;

  // Convert whole ticks via the shared graph clock, then re-apply the
  // sub-tick fraction lost in the integer conversion.
  GraphTime graphTime =
    aDestination->StreamTimeToGraphTime(destinationStreamTime);
  StreamTime thisStreamTime = GraphTimeToStreamTime(graphTime);
  double thisFractionalTicks = thisStreamTime + offset;
  return thisFractionalTicks;
}
|
|
|
|
|
2014-09-18 09:20:43 +04:00
|
|
|
StreamTime
AudioNodeStream::TicksFromDestinationTime(MediaStream* aDestination,
                                          double aSeconds)
{
  // Destination must itself be an AudioNodeStream.
  AudioNodeStream* destNodeStream = aDestination->AsAudioNodeStream();
  MOZ_ASSERT(destNodeStream);

  // Convert fractionally, then round to the nearest whole tick.
  return NS_round(FractionalTicksFromDestinationTime(destNodeStream, aSeconds));
}
|
|
|
|
|
|
|
|
double
|
|
|
|
AudioNodeStream::DestinationTimeFromTicks(AudioNodeStream* aDestination,
|
2014-09-18 09:20:43 +04:00
|
|
|
StreamTime aPosition)
|
2014-02-17 00:46:56 +04:00
|
|
|
{
|
|
|
|
MOZ_ASSERT(SampleRate() == aDestination->SampleRate());
|
2015-09-16 07:35:16 +03:00
|
|
|
|
|
|
|
GraphTime graphTime = StreamTimeToGraphTime(aPosition);
|
|
|
|
StreamTime destinationTime = aDestination->GraphTimeToStreamTime(graphTime);
|
2014-06-12 08:44:58 +04:00
|
|
|
return StreamTimeToSeconds(destinationTime);
|
2014-02-17 00:46:56 +04:00
|
|
|
}
|
|
|
|
|
2015-09-17 15:03:00 +03:00
|
|
|
void
|
|
|
|
AudioNodeStream::AddInput(MediaInputPort* aPort)
|
|
|
|
{
|
|
|
|
ProcessedMediaStream::AddInput(aPort);
|
|
|
|
AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
|
|
|
|
// Streams that are not AudioNodeStreams are considered active.
|
|
|
|
if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
|
|
|
|
IncrementActiveInputCount();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
void
|
|
|
|
AudioNodeStream::RemoveInput(MediaInputPort* aPort)
|
|
|
|
{
|
|
|
|
ProcessedMediaStream::RemoveInput(aPort);
|
|
|
|
AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
|
|
|
|
// Streams that are not AudioNodeStreams are considered active.
|
|
|
|
if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
|
|
|
|
DecrementActiveInputCount();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::SetActive()
{
  // Already active, or about to finish — nothing to do.
  if (mIsActive || mMarkAsFinishedAfterThisBlock) {
    return;
  }

  mIsActive = true;
  if (IsAudioParamStream()) {
    // Consumers merely influence stream order.
    // They do not read from the stream.
    return;
  }

  // Becoming active makes each downstream AudioNodeStream count this stream
  // as an active input.
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->IncrementActiveInputCount();
    }
  }
}
|
|
|
|
|
|
|
|
void
AudioNodeStream::CheckForInactive()
{
  // Stay active while any input is active or the engine itself still has
  // work to do — unless this block finishes the stream. Also bail out if we
  // are already inactive.
  if (((mActiveInputCount > 0 || mEngine->IsActive()) &&
       !mMarkAsFinishedAfterThisBlock) ||
      !mIsActive) {
    return;
  }

  mIsActive = false;
  mInputChunks.Clear(); // not required for foreseeable future
  // Downstream consumers expect null chunks from inactive streams.
  for (auto& chunk : mLastChunks) {
    chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  if (IsAudioParamStream()) {
    return;
  }

  // Going inactive releases our active-input contribution downstream.
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->DecrementActiveInputCount();
    }
  }
}
|
|
|
|
|
|
|
|
void
|
|
|
|
AudioNodeStream::IncrementActiveInputCount()
|
|
|
|
{
|
|
|
|
++mActiveInputCount;
|
|
|
|
SetActive();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
AudioNodeStream::DecrementActiveInputCount()
|
|
|
|
{
|
|
|
|
MOZ_ASSERT(mActiveInputCount > 0);
|
|
|
|
--mActiveInputCount;
|
|
|
|
CheckForInactive();
|
|
|
|
}
|
|
|
|
|
2015-07-13 18:25:42 +03:00
|
|
|
} // namespace mozilla
|