Bug 866434 - Part 2: Give each AudioParam that is connected to an AudioNode an AudioNodeStream; r=roc

These MediaStreams are used as a way to down-mix the input AudioChunks, and
also as a way to get proper stream processing ordering.  The MediaStream for
the source AudioNode is an input to these streams, and these streams in turn
are inputs to the MediaStream owned by the AudioNode that owns the AudioParam.
This way, the Media Streams Graph processing code will order the streams so
that by the time the MediaStream for a given node is processed, all of the
MediaStreams belonging to the AudioNode(s) feeding into the AudioParam have
been processed.

This has a tricky side-effect: those streams are also considered when
determining the input block for the AudioNodeStream belonging to the
AudioParam's owner AudioNode.  In order to fix that, we simply special-case
those streams and make AudioNodeStream::ObtainInputBlock ignore them.
This commit is contained in:
Ehsan Akhgari 2013-05-01 21:02:31 -04:00
Parent caeeb616ee
Commit 85820ff13a
8 changed files: 124 additions and 21 deletions

Просмотреть файл

@ -154,7 +154,6 @@ public:
: mNode(aNode)
, mNodeMutex("AudioNodeEngine::mNodeMutex")
{
MOZ_ASSERT(mNode, "The engine is constructed with a null node");
MOZ_COUNT_CTOR(AudioNodeEngine);
}
virtual ~AudioNodeEngine()
@ -210,6 +209,11 @@ public:
Mutex& NodeMutex() { return mNodeMutex;}
bool HasNode() const
{
return !!mNode;
}
dom::AudioNode* Node() const
{
mNodeMutex.AssertCurrentThreadOwns();

Просмотреть файл

@ -102,19 +102,19 @@ AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioParamTimeline& aValue)
{
  // Forward a timeline update to the engine on the graph thread.  The value
  // is copied into the message so the main-thread object can change freely
  // afterwards.
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioParamTimeline& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue);
    }
    AudioParamTimeline mValue;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
@ -247,7 +247,8 @@ AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
MediaStream* s = mInputs[i]->GetSource();
AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
MOZ_ASSERT(a == s->AsAudioNodeStream());
if (a->IsFinishedOnGraphThread()) {
if (a->IsFinishedOnGraphThread() ||
a->IsAudioParamStream()) {
continue;
}
AudioChunk* chunk = &a->mLastChunk;

Просмотреть файл

@ -22,6 +22,7 @@ namespace mozilla {
namespace dom {
struct ThreeDPoint;
class AudioParamTimeline;
}
class ThreadSharedFloatArrayBufferList;
@ -49,7 +50,8 @@ public:
mEngine(aEngine),
mKind(aKind),
mNumberOfInputChannels(2),
mMarkAsFinishedAfterThisBlock(false)
mMarkAsFinishedAfterThisBlock(false),
mAudioParamStream(false)
{
mMixingMode.mChannelCountMode = dom::ChannelCountMode::Max;
mMixingMode.mChannelInterpretation = dom::ChannelInterpretation::Speakers;
@ -74,6 +76,11 @@ public:
// Main-thread entry point for updating the channel mixing configuration
// used when up/down-mixing this stream's inputs.
void SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                dom::ChannelCountMode aChannelCountMode,
                                dom::ChannelInterpretation aChannelInterpretation);
// Marks this stream as a helper stream that aggregates AudioNode inputs to
// an AudioParam.  The flag is one-way — it can never be cleared — and such
// streams are skipped by AudioNodeStream::ObtainInputBlock.
void SetAudioParamHelperStream()
{
MOZ_ASSERT(!mAudioParamStream, "Can only do this once");
mAudioParamStream = true;
}
virtual AudioNodeStream* AsAudioNodeStream() { return this; }
@ -86,6 +93,10 @@ public:
virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo);
TrackTicks GetCurrentPosition();
bool AllInputsFinished() const;
// True when this stream only exists to feed an AudioParam (see
// SetAudioParamHelperStream); AudioNodeStream::ObtainInputBlock ignores
// such streams when gathering a node's input block.
bool IsAudioParamStream() const
{
return mAudioParamStream;
}
// Any thread
AudioNodeEngine* Engine() { return mEngine; }
@ -112,6 +123,8 @@ protected:
// Whether the stream should be marked as finished as soon
// as the current time range has been computed block by block.
bool mMarkAsFinishedAfterThisBlock;
// Whether the stream is an AudioParamHelper stream.
bool mAudioParamStream;
};
}

Просмотреть файл

@ -2017,9 +2017,11 @@ MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
aEngine->NodeMainThread()->ChannelCountModeValue(),
aEngine->NodeMainThread()->ChannelInterpretationValue());
if (aEngine->HasNode()) {
stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
aEngine->NodeMainThread()->ChannelCountModeValue(),
aEngine->NodeMainThread()->ChannelInterpretationValue());
}
graph->AppendMessage(new CreateMessage(stream));
return stream;
}

Просмотреть файл

@ -66,19 +66,21 @@ AudioNode::~AudioNode()
MOZ_ASSERT(mOutputParams.IsEmpty());
}
// Returns the index of the first entry in aInputNodes whose source node is
// aNode, or nsTArray<InputNode>::NoIndex if there is no such entry.  The
// template lets this work for the InputNode arrays of both AudioNode and
// AudioParam.
template <class InputNode>
static uint32_t
FindIndexOfNode(const nsTArray<InputNode>& aInputNodes, const AudioNode* aNode)
{
  for (uint32_t i = 0; i < aInputNodes.Length(); ++i) {
    if (aInputNodes[i].mInputNode == aNode) {
      return i;
    }
  }
  return nsTArray<InputNode>::NoIndex;
}
template <class InputNode>
static uint32_t
FindIndexOfNodeWithPorts(const nsTArray<AudioNode::InputNode>& aInputNodes, const AudioNode* aNode,
FindIndexOfNodeWithPorts(const nsTArray<InputNode>& aInputNodes, const AudioNode* aNode,
uint32_t aInputPort, uint32_t aOutputPort)
{
for (uint32_t i = 0; i < aInputNodes.Length(); ++i) {
@ -88,7 +90,7 @@ FindIndexOfNodeWithPorts(const nsTArray<AudioNode::InputNode>& aInputNodes, cons
return i;
}
}
return nsTArray<AudioNode::InputNode>::NoIndex;
return nsTArray<InputNode>::NoIndex;
}
void
@ -199,6 +201,13 @@ AudioNode::Connect(AudioParam& aDestination, uint32_t aOutput,
input->mInputNode = this;
input->mInputPort = INVALID_PORT;
input->mOutputPort = aOutput;
MediaStream* stream = aDestination.Stream();
MOZ_ASSERT(stream->AsProcessedStream());
ProcessedMediaStream* ps = static_cast<ProcessedMediaStream*>(stream);
// Setup our stream as an input to the AudioParam's stream
input->mStreamPort = ps->AllocateInputPort(mStream, MediaInputPort::FLAG_BLOCK_INPUT);
}
void

Просмотреть файл

@ -9,12 +9,14 @@
#include "nsIDOMWindow.h"
#include "mozilla/ErrorResult.h"
#include "mozilla/dom/AudioParamBinding.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
namespace mozilla {
namespace dom {
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioParam)
  // Destroy the helper stream and its input port (if any) before unlinking
  // the owner node.
  tmp->DisconnectFromGraphAndDestroyStream();
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mNode)
  NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
@ -33,7 +35,7 @@ AudioParam::Release()
if (mRefCnt.get() == 1) {
// We are about to be deleted, disconnect the object from the graph before
// the derived type is destroyed.
DisconnectFromGraph();
DisconnectFromGraphAndDestroyStream();
}
NS_IMPL_CC_NATIVE_RELEASE_BODY(AudioParam)
}
@ -64,7 +66,7 @@ AudioParam::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
}
void
AudioParam::DisconnectFromGraph()
AudioParam::DisconnectFromGraphAndDestroyStream()
{
// Addref this temporarily so the refcount bumping below doesn't destroy us
// prematurely
@ -76,6 +78,46 @@ AudioParam::DisconnectFromGraph()
mInputNodes.RemoveElementAt(i);
input->RemoveOutputParam(this);
}
if (mNodeStreamPort) {
mNodeStreamPort->Destroy();
mNodeStreamPort = nullptr;
}
if (mStream) {
mStream->Destroy();
mStream = nullptr;
}
}
// Lazily creates (and thereafter returns) the helper AudioNodeStream that
// aggregates the output of the AudioNodes connected to this AudioParam.
// On first use it also wires the helper stream up as an input to the owner
// AudioNode's stream and notifies the graph via mCallback.
MediaStream*
AudioParam::Stream()
{
if (mStream) {
return mStream;
}
// The helper stream needs an engine, but it has no AudioNode of its own
// (see AudioNodeEngine::HasNode).
AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
nsRefPtr<AudioNodeStream> stream = mNode->Context()->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
// Force the input to have only one channel, and make it down-mix using
// the speaker rules if needed.
stream->SetChannelMixingParametersImpl(1, ChannelCountMode::Explicit, ChannelInterpretation::Speakers);
// Mark as an AudioParam helper stream
stream->SetAudioParamHelperStream();
mStream = stream.forget();
// Setup the AudioParam's stream as an input to the owner AudioNode's stream
MediaStream* nodeStream = mNode->Stream();
MOZ_ASSERT(nodeStream->AsProcessedStream());
ProcessedMediaStream* ps = static_cast<ProcessedMediaStream*>(nodeStream);
mNodeStreamPort = ps->AllocateInputPort(mStream, MediaInputPort::FLAG_BLOCK_INPUT);
// Let the MSG's copy of AudioParamTimeline know about the change in the stream
mCallback(mNode);
return mStream;
}
}

Просмотреть файл

@ -105,6 +105,11 @@ public:
return mDefaultValue;
}
// The AudioNode that owns this AudioParam.
AudioNode* Node() const
{
return mNode;
}
const nsTArray<AudioNode::InputNode>& InputNodes() const
{
return mInputNodes;
@ -120,7 +125,10 @@ public:
return mInputNodes.AppendElement();
}
// Drops all input nodes and tears down the helper stream and its input
// port, if they exist.
void DisconnectFromGraphAndDestroyStream();

// May create the stream if it doesn't exist
MediaStream* Stream();
protected:
nsCycleCollectingAutoRefCnt mRefCnt;
@ -133,6 +141,8 @@ private:
nsTArray<AudioNode::InputNode> mInputNodes;
CallbackType mCallback;
const float mDefaultValue;
// The input port used to connect the AudioParam's stream to its node's stream
nsRefPtr<MediaInputPort> mNodeStreamPort;
};
}

Просмотреть файл

@ -7,17 +7,39 @@
#ifndef AudioParamTimeline_h_
#define AudioParamTimeline_h_
// This header is intended to make it possible to use AudioParamTimeline
// from multiple places without dealing with #include hell!
#include "AudioEventTimeline.h"
#include "mozilla/ErrorResult.h"
#include "nsAutoPtr.h"
#include "MediaStreamGraph.h"
namespace mozilla {
namespace dom {
// This helper class is used to represent the part of the AudioParam
// class that gets sent to AudioNodeEngine instances.  In addition to
// AudioEventTimeline methods, it holds a pointer to an optional
// MediaStream which represents the AudioNode inputs to the AudioParam.
// This MediaStream is managed by the AudioParam subclass on the main
// thread, and can only be obtained from the AudioNodeEngine instances
// consuming this class.
class AudioParamTimeline : public AudioEventTimeline<ErrorResult>
{
public:
  explicit AudioParamTimeline(float aDefaultValue)
    : AudioEventTimeline<ErrorResult>(aDefaultValue)
  {
  }

  // The stream aggregating this param's AudioNode inputs, or null if none
  // has been created yet.
  MediaStream* Stream() const
  {
    return mStream;
  }

protected:
  // This is created lazily when needed.
  nsRefPtr<MediaStream> mStream;
};
}
}