Mirror of https://github.com/mozilla/gecko-dev.git
bug 1197043 use flags to distinguish between external streams and events r=padenot
MediaStreamAudioDestinationNode does not need any main thread events because mDOMStream provides GetCurrentTime to consumers. MediaRecorder also does not use main thread current time.
--HG--
extra : rebase_source : e022dc12e8a0e67c70d4a617449e28e76288b57e
Parent: 330a9459ea
Commit: 46ff786925
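The change in a nutshell — a condensed sketch of the new AudioNodeStream::Flags bitfield introduced in AudioNodeStream.h below (not a verbatim excerpt; the surrounding class and other members are elided):

  typedef unsigned Flags;
  enum : Flags {
    NO_STREAM_FLAGS = 0U,
    NEED_MAIN_THREAD_FINISHED = 1U << 0,     // main thread observes when the stream finishes
    NEED_MAIN_THREAD_CURRENT_TIME = 1U << 1, // main thread reads the stream's current time
    EXTERNAL_OUTPUT = 1U << 2,               // output feeds other ProcessedMediaStreams or hardware
  };

Callers now request only the behaviour they need instead of picking a single stream "kind"; for example, MediaRecorder combines EXTERNAL_OUTPUT | NEED_MAIN_THREAD_FINISHED, while MediaStreamAudioDestinationNode passes just EXTERNAL_OUTPUT because its mDOMStream already exposes the current time.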
@@ -776,8 +776,10 @@ MediaRecorder::MediaRecorder(AudioNode& aSrcAudioNode,
   if (aSrcAudioNode.NumberOfOutputs() > 0) {
     AudioContext* ctx = aSrcAudioNode.Context();
     AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
-    mPipeStream = AudioNodeStream::Create(ctx->Graph(), engine,
-                                          AudioNodeStream::EXTERNAL_STREAM);
+    AudioNodeStream::Flags flags =
+      AudioNodeStream::EXTERNAL_OUTPUT |
+      AudioNodeStream::NEED_MAIN_THREAD_FINISHED;
+    mPipeStream = AudioNodeStream::Create(ctx->Graph(), engine, flags);
     AudioNodeStream* ns = aSrcAudioNode.GetStream();
     if (ns) {
       mInputPort = mPipeStream->AllocateInputPort(aSrcAudioNode.GetStream(),
@@ -87,7 +87,7 @@ AnalyserNode::AnalyserNode(AudioContext* aContext)
 {
   mStream = AudioNodeStream::Create(aContext->Graph(),
                                     new AnalyserNodeEngine(this),
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
 
   // Enough chunks must be recorded to handle the case of fftSize being
   // increased to maximum immediately before getFloatTimeDomainData() is
@@ -543,7 +543,7 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
 {
   AudioBufferSourceNodeEngine* engine = new AudioBufferSourceNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::SOURCE_STREAM);
+                                    AudioNodeStream::NEED_MAIN_THREAD_FINISHED);
   engine->SetSourceStream(mStream);
   mStream->AddMainThreadListener(this);
 }
@@ -346,8 +346,11 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                  aLength, aSampleRate) :
     static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));
 
-  mStream = AudioNodeStream::Create(graph, engine,
-                                    AudioNodeStream::EXTERNAL_STREAM);
+  AudioNodeStream::Flags flags =
+    AudioNodeStream::NEED_MAIN_THREAD_CURRENT_TIME |
+    AudioNodeStream::NEED_MAIN_THREAD_FINISHED |
+    AudioNodeStream::EXTERNAL_OUTPUT;
+  mStream = AudioNodeStream::Create(graph, engine, flags);
   mStream->AddMainThreadListener(this);
   mStream->AddAudioOutput(&gWebAudioOutputKey);
 
@@ -13,7 +13,7 @@ using namespace mozilla::dom;
 namespace mozilla {
 
 AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate, uint32_t aContextId)
-  : AudioNodeStream(aEngine, INTERNAL_STREAM, aSampleRate, aContextId)
+  : AudioNodeStream(aEngine, NO_STREAM_FLAGS, aSampleRate, aContextId)
 {
   MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
 }
@@ -26,14 +26,14 @@ namespace mozilla {
  */
 
 AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
-                                 AudioNodeStreamKind aKind,
+                                 Flags aFlags,
                                  TrackRate aSampleRate,
                                  AudioContext::AudioContextId aContextId)
   : ProcessedMediaStream(nullptr),
     mEngine(aEngine),
     mSampleRate(aSampleRate),
     mAudioContextId(aContextId),
-    mKind(aKind),
+    mFlags(aFlags),
     mNumberOfInputChannels(2),
     mMarkAsFinishedAfterThisBlock(false),
     mAudioParamStream(false),
@@ -55,7 +55,7 @@ AudioNodeStream::~AudioNodeStream()
 
 /* static */ already_AddRefed<AudioNodeStream>
 AudioNodeStream::Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
-                        AudioNodeStreamKind aKind)
+                        Flags aFlags)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
@@ -66,7 +66,7 @@ AudioNodeStream::Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
   dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() :
                                                                  NO_AUDIO_CONTEXT;
   nsRefPtr<AudioNodeStream> stream =
-    new AudioNodeStream(aEngine, aKind, aGraph->GraphRate(),
+    new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate(),
                         contextIdForStream);
   if (aEngine->HasNode()) {
     stream->SetChannelMixingParametersImpl(aEngine->NodeMainThread()->ChannelCount(),
@@ -595,7 +595,7 @@ AudioNodeStream::AdvanceOutputSegment()
   StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
   AudioSegment* segment = track->Get<AudioSegment>();
 
-  if (mKind == EXTERNAL_STREAM) {
+  if (mFlags & EXTERNAL_OUTPUT) {
     segment->AppendAndConsumeChunk(&mLastChunks[0]);
   } else {
     segment->AppendNullData(mLastChunks[0].GetDuration());
@@ -43,24 +43,30 @@ public:
 
   typedef nsAutoTArray<AudioChunk, 1> OutputChunks;
 
-  // Internal AudioNodeStreams can only pass their output to another
-  // AudioNode, whereas external AudioNodeStreams can pass their output
-  // to an nsAudioStream for playback.
-  enum AudioNodeStreamKind { SOURCE_STREAM, INTERNAL_STREAM, EXTERNAL_STREAM };
+  // Flags re main thread updates and stream output.
+  typedef unsigned Flags;
+  enum : Flags {
+    NO_STREAM_FLAGS = 0U,
+    NEED_MAIN_THREAD_FINISHED = 1U << 0,
+    NEED_MAIN_THREAD_CURRENT_TIME = 1U << 1,
+    // Internal AudioNodeStreams can only pass their output to another
+    // AudioNode, whereas external AudioNodeStreams can pass their output
+    // to other ProcessedMediaStreams or hardware audio output.
+    EXTERNAL_OUTPUT = 1U << 2,
+  };
   /**
    * Create a stream that will process audio for an AudioNode.
    * Takes ownership of aEngine.
    */
   static already_AddRefed<AudioNodeStream>
-  Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine,
-         AudioNodeStreamKind aKind);
+  Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine, Flags aKind);
 
 protected:
   /**
    * Transfers ownership of aEngine to the new AudioNodeStream.
    */
   AudioNodeStream(AudioNodeEngine* aEngine,
-                  AudioNodeStreamKind aKind,
+                  Flags aFlags,
                   TrackRate aSampleRate,
                   AudioContext::AudioContextId aContextId);
 
@@ -123,9 +129,8 @@ public:
   }
   virtual bool MainThreadNeedsUpdates() const override
   {
-    // Only source and external streams need updates on the main thread.
-    return (mKind == SOURCE_STREAM && mFinished) ||
-           mKind == EXTERNAL_STREAM;
+    return ((mFlags & NEED_MAIN_THREAD_FINISHED) && mFinished) ||
+           (mFlags & NEED_MAIN_THREAD_CURRENT_TIME);
   }
   virtual bool IsIntrinsicallyConsumed() const override
   {
@@ -186,7 +191,7 @@ protected:
   // AudioContext. It is set on the main thread, in the constructor.
   const AudioContext::AudioContextId mAudioContextId;
   // Whether this is an internal or external stream
-  const AudioNodeStreamKind mKind;
+  const Flags mFlags;
   // The number of input channels that this stream requires. 0 means don't care.
   uint32_t mNumberOfInputChannels;
   // The mixing modes
@@ -101,7 +101,7 @@ AudioParam::Stream()
   AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
   nsRefPtr<AudioNodeStream> stream =
     AudioNodeStream::Create(mNode->Context()->Graph(), engine,
-                            AudioNodeStream::INTERNAL_STREAM);
+                            AudioNodeStream::NO_STREAM_FLAGS);
 
   // Force the input to have only one channel, and make it down-mix using
   // the speaker rules if needed.
@@ -251,7 +251,7 @@ BiquadFilterNode::BiquadFilterNode(AudioContext* aContext)
 {
   BiquadFilterNodeEngine* engine = new BiquadFilterNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
   engine->SetSourceStream(mStream);
 }
 
@@ -75,7 +75,7 @@ ChannelMergerNode::ChannelMergerNode(AudioContext* aContext,
 {
   mStream = AudioNodeStream::Create(aContext->Graph(),
                                     new ChannelMergerNodeEngine(this),
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
 }
 
 ChannelMergerNode::~ChannelMergerNode()
@@ -62,7 +62,7 @@ ChannelSplitterNode::ChannelSplitterNode(AudioContext* aContext,
 {
   mStream = AudioNodeStream::Create(aContext->Graph(),
                                     new ChannelSplitterNodeEngine(this),
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
 }
 
 ChannelSplitterNode::~ChannelSplitterNode()
@@ -192,7 +192,7 @@ ConvolverNode::ConvolverNode(AudioContext* aContext)
 {
   ConvolverNodeEngine* engine = new ConvolverNodeEngine(this, mNormalize);
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
 }
 
 ConvolverNode::~ConvolverNode()
@@ -199,7 +199,7 @@ DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
     new DelayNodeEngine(this, aContext->Destination(),
                         aContext->SampleRate() * aMaxDelay);
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
   engine->SetSourceStream(mStream);
 }
 
@@ -204,7 +204,7 @@ DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext)
 {
   DynamicsCompressorNodeEngine* engine = new DynamicsCompressorNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
   engine->SetSourceStream(mStream);
 }
 
@@ -129,7 +129,7 @@ GainNode::GainNode(AudioContext* aContext)
 {
   GainNodeEngine* engine = new GainNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
   engine->SetSourceStream(mStream);
 }
 
@@ -40,7 +40,7 @@ MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* a
   MOZ_ASSERT(!!outputStream);
   AudioNodeEngine* engine = new AudioNodeEngine(this);
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::EXTERNAL_STREAM);
+                                    AudioNodeStream::EXTERNAL_OUTPUT);
   mPort = outputStream->AllocateInputPort(mStream);
 
   nsIDocument* doc = aContext->GetParentObject()->GetExtantDoc();
@@ -385,7 +385,7 @@ OscillatorNode::OscillatorNode(AudioContext* aContext)
 {
   OscillatorNodeEngine* engine = new OscillatorNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::SOURCE_STREAM);
+                                    AudioNodeStream::NEED_MAIN_THREAD_FINISHED);
   engine->SetSourceStream(mStream);
   mStream->AddMainThreadListener(this);
 }
@@ -242,7 +242,7 @@ PannerNode::PannerNode(AudioContext* aContext)
 {
   mStream = AudioNodeStream::Create(aContext->Graph(),
                                     new PannerNodeEngine(this),
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
   // We should register once we have set up our stream and engine.
   Context()->Listener()->RegisterPannerNode(this);
 }
@@ -521,7 +521,7 @@ ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
                                                      BufferSize(),
                                                      aNumberOfInputChannels);
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
   engine->SetSourceStream(mStream);
 }
 
@@ -182,7 +182,7 @@ StereoPannerNode::StereoPannerNode(AudioContext* aContext)
 {
   StereoPannerNodeEngine* engine = new StereoPannerNodeEngine(this, aContext->Destination());
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
   engine->SetSourceStream(mStream);
 }
 
@@ -289,7 +289,7 @@ WaveShaperNode::WaveShaperNode(AudioContext* aContext)
 
   WaveShaperNodeEngine* engine = new WaveShaperNodeEngine(this);
   mStream = AudioNodeStream::Create(aContext->Graph(), engine,
-                                    AudioNodeStream::INTERNAL_STREAM);
+                                    AudioNodeStream::NO_STREAM_FLAGS);
 }
 
 WaveShaperNode::~WaveShaperNode()