From aa81ed9e2404c7bc92500282c80cc2d73e1f7ab2 Mon Sep 17 00:00:00 2001 From: Paul Adenot Date: Wed, 23 Apr 2014 11:20:56 +0200 Subject: [PATCH] Bug 998179 - Refactor how MediaStreamGraph get and use their sample rate. r=roc Use the sample rate passed to the OfflineAudioContext constructor in MediaStreamGraph::CreateOfflineInstance, and pass the preferred mixer sample rate to the (real time) MediaStreamGraph constructor. Then, always use this sample rate for the lifetime of the graph. This patch needed to pass the sample rate to the AudioMixer class to avoid relying on globals like it was done before. --HG-- extra : rebase_source : 2802208819887605fe26a7040998fc328b3c9a57 --- content/media/AudioMixer.h | 20 +++++-- content/media/AudioSegment.cpp | 2 +- content/media/MediaStreamGraph.cpp | 54 ++++++++----------- content/media/MediaStreamGraph.h | 14 +++-- content/media/MediaStreamGraphImpl.h | 11 +++- content/media/compiledtest/TestAudioMixer.cpp | 47 ++++++++-------- .../media/webaudio/AudioDestinationNode.cpp | 2 +- 7 files changed, 78 insertions(+), 72 deletions(-) diff --git a/content/media/AudioMixer.h b/content/media/AudioMixer.h index 0c6e6799b4aa..5d07f245cf47 100644 --- a/content/media/AudioMixer.h +++ b/content/media/AudioMixer.h @@ -14,7 +14,8 @@ namespace mozilla { typedef void(*MixerFunc)(AudioDataValue* aMixedBuffer, AudioSampleFormat aFormat, uint32_t aChannels, - uint32_t aFrames); + uint32_t aFrames, + uint32_t aSampleRate); /** * This class mixes multiple streams of audio together to output a single audio @@ -34,7 +35,8 @@ public: AudioMixer(MixerFunc aCallback) : mCallback(aCallback), mFrames(0), - mChannels(0) + mChannels(0), + mSampleRate(0) { } /* Get the data from the mixer. 
This is supposed to be called when all the @@ -43,21 +45,27 @@ public: mCallback(mMixedAudio.Elements(), AudioSampleTypeToFormat::Format, mChannels, - mFrames); + mFrames, + mSampleRate); PodZero(mMixedAudio.Elements(), mMixedAudio.Length()); - mChannels = mFrames = 0; + mSampleRate = mChannels = mFrames = 0; } /* Add a buffer to the mix. aSamples is interleaved. */ - void Mix(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames) { + void Mix(AudioDataValue* aSamples, + uint32_t aChannels, + uint32_t aFrames, + uint32_t aSampleRate) { if (!mFrames && !mChannels) { mFrames = aFrames; mChannels = aChannels; + mSampleRate = aSampleRate; EnsureCapacityAndSilence(); } MOZ_ASSERT(aFrames == mFrames); MOZ_ASSERT(aChannels == mChannels); + MOZ_ASSERT(aSampleRate == mSampleRate); for (uint32_t i = 0; i < aFrames * aChannels; i++) { mMixedAudio[i] += aSamples[i]; @@ -77,6 +85,8 @@ private: uint32_t mFrames; /* Number of channels for this mixing block. */ uint32_t mChannels; + /* Sample rate of the mixed data. */ + uint32_t mSampleRate; /* Buffer containing the mixed audio data. 
*/ nsTArray mMixedAudio; }; diff --git a/content/media/AudioSegment.cpp b/content/media/AudioSegment.cpp index e6bb7922a69e..0cbe80a8b022 100644 --- a/content/media/AudioSegment.cpp +++ b/content/media/AudioSegment.cpp @@ -216,7 +216,7 @@ AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer) aOutput->Write(buf.Elements(), GetDuration(), &(mChunks[mChunks.Length() - 1].mTimeStamp)); if (aMixer) { - aMixer->Mix(buf.Elements(), outputChannels, GetDuration()); + aMixer->Mix(buf.Elements(), outputChannels, GetDuration(), aOutput->GetRate()); } aOutput->Start(); } diff --git a/content/media/MediaStreamGraph.cpp b/content/media/MediaStreamGraph.cpp index 93d7b05f2d06..b289744647e3 100644 --- a/content/media/MediaStreamGraph.cpp +++ b/content/media/MediaStreamGraph.cpp @@ -340,7 +340,7 @@ MediaStreamGraphImpl::GetAudioPosition(MediaStream* aStream) return mCurrentTime; } return aStream->mAudioOutputStreams[0].mAudioPlaybackStartTime + - TicksToTimeRoundDown(IdealAudioRate(), + TicksToTimeRoundDown(mSampleRate, positionInFrames); } @@ -586,7 +586,8 @@ MediaStreamGraphImpl::UpdateStreamOrderForStream(mozilla::LinkedListInsertFarEnd(aMixedBuffer, aFrames, false, - IdealAudioRate(), aChannels, aFormat); + aSampleRate, aChannels, aFormat); } } #endif @@ -847,7 +848,7 @@ MediaStreamGraphImpl::CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTim // XXX for now, allocate stereo output. But we need to fix this to // match the system's ideal channel configuration. 
// NOTE: we presume this is either fast or async-under-the-covers - audioOutputStream->mStream->Init(2, IdealAudioRate(), + audioOutputStream->mStream->Init(2, mSampleRate, AudioChannel::Normal, AudioStream::LowLatency); audioOutputStream->mTrackID = tracks->GetID(); @@ -879,7 +880,7 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream, // the rounding between {Graph,Stream}Time and track ticks is not dependant // on the absolute value of the {Graph,Stream}Time, and so that number of // ticks to play is the same for each cycle. - TrackTicks ticksNeeded = TimeToTicksRoundDown(IdealAudioRate(), aTo) - TimeToTicksRoundDown(IdealAudioRate(), aFrom); + TrackTicks ticksNeeded = TimeToTicksRoundDown(mSampleRate, aTo) - TimeToTicksRoundDown(mSampleRate, aFrom); if (aStream->mAudioOutputStreams.IsEmpty()) { return 0; @@ -897,7 +898,7 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream, StreamBuffer::Track* track = aStream->mBuffer.FindTrack(audioOutput.mTrackID); AudioSegment* audio = track->Get(); AudioSegment output; - MOZ_ASSERT(track->GetRate() == IdealAudioRate()); + MOZ_ASSERT(track->GetRate() == mSampleRate); // offset and audioOutput.mLastTickWritten can differ by at most one sample, // because of the rounding issue. We track that to ensure we don't skip a @@ -933,7 +934,7 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream, if (end >= aTo) { toWrite = ticksNeeded; } else { - toWrite = TimeToTicksRoundDown(IdealAudioRate(), end - aFrom); + toWrite = TimeToTicksRoundDown(mSampleRate, end - aFrom); } if (blocked) { @@ -1284,23 +1285,8 @@ MediaStreamGraphImpl::RunThread() UpdateStreamOrder(); } - TrackRate sampleRate; - // Find the sampling rate that we need to use for non-realtime graphs. - if (!mRealtime) { - for (uint32_t i = 0; i < mStreams.Length(); ++i) { - AudioNodeStream* n = mStreams[i]->AsAudioNodeStream(); - if (n) { - // We know that the rest of the streams will run at the same rate. 
- sampleRate = n->SampleRate(); - break; - } - } - } else { - sampleRate = IdealAudioRate(); - } - GraphTime endBlockingDecisions = - RoundUpToNextAudioBlock(sampleRate, mCurrentTime + MillisecondsToMediaTime(AUDIO_TARGET_MS)); + RoundUpToNextAudioBlock(mSampleRate, mCurrentTime + MillisecondsToMediaTime(AUDIO_TARGET_MS)); bool ensureNextIteration = false; // Grab pending stream input. @@ -1919,7 +1905,8 @@ MediaStream::EnsureTrack(TrackID aTrackId, TrackRate aSampleRate) nsAutoPtr segment(new AudioSegment()); for (uint32_t j = 0; j < mListeners.Length(); ++j) { MediaStreamListener* l = mListeners[j]; - l->NotifyQueuedTrackChanges(Graph(), aTrackId, IdealAudioRate(), 0, + l->NotifyQueuedTrackChanges(Graph(), aTrackId, + GraphImpl()->AudioSampleRate(), 0, MediaStreamListener::TRACK_EVENT_CREATED, *segment); } @@ -2269,7 +2256,7 @@ SourceMediaStream::AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart, data->mInputRate = aRate; // We resample all audio input tracks to the sample rate of the audio mixer. data->mOutputRate = aSegment->GetType() == MediaSegment::AUDIO ? 
- IdealAudioRate() : aRate; + GraphImpl()->AudioSampleRate() : aRate; data->mStart = aStart; data->mCommands = TRACK_CREATE; data->mData = aSegment; @@ -2283,7 +2270,7 @@ void SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment) { if (aSegment->GetType() != MediaSegment::AUDIO || - aTrackData->mInputRate == IdealAudioRate()) { + aTrackData->mInputRate == GraphImpl()->AudioSampleRate()) { return; } AudioSegment* segment = static_cast(aSegment); @@ -2291,7 +2278,7 @@ SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSe int channels = segment->ChannelCount(); SpeexResamplerState* state = speex_resampler_init(channels, aTrackData->mInputRate, - IdealAudioRate(), + GraphImpl()->AudioSampleRate(), SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr); if (state) { @@ -2639,7 +2626,7 @@ ProcessedMediaStream::DestroyImpl() */ static const int32_t INITIAL_CURRENT_TIME = 1; -MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime) +MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime, TrackRate aSampleRate) : mCurrentTime(INITIAL_CURRENT_TIME) , mStateComputedTime(INITIAL_CURRENT_TIME) , mProcessingGraphUpdateIndex(0) @@ -2648,6 +2635,7 @@ MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime) , mLifecycleState(LIFECYCLE_THREAD_NOT_STARTED) , mWaitState(WAITSTATE_RUNNING) , mEndTime(GRAPH_TIME_MAX) + , mSampleRate(aSampleRate) , mNeedAnotherIteration(false) , mForceShutDown(false) , mPostedRunInStableStateEvent(false) @@ -2714,22 +2702,22 @@ MediaStreamGraph::GetInstance() nsContentUtils::RegisterShutdownObserver(new MediaStreamGraphShutdownObserver()); } - gGraph = new MediaStreamGraphImpl(true); + AudioStream::InitPreferredSampleRate(); + + gGraph = new MediaStreamGraphImpl(true, AudioStream::PreferredSampleRate()); STREAM_LOG(PR_LOG_DEBUG, ("Starting up MediaStreamGraph %p", gGraph)); - - AudioStream::InitPreferredSampleRate(); } return gGraph; } MediaStreamGraph* 
-MediaStreamGraph::CreateNonRealtimeInstance() +MediaStreamGraph::CreateNonRealtimeInstance(TrackRate aSampleRate) { NS_ASSERTION(NS_IsMainThread(), "Main thread only"); - MediaStreamGraphImpl* graph = new MediaStreamGraphImpl(false); + MediaStreamGraphImpl* graph = new MediaStreamGraphImpl(false, aSampleRate); return graph; } diff --git a/content/media/MediaStreamGraph.h b/content/media/MediaStreamGraph.h index 3d546deced00..c6f54c9c6c6b 100644 --- a/content/media/MediaStreamGraph.h +++ b/content/media/MediaStreamGraph.h @@ -790,7 +790,8 @@ public: TrackID mID; // Sample rate of the input data. TrackRate mInputRate; - // Sample rate of the output data, always equal to IdealAudioRate() + // Sample rate of the output data, always equal to the sample rate of the + // graph. TrackRate mOutputRate; // Resampler if the rate of the input track does not match the // MediaStreamGraph's. @@ -1078,9 +1079,6 @@ protected: bool mInCycle; }; -// Returns ideal audio rate for processing. -inline TrackRate IdealAudioRate() { return AudioStream::PreferredSampleRate(); } - /** * Initially, at least, we will have a singleton MediaStreamGraph per * process. Each OfflineAudioContext object creates its own MediaStreamGraph @@ -1089,13 +1087,13 @@ inline TrackRate IdealAudioRate() { return AudioStream::PreferredSampleRate(); } class MediaStreamGraph { public: // We ensure that the graph current time advances in multiples of - // IdealAudioBlockSize()/IdealAudioRate(). A stream that never blocks - // and has a track with the ideal audio rate will produce audio in - // multiples of the block size. + // IdealAudioBlockSize()/AudioStream::PreferredSampleRate(). A stream that + // never blocks and has a track with the ideal audio rate will produce audio + // in multiples of the block size. 
// Main thread only static MediaStreamGraph* GetInstance(); - static MediaStreamGraph* CreateNonRealtimeInstance(); + static MediaStreamGraph* CreateNonRealtimeInstance(TrackRate aSampleRate); // Idempotent static void DestroyNonRealtimeInstance(MediaStreamGraph* aGraph); diff --git a/content/media/MediaStreamGraphImpl.h b/content/media/MediaStreamGraphImpl.h index cfcd581ddccc..0cf635315ec2 100644 --- a/content/media/MediaStreamGraphImpl.h +++ b/content/media/MediaStreamGraphImpl.h @@ -120,7 +120,7 @@ public: * output. Those objects currently only support audio, and are used to * implement OfflineAudioContext. They do not support MediaStream inputs. */ - explicit MediaStreamGraphImpl(bool aRealtime); + explicit MediaStreamGraphImpl(bool aRealtime, TrackRate aSampleRate); /** * Unregisters memory reporting and deletes this instance. This should be @@ -392,6 +392,8 @@ public: */ void ResumeAllAudioOutputs(); + TrackRate AudioSampleRate() { return mSampleRate; } + // Data members /** @@ -531,6 +533,13 @@ public: * The graph should stop processing at or after this time. */ GraphTime mEndTime; + + /** + * Sample rate at which this graph runs. For real time graphs, this is + * the rate of the audio mixer. For offline graphs, this is the rate specified + * at construction. + */ + TrackRate mSampleRate; /** * True when another iteration of the control loop is required. */ diff --git a/content/media/compiledtest/TestAudioMixer.cpp b/content/media/compiledtest/TestAudioMixer.cpp index 10f6cb8354b2..a4e20f5d1d6f 100644 --- a/content/media/compiledtest/TestAudioMixer.cpp +++ b/content/media/compiledtest/TestAudioMixer.cpp @@ -11,7 +11,7 @@ using mozilla::AudioSampleFormat; /* In this test, the different audio stream and channels are always created to * cancel each other. 
*/ -void MixingDone(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames) +void MixingDone(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames, uint32_t aSampleRate) { bool silent = true; for (uint32_t i = 0; i < aChannels * aFrames; i++) { @@ -67,6 +67,7 @@ void FillBuffer(AudioDataValue* aBuffer, uint32_t aLength, AudioDataValue aValue int main(int argc, char* argv[]) { const uint32_t CHANNEL_LENGTH = 256; + const uint32_t AUDIO_RATE = 44100; AudioDataValue a[CHANNEL_LENGTH * 2]; AudioDataValue b[CHANNEL_LENGTH * 2]; FillBuffer(a, CHANNEL_LENGTH, GetLowValue()); @@ -81,8 +82,8 @@ int main(int argc, char* argv[]) { fprintf(stderr, "Test AudioMixer constant buffer length.\n"); while (iterations--) { - mixer.Mix(a, 2, CHANNEL_LENGTH); - mixer.Mix(b, 2, CHANNEL_LENGTH); + mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); } } @@ -96,22 +97,22 @@ int main(int argc, char* argv[]) { FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue()); FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue()); FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue()); - mixer.Mix(a, 2, CHANNEL_LENGTH / 2); - mixer.Mix(b, 2, CHANNEL_LENGTH / 2); + mixer.Mix(a, 2, CHANNEL_LENGTH / 2, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH / 2, AUDIO_RATE); mixer.FinishMixing(); FillBuffer(a, CHANNEL_LENGTH, GetLowValue()); FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH, GetHighValue()); FillBuffer(b, CHANNEL_LENGTH, GetHighValue()); FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH, GetLowValue()); - mixer.Mix(a, 2, CHANNEL_LENGTH); - mixer.Mix(b, 2, CHANNEL_LENGTH); + mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue()); FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue()); FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue()); 
FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue()); - mixer.Mix(a, 2, CHANNEL_LENGTH / 2); - mixer.Mix(b, 2, CHANNEL_LENGTH / 2); + mixer.Mix(a, 2, CHANNEL_LENGTH / 2, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH / 2, AUDIO_RATE); mixer.FinishMixing(); } @@ -122,14 +123,14 @@ int main(int argc, char* argv[]) { mozilla::AudioMixer mixer(MixingDone); fprintf(stderr, "Test AudioMixer variable channel count.\n"); - mixer.Mix(a, 1, CHANNEL_LENGTH); - mixer.Mix(b, 1, CHANNEL_LENGTH); + mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); - mixer.Mix(a, 1, CHANNEL_LENGTH); - mixer.Mix(b, 1, CHANNEL_LENGTH); + mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); - mixer.Mix(a, 1, CHANNEL_LENGTH); - mixer.Mix(b, 1, CHANNEL_LENGTH); + mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); } @@ -137,16 +138,16 @@ int main(int argc, char* argv[]) { mozilla::AudioMixer mixer(MixingDone); fprintf(stderr, "Test AudioMixer variable stream count.\n"); - mixer.Mix(a, 2, CHANNEL_LENGTH); - mixer.Mix(b, 2, CHANNEL_LENGTH); + mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); - mixer.Mix(a, 2, CHANNEL_LENGTH); - mixer.Mix(b, 2, CHANNEL_LENGTH); - mixer.Mix(a, 2, CHANNEL_LENGTH); - mixer.Mix(b, 2, CHANNEL_LENGTH); + mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); - mixer.Mix(a, 2, CHANNEL_LENGTH); - mixer.Mix(b, 2, CHANNEL_LENGTH); + mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); + mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); } diff --git a/content/media/webaudio/AudioDestinationNode.cpp b/content/media/webaudio/AudioDestinationNode.cpp index 
e47b741152a0..bd9b8481375f 100644 --- a/content/media/webaudio/AudioDestinationNode.cpp +++ b/content/media/webaudio/AudioDestinationNode.cpp @@ -244,7 +244,7 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext, , mExtraCurrentTimeUpdatedSinceLastStableState(false) { MediaStreamGraph* graph = aIsOffline ? - MediaStreamGraph::CreateNonRealtimeInstance() : + MediaStreamGraph::CreateNonRealtimeInstance(aSampleRate) : MediaStreamGraph::GetInstance(); AudioNodeEngine* engine = aIsOffline ? new OfflineDestinationNodeEngine(this, aNumberOfChannels,