Bug 998179 - Refactor how MediaStreamGraph get and use their sample rate. r=roc

Use the sample rate passed to the OfflineAudioContext constructor in
MediaStreamGraph::CreateNonRealtimeInstance, and pass the preferred mixer sample
rate to the (real time) MediaStreamGraph constructor.

Then, always use this sample rate for the lifetime of the graph.

This patch also passes the sample rate to the AudioMixer class, to avoid
relying on globals as was done before.

--HG--
extra : rebase_source : 2802208819887605fe26a7040998fc328b3c9a57
This commit is contained in:
Paul Adenot 2014-04-23 11:20:56 +02:00
Родитель 75660868f4
Коммит aa81ed9e24
7 изменённых файлов: 78 добавлений и 72 удалений

Просмотреть файл

@ -14,7 +14,8 @@ namespace mozilla {
typedef void(*MixerFunc)(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
uint32_t aFrames);
uint32_t aFrames,
uint32_t aSampleRate);
/**
* This class mixes multiple streams of audio together to output a single audio
@ -34,7 +35,8 @@ public:
AudioMixer(MixerFunc aCallback)
: mCallback(aCallback),
mFrames(0),
mChannels(0)
mChannels(0),
mSampleRate(0)
{ }
/* Get the data from the mixer. This is supposed to be called when all the
@ -43,21 +45,27 @@ public:
mCallback(mMixedAudio.Elements(),
AudioSampleTypeToFormat<AudioDataValue>::Format,
mChannels,
mFrames);
mFrames,
mSampleRate);
PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
mChannels = mFrames = 0;
mSampleRate = mChannels = mFrames = 0;
}
/* Add a buffer to the mix. aSamples is interleaved. */
void Mix(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames) {
void Mix(AudioDataValue* aSamples,
uint32_t aChannels,
uint32_t aFrames,
uint32_t aSampleRate) {
if (!mFrames && !mChannels) {
mFrames = aFrames;
mChannels = aChannels;
mSampleRate = aSampleRate;
EnsureCapacityAndSilence();
}
MOZ_ASSERT(aFrames == mFrames);
MOZ_ASSERT(aChannels == mChannels);
MOZ_ASSERT(aSampleRate == mSampleRate);
for (uint32_t i = 0; i < aFrames * aChannels; i++) {
mMixedAudio[i] += aSamples[i];
@ -77,6 +85,8 @@ private:
uint32_t mFrames;
/* Number of channels for this mixing block. */
uint32_t mChannels;
/* Sample rate of the mixed data. */
uint32_t mSampleRate;
/* Buffer containing the mixed audio data. */
nsTArray<AudioDataValue> mMixedAudio;
};

Просмотреть файл

@ -216,7 +216,7 @@ AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
aOutput->Write(buf.Elements(), GetDuration(), &(mChunks[mChunks.Length() - 1].mTimeStamp));
if (aMixer) {
aMixer->Mix(buf.Elements(), outputChannels, GetDuration());
aMixer->Mix(buf.Elements(), outputChannels, GetDuration(), aOutput->GetRate());
}
aOutput->Start();
}

Просмотреть файл

@ -340,7 +340,7 @@ MediaStreamGraphImpl::GetAudioPosition(MediaStream* aStream)
return mCurrentTime;
}
return aStream->mAudioOutputStreams[0].mAudioPlaybackStartTime +
TicksToTimeRoundDown(IdealAudioRate(),
TicksToTimeRoundDown(mSampleRate,
positionInFrames);
}
@ -586,7 +586,8 @@ MediaStreamGraphImpl::UpdateStreamOrderForStream(mozilla::LinkedList<MediaStream
static void AudioMixerCallback(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
uint32_t aFrames)
uint32_t aFrames,
uint32_t aSampleRate)
{
// Need an api to register mixer callbacks, bug 989921
#ifdef MOZ_WEBRTC
@ -594,7 +595,7 @@ static void AudioMixerCallback(AudioDataValue* aMixedBuffer,
// XXX need Observer base class and registration API
if (gFarendObserver) {
gFarendObserver->InsertFarEnd(aMixedBuffer, aFrames, false,
IdealAudioRate(), aChannels, aFormat);
aSampleRate, aChannels, aFormat);
}
}
#endif
@ -847,7 +848,7 @@ MediaStreamGraphImpl::CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTim
// XXX for now, allocate stereo output. But we need to fix this to
// match the system's ideal channel configuration.
// NOTE: we presume this is either fast or async-under-the-covers
audioOutputStream->mStream->Init(2, IdealAudioRate(),
audioOutputStream->mStream->Init(2, mSampleRate,
AudioChannel::Normal,
AudioStream::LowLatency);
audioOutputStream->mTrackID = tracks->GetID();
@ -879,7 +880,7 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
// the rounding between {Graph,Stream}Time and track ticks is not dependent
// on the absolute value of the {Graph,Stream}Time, and so that number of
// ticks to play is the same for each cycle.
TrackTicks ticksNeeded = TimeToTicksRoundDown(IdealAudioRate(), aTo) - TimeToTicksRoundDown(IdealAudioRate(), aFrom);
TrackTicks ticksNeeded = TimeToTicksRoundDown(mSampleRate, aTo) - TimeToTicksRoundDown(mSampleRate, aFrom);
if (aStream->mAudioOutputStreams.IsEmpty()) {
return 0;
@ -897,7 +898,7 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
StreamBuffer::Track* track = aStream->mBuffer.FindTrack(audioOutput.mTrackID);
AudioSegment* audio = track->Get<AudioSegment>();
AudioSegment output;
MOZ_ASSERT(track->GetRate() == IdealAudioRate());
MOZ_ASSERT(track->GetRate() == mSampleRate);
// offset and audioOutput.mLastTickWritten can differ by at most one sample,
// because of the rounding issue. We track that to ensure we don't skip a
@ -933,7 +934,7 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
if (end >= aTo) {
toWrite = ticksNeeded;
} else {
toWrite = TimeToTicksRoundDown(IdealAudioRate(), end - aFrom);
toWrite = TimeToTicksRoundDown(mSampleRate, end - aFrom);
}
if (blocked) {
@ -1284,23 +1285,8 @@ MediaStreamGraphImpl::RunThread()
UpdateStreamOrder();
}
TrackRate sampleRate;
// Find the sampling rate that we need to use for non-realtime graphs.
if (!mRealtime) {
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
AudioNodeStream* n = mStreams[i]->AsAudioNodeStream();
if (n) {
// We know that the rest of the streams will run at the same rate.
sampleRate = n->SampleRate();
break;
}
}
} else {
sampleRate = IdealAudioRate();
}
GraphTime endBlockingDecisions =
RoundUpToNextAudioBlock(sampleRate, mCurrentTime + MillisecondsToMediaTime(AUDIO_TARGET_MS));
RoundUpToNextAudioBlock(mSampleRate, mCurrentTime + MillisecondsToMediaTime(AUDIO_TARGET_MS));
bool ensureNextIteration = false;
// Grab pending stream input.
@ -1919,7 +1905,8 @@ MediaStream::EnsureTrack(TrackID aTrackId, TrackRate aSampleRate)
nsAutoPtr<MediaSegment> segment(new AudioSegment());
for (uint32_t j = 0; j < mListeners.Length(); ++j) {
MediaStreamListener* l = mListeners[j];
l->NotifyQueuedTrackChanges(Graph(), aTrackId, IdealAudioRate(), 0,
l->NotifyQueuedTrackChanges(Graph(), aTrackId,
GraphImpl()->AudioSampleRate(), 0,
MediaStreamListener::TRACK_EVENT_CREATED,
*segment);
}
@ -2269,7 +2256,7 @@ SourceMediaStream::AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart,
data->mInputRate = aRate;
// We resample all audio input tracks to the sample rate of the audio mixer.
data->mOutputRate = aSegment->GetType() == MediaSegment::AUDIO ?
IdealAudioRate() : aRate;
GraphImpl()->AudioSampleRate() : aRate;
data->mStart = aStart;
data->mCommands = TRACK_CREATE;
data->mData = aSegment;
@ -2283,7 +2270,7 @@ void
SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment)
{
if (aSegment->GetType() != MediaSegment::AUDIO ||
aTrackData->mInputRate == IdealAudioRate()) {
aTrackData->mInputRate == GraphImpl()->AudioSampleRate()) {
return;
}
AudioSegment* segment = static_cast<AudioSegment*>(aSegment);
@ -2291,7 +2278,7 @@ SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSe
int channels = segment->ChannelCount();
SpeexResamplerState* state = speex_resampler_init(channels,
aTrackData->mInputRate,
IdealAudioRate(),
GraphImpl()->AudioSampleRate(),
SPEEX_RESAMPLER_QUALITY_DEFAULT,
nullptr);
if (state) {
@ -2639,7 +2626,7 @@ ProcessedMediaStream::DestroyImpl()
*/
static const int32_t INITIAL_CURRENT_TIME = 1;
MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime)
MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime, TrackRate aSampleRate)
: mCurrentTime(INITIAL_CURRENT_TIME)
, mStateComputedTime(INITIAL_CURRENT_TIME)
, mProcessingGraphUpdateIndex(0)
@ -2648,6 +2635,7 @@ MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime)
, mLifecycleState(LIFECYCLE_THREAD_NOT_STARTED)
, mWaitState(WAITSTATE_RUNNING)
, mEndTime(GRAPH_TIME_MAX)
, mSampleRate(aSampleRate)
, mNeedAnotherIteration(false)
, mForceShutDown(false)
, mPostedRunInStableStateEvent(false)
@ -2714,22 +2702,22 @@ MediaStreamGraph::GetInstance()
nsContentUtils::RegisterShutdownObserver(new MediaStreamGraphShutdownObserver());
}
gGraph = new MediaStreamGraphImpl(true);
AudioStream::InitPreferredSampleRate();
gGraph = new MediaStreamGraphImpl(true, AudioStream::PreferredSampleRate());
STREAM_LOG(PR_LOG_DEBUG, ("Starting up MediaStreamGraph %p", gGraph));
AudioStream::InitPreferredSampleRate();
}
return gGraph;
}
MediaStreamGraph*
MediaStreamGraph::CreateNonRealtimeInstance()
MediaStreamGraph::CreateNonRealtimeInstance(TrackRate aSampleRate)
{
NS_ASSERTION(NS_IsMainThread(), "Main thread only");
MediaStreamGraphImpl* graph = new MediaStreamGraphImpl(false);
MediaStreamGraphImpl* graph = new MediaStreamGraphImpl(false, aSampleRate);
return graph;
}

Просмотреть файл

@ -790,7 +790,8 @@ public:
TrackID mID;
// Sample rate of the input data.
TrackRate mInputRate;
// Sample rate of the output data, always equal to IdealAudioRate()
// Sample rate of the output data, always equal to the sample rate of the
// graph.
TrackRate mOutputRate;
// Resampler if the rate of the input track does not match the
// MediaStreamGraph's.
@ -1078,9 +1079,6 @@ protected:
bool mInCycle;
};
// Returns ideal audio rate for processing.
inline TrackRate IdealAudioRate() { return AudioStream::PreferredSampleRate(); }
/**
* Initially, at least, we will have a singleton MediaStreamGraph per
* process. Each OfflineAudioContext object creates its own MediaStreamGraph
@ -1089,13 +1087,13 @@ inline TrackRate IdealAudioRate() { return AudioStream::PreferredSampleRate(); }
class MediaStreamGraph {
public:
// We ensure that the graph current time advances in multiples of
// IdealAudioBlockSize()/IdealAudioRate(). A stream that never blocks
// and has a track with the ideal audio rate will produce audio in
// multiples of the block size.
// IdealAudioBlockSize()/AudioStream::PreferredSampleRate(). A stream that
// never blocks and has a track with the ideal audio rate will produce audio
// in multiples of the block size.
// Main thread only
static MediaStreamGraph* GetInstance();
static MediaStreamGraph* CreateNonRealtimeInstance();
static MediaStreamGraph* CreateNonRealtimeInstance(TrackRate aSampleRate);
// Idempotent
static void DestroyNonRealtimeInstance(MediaStreamGraph* aGraph);

Просмотреть файл

@ -120,7 +120,7 @@ public:
* output. Those objects currently only support audio, and are used to
* implement OfflineAudioContext. They do not support MediaStream inputs.
*/
explicit MediaStreamGraphImpl(bool aRealtime);
explicit MediaStreamGraphImpl(bool aRealtime, TrackRate aSampleRate);
/**
* Unregisters memory reporting and deletes this instance. This should be
@ -392,6 +392,8 @@ public:
*/
void ResumeAllAudioOutputs();
TrackRate AudioSampleRate() { return mSampleRate; }
// Data members
/**
@ -531,6 +533,13 @@ public:
* The graph should stop processing at or after this time.
*/
GraphTime mEndTime;
/**
* Sample rate at which this graph runs. For real time graphs, this is
* the rate of the audio mixer. For offline graphs, this is the rate specified
* at construction.
*/
TrackRate mSampleRate;
/**
* True when another iteration of the control loop is required.
*/

Просмотреть файл

@ -11,7 +11,7 @@ using mozilla::AudioSampleFormat;
/* In this test, the different audio stream and channels are always created to
* cancel each other. */
void MixingDone(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames)
void MixingDone(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames, uint32_t aSampleRate)
{
bool silent = true;
for (uint32_t i = 0; i < aChannels * aFrames; i++) {
@ -67,6 +67,7 @@ void FillBuffer(AudioDataValue* aBuffer, uint32_t aLength, AudioDataValue aValue
int main(int argc, char* argv[]) {
const uint32_t CHANNEL_LENGTH = 256;
const uint32_t AUDIO_RATE = 44100;
AudioDataValue a[CHANNEL_LENGTH * 2];
AudioDataValue b[CHANNEL_LENGTH * 2];
FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
@ -81,8 +82,8 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "Test AudioMixer constant buffer length.\n");
while (iterations--) {
mixer.Mix(a, 2, CHANNEL_LENGTH);
mixer.Mix(b, 2, CHANNEL_LENGTH);
mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
}
}
@ -96,22 +97,22 @@ int main(int argc, char* argv[]) {
FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
mixer.Mix(a, 2, CHANNEL_LENGTH / 2);
mixer.Mix(b, 2, CHANNEL_LENGTH / 2);
mixer.Mix(a, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
mixer.FinishMixing();
FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
mixer.Mix(a, 2, CHANNEL_LENGTH);
mixer.Mix(b, 2, CHANNEL_LENGTH);
mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
mixer.Mix(a, 2, CHANNEL_LENGTH / 2);
mixer.Mix(b, 2, CHANNEL_LENGTH / 2);
mixer.Mix(a, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH / 2, AUDIO_RATE);
mixer.FinishMixing();
}
@ -122,14 +123,14 @@ int main(int argc, char* argv[]) {
mozilla::AudioMixer mixer(MixingDone);
fprintf(stderr, "Test AudioMixer variable channel count.\n");
mixer.Mix(a, 1, CHANNEL_LENGTH);
mixer.Mix(b, 1, CHANNEL_LENGTH);
mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
mixer.Mix(a, 1, CHANNEL_LENGTH);
mixer.Mix(b, 1, CHANNEL_LENGTH);
mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
mixer.Mix(a, 1, CHANNEL_LENGTH);
mixer.Mix(b, 1, CHANNEL_LENGTH);
mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 1, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
}
@ -137,16 +138,16 @@ int main(int argc, char* argv[]) {
mozilla::AudioMixer mixer(MixingDone);
fprintf(stderr, "Test AudioMixer variable stream count.\n");
mixer.Mix(a, 2, CHANNEL_LENGTH);
mixer.Mix(b, 2, CHANNEL_LENGTH);
mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
mixer.Mix(a, 2, CHANNEL_LENGTH);
mixer.Mix(b, 2, CHANNEL_LENGTH);
mixer.Mix(a, 2, CHANNEL_LENGTH);
mixer.Mix(b, 2, CHANNEL_LENGTH);
mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
mixer.Mix(a, 2, CHANNEL_LENGTH);
mixer.Mix(b, 2, CHANNEL_LENGTH);
mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE);
mixer.FinishMixing();
}

Просмотреть файл

@ -244,7 +244,7 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
, mExtraCurrentTimeUpdatedSinceLastStableState(false)
{
MediaStreamGraph* graph = aIsOffline ?
MediaStreamGraph::CreateNonRealtimeInstance() :
MediaStreamGraph::CreateNonRealtimeInstance(aSampleRate) :
MediaStreamGraph::GetInstance();
AudioNodeEngine* engine = aIsOffline ?
new OfflineDestinationNodeEngine(this, aNumberOfChannels,