Mirror of https://github.com/mozilla/gecko-dev.git

Bug 848954 - Part 5 - Mix down all audio and only output a single stream. r=roc

Parent: 7e987d0735
Commit: 735258f143

AudioMixer.h:

@@ -10,6 +10,7 @@
 #include "nsTArray.h"
 #include "mozilla/PodOperations.h"
 #include "mozilla/LinkedList.h"
 #include "AudioStream.h"

 namespace mozilla {

@@ -20,7 +21,6 @@ struct MixerCallbackReceiver {
                              uint32_t aFrames,
                              uint32_t aSampleRate) = 0;
 };

 /**
  * This class mixes multiple streams of audio together to output a single audio
  * stream.

@@ -93,6 +93,16 @@ public:
     mCallbacks.insertBack(new MixerCallback(aReceiver));
   }

+  bool FindCallback(MixerCallbackReceiver* aReceiver) {
+    for (MixerCallback* cb = mCallbacks.getFirst();
+         cb != nullptr; cb = cb->getNext()) {
+      if (cb->mReceiver == aReceiver) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   bool RemoveCallback(MixerCallbackReceiver* aReceiver) {
     for (MixerCallback* cb = mCallbacks.getFirst();
          cb != nullptr; cb = cb->getNext()) {
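
For readers outside Gecko: mCallbacks is a mozilla::LinkedList of MixerCallback nodes, each holding a raw MixerCallbackReceiver pointer, so the new FindCallback is a plain linear scan. A standalone sketch of the same find-before-add registry pattern, with std::list and illustrative names standing in for the Gecko types:

#include <algorithm>
#include <cstdint>
#include <list>

// Stand-in for MixerCallbackReceiver (illustrative, not the Gecko type).
struct Receiver {
  virtual ~Receiver() = default;
  virtual void MixerCallback(float* aMixedBuffer, uint32_t aChannels,
                             uint32_t aFrames, uint32_t aSampleRate) = 0;
};

class CallbackRegistry {
public:
  void AddCallback(Receiver* aReceiver) { mCallbacks.push_back(aReceiver); }

  // Same contract as AudioMixer::FindCallback: linear scan, true if present.
  bool FindCallback(Receiver* aReceiver) const {
    return std::find(mCallbacks.begin(), mCallbacks.end(), aReceiver) !=
           mCallbacks.end();
  }

  bool RemoveCallback(Receiver* aReceiver) {
    auto it = std::find(mCallbacks.begin(), mCallbacks.end(), aReceiver);
    if (it == mCallbacks.end()) {
      return false;
    }
    mCallbacks.erase(it);
    return true;
  }

private:
  std::list<Receiver*> mCallbacks;
};

The patch needs FindCallback because mMixer is now a by-value member that lives as long as the graph (see the MediaStreamGraphImpl.h hunks near the end): the registration path in UpdateStreamOrder runs on every pass, and the guard keeps gFarendObserver registered at most once.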

AudioSegment.cpp:

@@ -147,7 +147,7 @@ void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler, uint32_t aInR
 }

 void
-AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
+AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate)
 {
-  uint32_t outputChannels = aOutput->GetChannels();
   nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;

@@ -159,7 +159,7 @@ AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
     return;
   }

-  uint32_t outBufferLength = GetDuration() * outputChannels;
+  uint32_t outBufferLength = GetDuration() * aOutputChannels;
   buf.SetLength(outBufferLength);

@@ -172,36 +172,33 @@ AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
     // AudioStream, and we don't have real data to write to it (just silence).
     // To avoid overbuffering in the AudioStream, we simply drop the silence,
     // here. The stream will underrun and output silence anyways.
-    if (c.mBuffer || aOutput->GetWritten()) {
-      if (c.mBuffer && c.mBufferFormat != AUDIO_FORMAT_SILENCE) {
-        channelData.SetLength(c.mChannelData.Length());
-        for (uint32_t i = 0; i < channelData.Length(); ++i) {
-          channelData[i] = c.mChannelData[i];
-        }
-
-        if (channelData.Length() < outputChannels) {
-          // Up-mix. Note that this might actually make channelData have more
-          // than outputChannels temporarily.
-          AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
-        }
-
-        if (channelData.Length() > outputChannels) {
-          // Down-mix.
-          DownmixAndInterleave(channelData, c.mBufferFormat, frames,
-                               c.mVolume, outputChannels, buf.Elements() + offset);
-        } else {
-          InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
-                                     frames, c.mVolume,
-                                     outputChannels,
-                                     buf.Elements() + offset);
-        }
-      } else {
-        // Assumes that a bit pattern of zeroes == 0.0f
-        memset(buf.Elements() + offset, 0, outputChannels * frames * sizeof(AudioDataValue));
-      }
-      offset += frames * outputChannels;
-    }
+    if (c.mBuffer && c.mBufferFormat != AUDIO_FORMAT_SILENCE) {
+      channelData.SetLength(c.mChannelData.Length());
+      for (uint32_t i = 0; i < channelData.Length(); ++i) {
+        channelData[i] = c.mChannelData[i];
+      }
+      if (channelData.Length() < aOutputChannels) {
+        // Up-mix. Note that this might actually make channelData have more
+        // than aOutputChannels temporarily.
+        AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
+      }
+      if (channelData.Length() > aOutputChannels) {
+        // Down-mix.
+        DownmixAndInterleave(channelData, c.mBufferFormat, frames,
+                             c.mVolume, aOutputChannels, buf.Elements() + offset);
+      } else {
+        InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
+                                   frames, c.mVolume,
+                                   aOutputChannels,
+                                   buf.Elements() + offset);
+      }
+    } else {
+      // Assumes that a bit pattern of zeroes == 0.0f
+      memset(buf.Elements() + offset, 0, aOutputChannels * frames * sizeof(AudioDataValue));
+    }
+
+    offset += frames * aOutputChannels;

     if (!c.mTimeStamp.IsNull()) {
       TimeStamp now = TimeStamp::Now();
       // would be more efficient to c.mTimeStamp to ms on create time then pass here
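
The chunk loop above gathers each chunk's planar channel pointers, matches the channel count to aOutputChannels, and writes the result into one interleaved buffer. The core of the interleave step that InterleaveAndConvertBuffer performs looks like the following sketch for float samples (the Gecko helper additionally converts between sample formats; the function and parameter names here are illustrative):

#include <cstdint>
#include <vector>

// Interleave planar channel buffers into aOut, applying a volume scale.
// aChannels[c][i] is sample i of channel c; the output is frame-major,
// i.e. aOut[i * channelCount + c], which is what audio backends consume.
static void InterleaveWithVolume(const std::vector<const float*>& aChannels,
                                 uint32_t aFrames, float aVolume, float* aOut)
{
  const uint32_t channelCount = static_cast<uint32_t>(aChannels.size());
  for (uint32_t i = 0; i < aFrames; ++i) {
    for (uint32_t c = 0; c < channelCount; ++c) {
      aOut[i * channelCount + c] = aChannels[c][i] * aVolume;
    }
  }
}

With this pointer-per-channel layout, up-mixing stays cheap: AudioChannelsUpMix only appends pointers to a shared silent buffer (which is why the call passes gZeroChannel) until channelData reaches aOutputChannels, so the padding channels are interleaved from the same zero-filled memory rather than copied per stream.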

@@ -210,15 +207,9 @@ AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
     }
   }

-  aOutput->Write(buf.Elements(), offset / outputChannels, &(mChunks[mChunks.Length() - 1].mTimeStamp));
-
-  if (aMixer && offset) {
-    aMixer->Mix(buf.Elements(), outputChannels, GetDuration(), aOutput->GetRate());
-  }
-  aOutput->Start();
+  // `offset` is zero when all the chunks above are null (silence). We can
+  // safely skip the mixing here because filling `buf` with zero and then mixing
+  // it would have absolutely no effect in the mix.
+  if (offset) {
+    aMixer.Mix(buf.Elements(), aOutputChannels, offset, aSampleRate);
+  }
 }
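
Each stream's WriteTo now ends by contributing its interleaved buffer to the shared AudioMixer rather than writing to a per-track AudioStream. Conceptually the mixer is sample-wise accumulation, with a copy for the first contributor; a minimal standalone sketch of that contract (a hypothetical SummingMixer, not the Gecko class, which also tracks sample formats and a callback list):

#include <cassert>
#include <cstdint>
#include <vector>

class SummingMixer {
public:
  // Called once at the top of each graph iteration.
  void StartMixing() { mSamples.clear(); }

  // Accumulate one stream's interleaved buffer. Every contributor in an
  // iteration must agree on channel count, frame count and sample rate.
  void Mix(const float* aSamples, uint32_t aChannels, uint32_t aFrames,
           uint32_t aSampleRate)
  {
    mChannels = aChannels;
    mSampleRate = aSampleRate;
    const size_t count = size_t(aChannels) * aFrames;
    if (mSamples.empty()) {
      mSamples.assign(aSamples, aSamples + count); // first stream: copy
      return;
    }
    assert(mSamples.size() == count);
    for (size_t i = 0; i < count; ++i) {
      mSamples[i] += aSamples[i]; // later streams: sum in place
    }
  }

  // Called once at the end of an iteration; hands the sum to the consumer
  // (in Gecko, every registered MixerCallbackReceiver).
  template <typename Callback>
  void FinishMixing(Callback&& aCallback)
  {
    if (!mSamples.empty()) {
      aCallback(mSamples.data(), mChannels,
                uint32_t(mSamples.size() / mChannels), mSampleRate);
    }
  }

private:
  std::vector<float> mSamples;
  uint32_t mChannels = 0;
  uint32_t mSampleRate = 0;
};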

AudioSegment.h:

@@ -274,7 +274,7 @@ public:
     return chunk;
   }
   void ApplyVolume(float aVolume);
-  void WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer = nullptr);
+  void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate);

   int ChannelCount() {
     NS_WARN_IF_FALSE(!mChunks.IsEmpty(),

MediaStreamGraph.cpp:

@@ -334,10 +334,10 @@ MediaStreamGraphImpl::StreamTimeToGraphTime(MediaStream* aStream,
 GraphTime
 MediaStreamGraphImpl::GetAudioPosition(MediaStream* aStream)
 {
-  if (aStream->mAudioOutputStreams.IsEmpty()) {
+  if (!mMixedAudioStream) {
     return IterationEnd();
   }
-  int64_t positionInFrames = aStream->mAudioOutputStreams[0].mStream->GetPositionInFrames();
+  int64_t positionInFrames = mMixedAudioStream->GetPositionInFrames();
   if (positionInFrames < 0) {
     return IterationEnd();
   }

@@ -500,6 +500,7 @@ void
 MediaStreamGraphImpl::UpdateStreamOrder()
 {
   bool shouldMix = false;
+  bool audioTrackPresent = false;
   // Value of mCycleMarker for unvisited streams in cycle detection.
   const uint32_t NOT_VISITED = UINT32_MAX;
   // Value of mCycleMarker for ordered streams in muted cycles.

@@ -513,26 +514,25 @@ MediaStreamGraphImpl::UpdateStreamOrder()
         stream->AsSourceStream()->NeedsMixing()) {
       shouldMix = true;
     }
+    for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer(), MediaSegment::AUDIO);
+         !tracks.IsEnded(); tracks.Next()) {
+      audioTrackPresent = true;
+    }
   }

-  if (!mMixer && shouldMix) {
-    mMixer = new AudioMixer(AudioMixerCallback);
-    for (uint32_t i = 0; i < mStreams.Length(); ++i) {
-      for (uint32_t j = 0; j < mStreams[i]->mAudioOutputStreams.Length(); ++j) {
-        mStreams[i]->mAudioOutputStreams[j].mStream->SetMicrophoneActive(true);
-      }
-    }
-    if (gFarendObserver) {
-      mMixer->AddCallback(gFarendObserver);
-    }
-  } else if (mMixer && !shouldMix) {
-    mMixer->RemoveCallback(gFarendObserver);
-    mMixer = nullptr;
-    for (uint32_t i = 0; i < mStreams.Length(); ++i) {
-      for (uint32_t j = 0; j < mStreams[i]->mAudioOutputStreams.Length(); ++j) {
-        mStreams[i]->mAudioOutputStreams[j].mStream->SetMicrophoneActive(false);
-      }
-    }
-  }
+  mMixedAudioStream->SetMicrophoneActive(true);
+  if (shouldMix) {
+    if (gFarendObserver && !mMixer.FindCallback(gFarendObserver)) {
+      mMixer.AddCallback(gFarendObserver);
+    }
+  } else {
+    mMixer.RemoveCallback(gFarendObserver);
+  }
+
+  if (!audioTrackPresent && mMixedAudioStream) {
+    mMixedAudioStream = nullptr;
+  }

   // The algorithm for finding cycles is based on Tim Leslie's iterative

@@ -890,50 +890,48 @@ MediaStreamGraphImpl::CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTim
       if (i < audioOutputStreamsFound.Length()) {
         audioOutputStreamsFound[i] = true;
       } else {
         // No output stream created for this track yet. Check if it's time to
        // create one.
         GraphTime startTime =
           StreamTimeToGraphTime(aStream, tracks->GetStartTimeRoundDown(),
                                 INCLUDE_TRAILING_BLOCKED_INTERVAL);
         if (startTime >= CurrentDriver()->StateComputedTime()) {
           // The stream wants to play audio, but nothing will play for the foreseeable
           // future, so don't create the stream.
           continue;
         }

-        // Allocating a AudioStream would be slow, so we finish the Init async
         MediaStream::AudioOutputStream* audioOutputStream =
           aStream->mAudioOutputStreams.AppendElement();
         audioOutputStream->mAudioPlaybackStartTime = aAudioOutputStartTime;
         audioOutputStream->mBlockedAudioTime = 0;
         audioOutputStream->mLastTickWritten = 0;
-        audioOutputStream->mStream = new AudioStream();
-        // XXX for now, allocate stereo output. But we need to fix this to
-        // match the system's ideal channel configuration.
-        // NOTE: we presume this is either fast or async-under-the-covers
-        audioOutputStream->mStream->Init(2, mSampleRate,
-                                         aStream->mAudioChannelType,
-                                         AudioStream::LowLatency);
         audioOutputStream->mTrackID = tracks->GetID();

-        // If there is a mixer, there is a microphone active.
-        audioOutputStream->mStream->SetMicrophoneActive(mMixer);
-
-        LogLatency(AsyncLatencyLogger::AudioStreamCreate,
-                   reinterpret_cast<uint64_t>(aStream),
-                   reinterpret_cast<int64_t>(audioOutputStream->mStream.get()));
+        if (!mMixedAudioStream) {
+          mMixedAudioStream = new AudioStream();
+          // XXX for now, allocate stereo output. But we need to fix this to
+          // match the system's ideal channel configuration.
+          // NOTE: we presume this is either fast or async-under-the-covers
+          mMixedAudioStream->Init(AudioChannelCount(), mSampleRate,
+                                  AudioChannel::Normal,
+                                  AudioStream::LowLatency);
+        }
       }
     }
   }

   for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
     if (!audioOutputStreamsFound[i]) {
-      aStream->mAudioOutputStreams[i].mStream->Shutdown();
       aStream->mAudioOutputStreams.RemoveElementAt(i);
     }
   }
 }

+void
+MediaStreamGraphImpl::MixerCallback(AudioDataValue* aMixedBuffer,
+                                    AudioSampleFormat aFormat,
+                                    uint32_t aChannels,
+                                    uint32_t aFrames,
+                                    uint32_t aSampleRate)
+{
+  MOZ_ASSERT(mMixedAudioStream);
+  mMixedAudioStream->Write(aMixedBuffer, aFrames, nullptr);
+}

 TrackTicks
 MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
                                 GraphTime aFrom, GraphTime aTo)
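
With the per-track streams gone, MediaStreamGraphImpl itself is the mixer's receiver: FinishMixing ends up in the MixerCallback above, which forwards the mixed block to the single mMixedAudioStream (the real AudioStream::Write takes a third timestamp argument, passed as nullptr in the diff). The shape of that wiring, sketched with stand-in types rather than the Gecko signatures:

#include <cstdint>
#include <vector>

// Stand-in for the one hardware-facing output (AudioStream in Gecko).
struct OutputStream {
  std::vector<float> written; // samples handed to the audio backend
  void Write(const float* aBuffer, uint32_t aFrames, uint32_t aChannels)
  {
    written.insert(written.end(), aBuffer,
                   aBuffer + size_t(aFrames) * aChannels);
  }
};

// Mirrors the MixerCallbackReceiver role of MediaStreamGraphImpl: forward
// every mixed block to the graph's single output stream.
struct GraphReceiver {
  OutputStream* mMixedAudioStream = nullptr;

  void MixerCallback(float* aMixedBuffer, uint32_t aChannels,
                     uint32_t aFrames, uint32_t /*aSampleRate*/)
  {
    // The real code asserts mMixedAudioStream was already created by
    // CreateOrDestroyAudioStreams before any mixing happens.
    mMixedAudioStream->Write(aMixedBuffer, aFrames, aChannels);
  }
};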

@@ -952,8 +950,6 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
     return 0;
   }

-  // When we're playing multiple copies of this stream at the same time, they're
-  // perfectly correlated so adding volumes is the right thing to do.
   float volume = 0.0f;
   for (uint32_t i = 0; i < aStream->mAudioOutputs.Length(); ++i) {
     volume += aStream->mAudioOutputs[i].mVolume;

@@ -1036,7 +1032,8 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,

     // Need unique id for stream & track - and we want it to match the inserter
     output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
-                   audioOutput.mStream, mMixer);
+                   mMixer, AudioChannelCount(),
+                   mSampleRate);
   }
   return ticksWritten;
 }

@@ -1242,11 +1239,8 @@ MediaStreamGraphImpl::PauseAllAudioOutputs()
   if (mAudioOutputsPaused) {
     return;
   }
-  for (uint32_t i = 0; i < mStreams.Length(); ++i) {
-    MediaStream* s = mStreams[i];
-    for (uint32_t j = 0; j < s->mAudioOutputStreams.Length(); ++j) {
-      s->mAudioOutputStreams[j].mStream->Pause();
-    }
-  }
+  if (mMixedAudioStream) {
+    mMixedAudioStream->Pause();
+  }
   mAudioOutputsPaused = true;
 }

@@ -1258,11 +1252,8 @@ MediaStreamGraphImpl::ResumeAllAudioOutputs()
     return;
   }

-  for (uint32_t i = 0; i < mStreams.Length(); ++i) {
-    MediaStream* s = mStreams[i];
-    for (uint32_t j = 0; j < s->mAudioOutputStreams.Length(); ++j) {
-      s->mAudioOutputStreams[j].mStream->Resume();
-    }
-  }
+  if (mMixedAudioStream) {
+    mMixedAudioStream->Resume();
+  }

   mAudioOutputsPaused = false;

@@ -1325,6 +1316,9 @@ MediaStreamGraphImpl::Process(GraphTime aFrom, GraphTime aTo)
   // This is the number of frames that are written to the AudioStreams, for
   // this cycle.
   TrackTicks ticksPlayed = 0;
+
+  mMixer.StartMixing();
+
   // Figure out what each stream wants to do
   for (uint32_t i = 0; i < mStreams.Length(); ++i) {
     MediaStream* stream = mStreams[i];

@@ -1356,8 +1350,8 @@ MediaStreamGraphImpl::Process(GraphTime aFrom, GraphTime aTo)
       }
     }
     NotifyHasCurrentData(stream);
-    // Only playback audio and video in real-time mode
     if (mRealtime) {
+      // Only playback audio and video in real-time mode
       CreateOrDestroyAudioStreams(aFrom, stream);
       TrackTicks ticksPlayedForThisStream = PlayAudio(stream, aFrom, aTo);
       if (!ticksPlayed) {

@@ -1378,8 +1372,8 @@ MediaStreamGraphImpl::Process(GraphTime aFrom, GraphTime aTo)
     }
   }

-  if (mMixer) {
-    mMixer->FinishMixing();
+  if (ticksPlayed) {
+    mMixer.FinishMixing();
   }

   if (!allBlockedForever) {
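
Taken together, Process() now drives a fixed per-iteration mixing cycle: StartMixing resets the accumulator, PlayAudio lets every audible stream contribute through AudioSegment::WriteTo, and FinishMixing fires only when something actually played. Reusing the hypothetical SummingMixer and GraphReceiver stand-ins from the sketches above, the control flow is roughly:

#include <cstdint>
#include <vector>

struct Stream {
  std::vector<float> pending; // interleaved samples for this iteration
};

// PlayAudio analogue: contribute this stream's audio, return frames played.
// The graph guarantees all streams cover the same interval per iteration,
// which is what lets the mixer sum buffers of equal length.
static uint32_t WriteStreamAudio(Stream& aStream, SummingMixer& aMixer,
                                 uint32_t aChannels, uint32_t aSampleRate)
{
  if (aStream.pending.empty()) {
    return 0;
  }
  const uint32_t frames = uint32_t(aStream.pending.size() / aChannels);
  aMixer.Mix(aStream.pending.data(), aChannels, frames, aSampleRate);
  return frames;
}

static void IterateOnce(SummingMixer& aMixer, std::vector<Stream>& aStreams,
                        GraphReceiver& aGraph)
{
  aMixer.StartMixing(); // mMixer.StartMixing() at the top of Process()
  uint32_t ticksPlayed = 0;
  for (Stream& s : aStreams) {
    ticksPlayed += WriteStreamAudio(s, aMixer, 2, 48000); // stereo for now
  }
  if (ticksPlayed) { // silent iterations skip the mix entirely
    aMixer.FinishMixing([&](float* aBuf, uint32_t aChannels,
                            uint32_t aFrames, uint32_t aRate) {
      aGraph.MixerCallback(aBuf, aChannels, aFrames, aRate);
    });
  }
}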

@@ -1780,7 +1774,7 @@ MediaStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   // - mVideoOutputs - elements
   // - mLastPlayedVideoFrame
   // - mListeners - elements
-  // - mAudioOutputStreams - elements
+  // - mAudioOutputStream - elements

   amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf);
   amount += mAudioOutputs.SizeOfExcludingThis(aMallocSizeOf);

@@ -1792,10 +1786,6 @@ MediaStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   amount += mBlocked.SizeOfExcludingThis(aMallocSizeOf);
   amount += mGraphUpdateIndices.SizeOfExcludingThis(aMallocSizeOf);
   amount += mConsumers.SizeOfExcludingThis(aMallocSizeOf);
-  amount += mAudioOutputStreams.SizeOfExcludingThis(aMallocSizeOf);
-  for (size_t i = 0; i < mAudioOutputStreams.Length(); i++) {
-    amount += mAudioOutputStreams[i].SizeOfExcludingThis(aMallocSizeOf);
-  }

   return amount;
 }

@@ -1906,10 +1896,6 @@ MediaStream::DestroyImpl()
   for (int32_t i = mConsumers.Length() - 1; i >= 0; --i) {
     mConsumers[i]->Disconnect();
   }
-  for (uint32_t i = 0; i < mAudioOutputStreams.Length(); ++i) {
-    mAudioOutputStreams[i].mStream->Shutdown();
-  }
-  mAudioOutputStreams.Clear();
   mGraph = nullptr;
 }

@@ -2671,7 +2657,7 @@ MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime, TrackRate aSampleRate
   , mNonRealtimeProcessing(false)
   , mStreamOrderDirty(false)
   , mLatencyLog(AsyncLatencyLogger::Get())
-  , mMixer(nullptr)
+  , mMixedAudioStream(nullptr)
   , mMemoryReportMonitor("MSGIMemory")
   , mSelfRef(MOZ_THIS_IN_INITIALIZER_LIST())
   , mAudioStreamSizes()

@@ -2695,6 +2681,8 @@ MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime, TrackRate aSampleRate
     mDriverHolder.Switch(new OfflineClockDriver(this, MEDIA_GRAPH_TARGET_PERIOD_MS));
   }

+  mMixer.AddCallback(this);
+
   mLastMainThreadUpdate = TimeStamp::Now();

   RegisterWeakMemoryReporter(this);

MediaStreamGraph.h:

@@ -435,6 +435,11 @@ public:
   {
     mAudioOutputs.AppendElement(AudioOutput(aKey));
   }
+  // Returns true if this stream has an audio output.
+  bool HasAudioOutput()
+  {
+    return !mAudioOutputs.IsEmpty();
+  }
   void RemoveAudioOutputImpl(void* aKey);
   void AddVideoOutputImpl(already_AddRefed<VideoFrameContainer> aContainer)
   {

@@ -616,15 +621,7 @@ protected:
     MediaTime mBlockedAudioTime;
     // Last tick written to the audio output.
     TrackTicks mLastTickWritten;
-    RefPtr<AudioStream> mStream;
     TrackID mTrackID;
-
-    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
-    {
-      size_t amount = 0;
-      amount += mStream->SizeOfIncludingThis(aMallocSizeOf);
-      return amount;
-    }
   };
   nsTArray<AudioOutputStream> mAudioOutputStreams;

MediaStreamGraphImpl.h:

@@ -16,14 +16,13 @@
 #include "Latency.h"
 #include "mozilla/WeakPtr.h"
 #include "GraphDriver.h"
+#include "AudioMixer.h"

 namespace mozilla {

 template <typename T>
 class LinkedList;

-class AudioMixer;
-
 /**
  * Assume we can run an iteration of the MediaStreamGraph loop in this much time
  * or less.

@@ -116,7 +115,8 @@ struct MessageBlock {
 * OfflineAudioContext object.
 */
 class MediaStreamGraphImpl : public MediaStreamGraph,
-                             public nsIMemoryReporter {
+                             public nsIMemoryReporter,
+                             public MixerCallbackReceiver {
 public:
   NS_DECL_ISUPPORTS
   NS_DECL_NSIMEMORYREPORTER

@@ -350,13 +350,21 @@ public:
    * If aStream needs an audio stream but doesn't have one, create it.
    * If aStream doesn't need an audio stream but has one, destroy it.
    */
-  void CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTime,
-                                   MediaStream* aStream);
+  void CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTime, MediaStream* aStream);
   /**
    * Queue audio (mix of stream audio and silence for blocked intervals)
    * to the audio output stream. Returns the number of frames played.
    */
   TrackTicks PlayAudio(MediaStream* aStream, GraphTime aFrom, GraphTime aTo);
+
+  /* The mixer calls the Graph back when all the media streams that have audio
+   * outputs have been mixed down, so it can write to its AudioStream to output
+   * sound. */
+  virtual void MixerCallback(AudioDataValue* aMixedBuffer,
+                             AudioSampleFormat aFormat,
+                             uint32_t aChannels,
+                             uint32_t aFrames,
+                             uint32_t aSampleRate) MOZ_OVERRIDE;
   /**
    * Set the correct current video frame for stream aStream.
    */

@@ -412,6 +420,8 @@ public:

   TrackRate AudioSampleRate() const { return mSampleRate; }
   TrackRate GraphRate() const { return mSampleRate; }
+  // Always stereo for now.
+  uint32_t AudioChannelCount() { return 2; }

   double MediaTimeToSeconds(GraphTime aTime)
   {

@@ -600,10 +610,11 @@ public:
    * Hold a ref to the Latency logger
    */
   nsRefPtr<AsyncLatencyLogger> mLatencyLog;
+  AudioMixer mMixer;
   /**
-   * If this is not null, all the audio output for the MSG will be mixed down.
+   * The mixed down audio output for this graph.
    */
-  nsAutoPtr<AudioMixer> mMixer;
+  nsRefPtr<AudioStream> mMixedAudioStream;

 private:
   virtual ~MediaStreamGraphImpl();