/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaStreamGraphImpl.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/unused.h"

#include "AudioSegment.h"
#include "mozilla/Logging.h"
#include "mozilla/Attributes.h"
#include "AudioCaptureStream.h"
#include "ImageContainer.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioNodeExternalInputStream.h"
#include "webaudio/MediaStreamAudioDestinationNode.h"
#include <algorithm>
#include "DOMMediaStream.h"

using namespace mozilla::layers;
using namespace mozilla::dom;
using namespace mozilla::gfx;

namespace mozilla {

// We are mixing to mono until PeerConnection can accept stereo
static const uint32_t MONO = 1;

AudioCaptureStream::AudioCaptureStream(DOMMediaStream* aWrapper)
  : ProcessedMediaStream(aWrapper), mTrackCreated(false)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_COUNT_CTOR(AudioCaptureStream);
  mMixer.AddCallback(this);
}

AudioCaptureStream::~AudioCaptureStream()
{
  MOZ_COUNT_DTOR(AudioCaptureStream);
  mMixer.RemoveCallback(this);
}

void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  uint32_t inputCount = mInputs.Length();
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  // Notify the DOM everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), AUDIO_TRACK, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  // If the captured stream is connected back to an object on the page (be it
  // an HTMLMediaElement with a stream as source, or an AudioContext), a cycle
  // situation occurs. This can work if it's an AudioContext with at least one
  // DelayNode, but the MSG will mute the whole cycle otherwise.
  bool blocked = mFinished || mBlocked.GetAt(aFrom);
  if (blocked || InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs to a single track. Everything
    // is {up,down}-mixed to mono.
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
      while (!tracks.IsEnded()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTime(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTime(aTo);
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aFrom, aTo] range.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
        tracks.Next();
      }
    }
    // This calls MixerCallback below
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
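  // Advancing the known-tracks time tells consumers of this stream that no
  // new tracks will be created before the stream time corresponding to aTo.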
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime(aTo));
}

void
AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
                                  AudioSampleFormat aFormat, uint32_t aChannels,
                                  uint32_t aFrames, uint32_t aSampleRate)
{
  nsAutoTArray<nsTArray<AudioDataValue>, MONO> output;
  nsAutoTArray<const AudioDataValue*, MONO> bufferPtrs;
  output.SetLength(MONO);
  bufferPtrs.SetLength(MONO);

  uint32_t written = 0;
  // We need to copy here, because the mixer will reuse the storage, so we
  // should not hold onto it. Buffers are in planar format.
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    AudioDataValue* out = output[channel].AppendElements(aFrames);
    PodCopy(out, aMixedBuffer + written, aFrames);
    bufferPtrs[channel] = out;
    written += aFrames;
  }

  AudioChunk chunk;
  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
  chunk.mDuration = aFrames;
  chunk.mBufferFormat = aFormat;
  chunk.mVolume = 1.0f;
  chunk.mChannelData.SetLength(MONO);
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    chunk.mChannelData[channel] = bufferPtrs[channel];
  }

  // Now we have mixed data, simply append it to our track.
  EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
}
}
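// A minimal sketch of the planar layout that the copy loop in
// AudioCaptureStream::MixerCallback assumes: aMixedBuffer carries all aFrames
// samples of channel 0, then all aFrames samples of channel 1, and so on, so
// channel c starts at aMixedBuffer + c * aFrames. Under that assumption the
// copy is equivalent to:
//
//   for (uint32_t channel = 0; channel < aChannels; channel++) {
//     const AudioDataValue* src = aMixedBuffer + channel * aFrames;
//     PodCopy(output[channel].AppendElements(aFrames), src, aFrames);
//   }
//
// with `written` accumulating channel * aFrames across iterations.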