From 0a9065018a56a7812b15411051143c2c8f7b1a08 Mon Sep 17 00:00:00 2001
From: Chun-Min Chang
Date: Tue, 8 Jun 2021 00:48:21 +0000
Subject: [PATCH] Bug 1702646 - Add a utility function to append interleaved
 buffers in AudioSegment r=padenot

Add a utility function named AppendFromInterleavedBuffer in AudioSegment
to append data from a given interleaved buffer. This function does the
same job that AudioInputProcessing::InsertInGraph and
NativeInputTrack::ProcessInput used to do, so the former can be
eliminated and the latter simplified.

Depends on D116673

Differential Revision: https://phabricator.services.mozilla.com/D116674
---
 dom/media/AudioSegment.h                    | 33 +++++++++++++++
 dom/media/MediaTrackGraph.cpp               | 35 ++--------------
 dom/media/webrtc/MediaEngineWebRTCAudio.cpp | 45 +--------------------
 dom/media/webrtc/MediaEngineWebRTCAudio.h   |  4 --
 4 files changed, 39 insertions(+), 78 deletions(-)

diff --git a/dom/media/AudioSegment.h b/dom/media/AudioSegment.h
index 53b628f9ef17..1f133cb02f1f 100644
--- a/dom/media/AudioSegment.h
+++ b/dom/media/AudioSegment.h
@@ -389,6 +389,39 @@ class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
       chunk->mPrincipalHandle = aPrincipalHandle;
     }
   }
+  template <typename T>
+  void AppendFromInterleavedBuffer(const T* aBuffer, size_t aFrames,
+                                   uint32_t aChannels,
+                                   const PrincipalHandle& aPrincipalHandle) {
+    MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
+
+    CheckedInt<size_t> bufferSize(sizeof(T));
+    bufferSize *= aFrames;
+    bufferSize *= aChannels;
+    RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
+    AutoTArray<const T*, 8> channels;
+    if (aChannels == 1) {
+      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
+      channels.AppendElement(static_cast<T*>(buffer->Data()));
+    } else {
+      channels.SetLength(aChannels);
+      AutoTArray<T*, 8> writeChannels;
+      writeChannels.SetLength(aChannels);
+      T* samples = static_cast<T*>(buffer->Data());
+
+      size_t offset = 0;
+      for (uint32_t i = 0; i < aChannels; ++i) {
+        channels[i] = writeChannels[i] = samples + offset;
+        offset += aFrames;
+      }
+
+      DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
+                                   writeChannels.Elements());
+    }
+
+    MOZ_ASSERT(aChannels == channels.Length());
+    AppendFrames(buffer.forget(), channels, aFrames, aPrincipalHandle);
+  }
   // Consumes aChunk, and returns a pointer to the persistent copy of aChunk
   // in the segment.
   AudioChunk* AppendAndConsumeChunk(AudioChunk* aChunk) {
diff --git a/dom/media/MediaTrackGraph.cpp b/dom/media/MediaTrackGraph.cpp
index 52bcc74a2d1b..3b5d376028d3 100644
--- a/dom/media/MediaTrackGraph.cpp
+++ b/dom/media/MediaTrackGraph.cpp
@@ -134,41 +134,14 @@ void NativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
   MOZ_ASSERT(inputInfo.mChannels >= 1 && inputInfo.mChannels <= 8,
              "Support up to 8 channels");
 
-  CheckedInt<size_t> bufferSize(sizeof(AudioDataValue));
-  bufferSize *= inputInfo.mFrames;
-  bufferSize *= inputInfo.mChannels;
-  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
-  AutoTArray<const AudioDataValue*, 8> channels;
-  if (inputInfo.mChannels == 1) {
-    PodCopy(static_cast<AudioDataValue*>(buffer->Data()), inputInfo.mBuffer,
-            inputInfo.mFrames);
-    channels.AppendElement(static_cast<AudioDataValue*>(buffer->Data()));
-  } else {
-    channels.SetLength(inputInfo.mChannels);
-    AutoTArray<AudioDataValue*, 8> writeChannels;
-    writeChannels.SetLength(inputInfo.mChannels);
-    AudioDataValue* samples = static_cast<AudioDataValue*>(buffer->Data());
-
-    size_t offset = 0;
-    for (uint32_t i = 0; i < inputInfo.mChannels; ++i) {
-      channels[i] = writeChannels[i] = samples + offset;
-      offset += inputInfo.mFrames;
-    }
-
-    DeinterleaveAndConvertBuffer(inputInfo.mBuffer, inputInfo.mFrames,
-                                 inputInfo.mChannels,
-                                 writeChannels.Elements());
-  }
+  GetData<AudioSegment>()->Clear();
+  GetData<AudioSegment>()->AppendFromInterleavedBuffer(
+      inputInfo.mBuffer, inputInfo.mFrames, inputInfo.mChannels,
+      PRINCIPAL_HANDLE_NONE);
 
   LOG(LogLevel::Verbose,
       ("NativeInputTrack %p Appending %zu frames of raw audio", this,
        inputInfo.mFrames));
-
-  MOZ_ASSERT(inputInfo.mChannels == channels.Length());
-  GetData<AudioSegment>()->Clear();
-  GetData<AudioSegment>()->AppendFrames(buffer.forget(), channels,
-                                        static_cast<int32_t>(inputInfo.mFrames),
-                                        PRINCIPAL_HANDLE_NONE);
 }
 
 uint32_t NativeInputTrack::NumberOfChannels() const {
diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
index b2fa23f37f88..fb59fc195f81 100644
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -1098,8 +1098,8 @@ void AudioInputProcessing::ProcessInput(MediaTrackGraphImpl* aGraph,
     if (aSegment) {
       mSegment.AppendSegment(aSegment, mPrincipal);
     } else {
-      InsertInGraph(aGraph, inputInfo.mBuffer, inputInfo.mFrames,
-                    inputInfo.mChannels);
+      mSegment.AppendFromInterleavedBuffer(inputInfo.mBuffer, inputInfo.mFrames,
+                                           inputInfo.mChannels, mPrincipal);
     }
   } else {
     MOZ_ASSERT(aGraph->GraphRate() == inputInfo.mRate);
@@ -1108,47 +1108,6 @@ void AudioInputProcessing::ProcessInput(MediaTrackGraphImpl* aGraph,
   }
 }
 
-template <typename T>
-void AudioInputProcessing::InsertInGraph(MediaTrackGraphImpl* aGraph,
-                                         const T* aBuffer, size_t aFrames,
-                                         uint32_t aChannels) {
-  if (mEnded) {
-    return;
-  }
-
-  MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
-
-  CheckedInt<size_t> bufferSize(sizeof(T));
-  bufferSize *= aFrames;
-  bufferSize *= aChannels;
-  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
-  AutoTArray<const T*, 8> channels;
-  if (aChannels == 1) {
-    PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
-    channels.AppendElement(static_cast<T*>(buffer->Data()));
-  } else {
-    channels.SetLength(aChannels);
-    AutoTArray<T*, 8> write_channels;
-    write_channels.SetLength(aChannels);
-    T* samples = static_cast<T*>(buffer->Data());
-
-    size_t offset = 0;
-    for (uint32_t i = 0; i < aChannels; ++i) {
-      channels[i] = write_channels[i] = samples + offset;
-      offset += aFrames;
-    }
-
-    DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
-                                 write_channels.Elements());
-  }
-
-  LOG_FRAME("AudioInputProcessing %p Appending %zu frames of raw audio", this,
of raw audio", this, - aFrames); - - MOZ_ASSERT(aChannels == channels.Length()); - mSegment.AppendFrames(buffer.forget(), channels, aFrames, mPrincipal); -} - void AudioInputProcessing::NotifyInputStopped(MediaTrackGraphImpl* aGraph) { MOZ_ASSERT(aGraph->OnGraphThread()); // This is called when an AudioCallbackDriver switch has happened for any diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.h b/dom/media/webrtc/MediaEngineWebRTCAudio.h index 82b28913a809..46a66d9a7a15 100644 --- a/dom/media/webrtc/MediaEngineWebRTCAudio.h +++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h @@ -166,10 +166,6 @@ class AudioInputProcessing : public AudioDataListener { // aSegment stores the unprocessed non-interleaved audio input data from mic void ProcessInput(MediaTrackGraphImpl* aGraph, const AudioSegment* aSegment); - template - void InsertInGraph(MediaTrackGraphImpl* aGraph, const T* aBuffer, - size_t aFrames, uint32_t aChannels); - void PacketizeAndProcess(MediaTrackGraphImpl* aGraph, const AudioDataValue* aBuffer, size_t aFrames, TrackRate aRate, uint32_t aChannels);