зеркало из https://github.com/mozilla/gecko-dev.git
Bug 1702646 - Add a utility function to append an interleaved buffer in AudioSegment r=padenot
Add a utility function named AppendFromInterleavedBuffer in AudioSegment to append data from a given interleaved buffer. This function does the same job that AudioInputProcessing::InsertInGraph and NativeInputTrack::ProcessInput used to do. As a result, these two functions can be eliminated or simplified. Depends on D116673 Differential Revision: https://phabricator.services.mozilla.com/D116674
This commit is contained in:
Родитель
d2f2ea20bb
Коммит
0a9065018a
|
@ -389,6 +389,39 @@ class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
|
|||
chunk->mPrincipalHandle = aPrincipalHandle;
|
||||
}
|
||||
}
|
||||
template <typename T>
|
||||
void AppendFromInterleavedBuffer(const T* aBuffer, size_t aFrames,
|
||||
uint32_t aChannels,
|
||||
const PrincipalHandle& aPrincipalHandle) {
|
||||
MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
|
||||
|
||||
CheckedInt<size_t> bufferSize(sizeof(T));
|
||||
bufferSize *= aFrames;
|
||||
bufferSize *= aChannels;
|
||||
RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
|
||||
AutoTArray<const T*, 8> channels;
|
||||
if (aChannels == 1) {
|
||||
PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
|
||||
channels.AppendElement(static_cast<T*>(buffer->Data()));
|
||||
} else {
|
||||
channels.SetLength(aChannels);
|
||||
AutoTArray<T*, 8> writeChannels;
|
||||
writeChannels.SetLength(aChannels);
|
||||
T* samples = static_cast<T*>(buffer->Data());
|
||||
|
||||
size_t offset = 0;
|
||||
for (uint32_t i = 0; i < aChannels; ++i) {
|
||||
channels[i] = writeChannels[i] = samples + offset;
|
||||
offset += aFrames;
|
||||
}
|
||||
|
||||
DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
|
||||
writeChannels.Elements());
|
||||
}
|
||||
|
||||
MOZ_ASSERT(aChannels == channels.Length());
|
||||
AppendFrames(buffer.forget(), channels, aFrames, aPrincipalHandle);
|
||||
}
|
||||
// Consumes aChunk, and returns a pointer to the persistent copy of aChunk
|
||||
// in the segment.
|
||||
AudioChunk* AppendAndConsumeChunk(AudioChunk* aChunk) {
|
||||
|
|
|
@ -134,41 +134,14 @@ void NativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
|
|||
MOZ_ASSERT(inputInfo.mChannels >= 1 && inputInfo.mChannels <= 8,
|
||||
"Support up to 8 channels");
|
||||
|
||||
CheckedInt<size_t> bufferSize(sizeof(AudioDataValue));
|
||||
bufferSize *= inputInfo.mFrames;
|
||||
bufferSize *= inputInfo.mChannels;
|
||||
RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
|
||||
AutoTArray<const AudioDataValue*, 8> channels;
|
||||
if (inputInfo.mChannels == 1) {
|
||||
PodCopy(static_cast<AudioDataValue*>(buffer->Data()), inputInfo.mBuffer,
|
||||
inputInfo.mFrames);
|
||||
channels.AppendElement(static_cast<AudioDataValue*>(buffer->Data()));
|
||||
} else {
|
||||
channels.SetLength(inputInfo.mChannels);
|
||||
AutoTArray<AudioDataValue*, 8> writeChannels;
|
||||
writeChannels.SetLength(inputInfo.mChannels);
|
||||
AudioDataValue* samples = static_cast<AudioDataValue*>(buffer->Data());
|
||||
|
||||
size_t offset = 0;
|
||||
for (uint32_t i = 0; i < inputInfo.mChannels; ++i) {
|
||||
channels[i] = writeChannels[i] = samples + offset;
|
||||
offset += inputInfo.mFrames;
|
||||
}
|
||||
|
||||
DeinterleaveAndConvertBuffer(inputInfo.mBuffer, inputInfo.mFrames,
|
||||
inputInfo.mChannels,
|
||||
writeChannels.Elements());
|
||||
}
|
||||
GetData<AudioSegment>()->Clear();
|
||||
GetData<AudioSegment>()->AppendFromInterleavedBuffer(
|
||||
inputInfo.mBuffer, inputInfo.mFrames, inputInfo.mChannels,
|
||||
PRINCIPAL_HANDLE_NONE);
|
||||
|
||||
LOG(LogLevel::Verbose,
|
||||
("NativeInputTrack %p Appending %zu frames of raw audio", this,
|
||||
inputInfo.mFrames));
|
||||
|
||||
MOZ_ASSERT(inputInfo.mChannels == channels.Length());
|
||||
GetData<AudioSegment>()->Clear();
|
||||
GetData<AudioSegment>()->AppendFrames(buffer.forget(), channels,
|
||||
static_cast<int32_t>(inputInfo.mFrames),
|
||||
PRINCIPAL_HANDLE_NONE);
|
||||
}
|
||||
|
||||
uint32_t NativeInputTrack::NumberOfChannels() const {
|
||||
|
|
|
@ -1098,8 +1098,8 @@ void AudioInputProcessing::ProcessInput(MediaTrackGraphImpl* aGraph,
|
|||
if (aSegment) {
|
||||
mSegment.AppendSegment(aSegment, mPrincipal);
|
||||
} else {
|
||||
InsertInGraph(aGraph, inputInfo.mBuffer, inputInfo.mFrames,
|
||||
inputInfo.mChannels);
|
||||
mSegment.AppendFromInterleavedBuffer(inputInfo.mBuffer, inputInfo.mFrames,
|
||||
inputInfo.mChannels, mPrincipal);
|
||||
}
|
||||
} else {
|
||||
MOZ_ASSERT(aGraph->GraphRate() == inputInfo.mRate);
|
||||
|
@ -1108,47 +1108,6 @@ void AudioInputProcessing::ProcessInput(MediaTrackGraphImpl* aGraph,
|
|||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void AudioInputProcessing::InsertInGraph(MediaTrackGraphImpl* aGraph,
|
||||
const T* aBuffer, size_t aFrames,
|
||||
uint32_t aChannels) {
|
||||
if (mEnded) {
|
||||
return;
|
||||
}
|
||||
|
||||
MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
|
||||
|
||||
CheckedInt<size_t> bufferSize(sizeof(T));
|
||||
bufferSize *= aFrames;
|
||||
bufferSize *= aChannels;
|
||||
RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
|
||||
AutoTArray<const T*, 8> channels;
|
||||
if (aChannels == 1) {
|
||||
PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
|
||||
channels.AppendElement(static_cast<T*>(buffer->Data()));
|
||||
} else {
|
||||
channels.SetLength(aChannels);
|
||||
AutoTArray<T*, 8> write_channels;
|
||||
write_channels.SetLength(aChannels);
|
||||
T* samples = static_cast<T*>(buffer->Data());
|
||||
|
||||
size_t offset = 0;
|
||||
for (uint32_t i = 0; i < aChannels; ++i) {
|
||||
channels[i] = write_channels[i] = samples + offset;
|
||||
offset += aFrames;
|
||||
}
|
||||
|
||||
DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
|
||||
write_channels.Elements());
|
||||
}
|
||||
|
||||
LOG_FRAME("AudioInputProcessing %p Appending %zu frames of raw audio", this,
|
||||
aFrames);
|
||||
|
||||
MOZ_ASSERT(aChannels == channels.Length());
|
||||
mSegment.AppendFrames(buffer.forget(), channels, aFrames, mPrincipal);
|
||||
}
|
||||
|
||||
void AudioInputProcessing::NotifyInputStopped(MediaTrackGraphImpl* aGraph) {
|
||||
MOZ_ASSERT(aGraph->OnGraphThread());
|
||||
// This is called when an AudioCallbackDriver switch has happened for any
|
||||
|
|
|
@ -166,10 +166,6 @@ class AudioInputProcessing : public AudioDataListener {
|
|||
// aSegment stores the unprocessed non-interleaved audio input data from mic
|
||||
void ProcessInput(MediaTrackGraphImpl* aGraph, const AudioSegment* aSegment);
|
||||
|
||||
template <typename T>
|
||||
void InsertInGraph(MediaTrackGraphImpl* aGraph, const T* aBuffer,
|
||||
size_t aFrames, uint32_t aChannels);
|
||||
|
||||
void PacketizeAndProcess(MediaTrackGraphImpl* aGraph,
|
||||
const AudioDataValue* aBuffer, size_t aFrames,
|
||||
TrackRate aRate, uint32_t aChannels);
|
||||
|
|
Загрузка…
Ссылка в новой задаче