Bug 1423923 - Properly feed reverse stream to the AudioProcessingModule. r=pehrsons

We need to feed deinterleaved data, not interleaved data.
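
To illustrate what that means (a rough sketch only; the helper name, exact
signature and s16 scaling below are assumptions, not the Gecko code),
ProcessReverseStream() wants one contiguous float buffer per channel, while the
far-end data arrives interleaved, and as s16 on some platforms:

  #include <cstddef>
  #include <cstdint>

  // Split interleaved s16 audio [L0 R0 L1 R1 ...] into per-channel float
  // buffers [L0 L1 ...] and [R0 R1 ...], converting each sample to the
  // [-1, 1] float range that the APM consumes.
  static void DeinterleaveAndConvert(const int16_t* aInterleaved,
                                     size_t aFrames,
                                     size_t aChannels,
                                     float* const* aChannelPointers)
  {
    for (size_t c = 0; c < aChannels; ++c) {
      float* out = aChannelPointers[c];
      for (size_t f = 0; f < aFrames; ++f) {
        out[f] = aInterleaved[f * aChannels + c] / 32768.0f;
      }
    }
  }

The per-channel pointers can then be handed to ProcessReverseStream() directly,
which is what this patch sets up with mInputBuffer and
deinterleavedPacketDataChannelPointers.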

MozReview-Commit-ID: 99z8HA7tJgT

--HG--
extra : rebase_source : ca58203084bfd7018036c2d7299d2011dc27270f
extra : amend_source : d88a8d760f22026add0639c75e680435eafa8588
extra : source : 3ba7fe1cddec0a3dcaaf526a85b7f34072c3e199
Paul Adenot 2017-12-07 16:22:28 +01:00
Parent 7e60cbe93c
Commit b1b206b075
1 changed file with 20 additions and 40 deletions

@@ -741,7 +741,7 @@ MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
         continue;
       }
       AudioDataValue* packetDataPointer = buffer->mData;
-      AutoTArray<AudioDataValue*, MAX_CHANNELS> deinterleavedPacketDataChannelPointers;
+      AutoTArray<float*, MAX_CHANNELS> deinterleavedPacketDataChannelPointers;
       AudioDataValue* interleavedFarend = nullptr;
       uint32_t channelCountFarend = 0;
       uint32_t framesPerPacketFarend = 0;
@@ -771,61 +771,39 @@ MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
                  (channelCountFarend == 1 || channelCountFarend == 2) &&
                  framesPerPacketFarend);
+      if (mInputBuffer.Length() < framesPerPacketFarend * channelCountFarend) {
+        mInputBuffer.SetLength(framesPerPacketFarend * channelCountFarend);
+      }
       offset = 0;
       for (size_t i = 0; i < deinterleavedPacketDataChannelPointers.Length(); ++i) {
-        deinterleavedPacketDataChannelPointers[i] = packetDataPointer + offset;
+        deinterleavedPacketDataChannelPointers[i] = mInputBuffer.Data() + offset;
         offset += framesPerPacketFarend;
       }
-      // deinterleave back into the FarEndAudioChunk buffer to save an alloc.
-      // There is enough room because either there is the same number of
-      // channels/frames or we've just downmixed.
-      Deinterleave(interleavedFarend,
+      // Deinterleave, prepare a channel pointers array, with enough storage for
+      // the frames.
+      //
+      // If this is a platform that uses s16 for audio input and output,
+      // convert to floats, the APM API we use only accepts floats.
+      DeinterleaveAndConvertBuffer(interleavedFarend,
                    framesPerPacketFarend,
                    channelCountFarend,
                    deinterleavedPacketDataChannelPointers.Elements());
       // Having the same config for input and output means we potentially save
-      // some CPU. We won't need the output here, the API forces us to set a
-      // valid pointer with enough space.
+      // some CPU.
       StreamConfig inputConfig(mAudioOutputObserver->PlayoutFrequency(),
                                channelCountFarend,
                                false /* we don't use typing detection*/);
       StreamConfig outputConfig = inputConfig;
-      // Prepare a channel pointers array, with enough storage for the
-      // frames.
-      //
-      // If this is a platform that uses s16 for audio input and output,
-      // convert to floats, the APM API we use only accepts floats.
-      float* inputData = nullptr;
-#ifdef MOZ_SAMPLE_TYPE_S16
-      // Convert to floats, use mInputBuffer for this.
-      size_t sampleCount = framesPerPacketFarend * channelCountFarend;
-      if (mInputBuffer.Length() < sampleCount) {
-        mInputBuffer.SetLength(sampleCount);
-      }
-      ConvertAudioSamples(buffer->mData, mInputBuffer.Data(), sampleCount);
-      inputData = mInputBuffer.Data();
-#else // MOZ_SAMPLE_TYPE_F32
-      inputData = buffer->mData;
-#endif
-      AutoTArray<float*, MAX_CHANNELS> channelsPointers;
-      channelsPointers.SetLength(channelCountFarend);
-      offset = 0;
-      for (size_t i = 0; i < channelsPointers.Length(); ++i) {
-        channelsPointers[i] = inputData + offset;
-        offset += framesPerPacketFarend;
-      }
       // Passing the same pointers here saves a copy inside this function.
       int err =
-        mAudioProcessing->ProcessReverseStream(channelsPointers.Elements(),
+        mAudioProcessing->ProcessReverseStream(deinterleavedPacketDataChannelPointers.Elements(),
                                                inputConfig,
                                                outputConfig,
-                                               channelsPointers.Elements());
+                                               deinterleavedPacketDataChannelPointers.Elements());
       if (err) {
         MOZ_LOG(GetMediaManagerLog(), LogLevel::Error,
@@ -843,6 +821,8 @@ MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
                                 mPacketizer->Channels();
     if (mInputBuffer.Length() < samplesPerPacket) {
       mInputBuffer.SetLength(samplesPerPacket);
     }
+    if (mDeinterleavedBuffer.Length() < samplesPerPacket) {
+      mDeinterleavedBuffer.SetLength(samplesPerPacket);
+    }
     float* packet = mInputBuffer.Data();