/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioSegment.h"

#include "AudioStream.h"
#include "AudioMixer.h"
#include "AudioChannelFormat.h"
#include "Latency.h"
#include "speex/speex_resampler.h"

namespace mozilla {
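
// Interleaves planar (one buffer per channel) source data of sample type
// SrcT into aOutput as sample type DestT, scaling every sample by aVolume.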
template <class SrcT, class DestT>
static void
InterleaveAndConvertBuffer(const SrcT** aSourceChannels,
                           int32_t aLength, float aVolume,
                           int32_t aChannels,
                           DestT* aOutput)
{
  DestT* output = aOutput;
  for (int32_t i = 0; i < aLength; ++i) {
    for (int32_t channel = 0; channel < aChannels; ++channel) {
      float v = AudioSampleToFloat(aSourceChannels[channel][i])*aVolume;
      *output = FloatToAudioSample<DestT>(v);
      ++output;
    }
  }
}
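
// Untyped wrapper: dispatches on aSourceFormat and forwards the channel
// pointers, cast to the matching sample type, to the template above.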
void
InterleaveAndConvertBuffer(const void** aSourceChannels,
                           AudioSampleFormat aSourceFormat,
                           int32_t aLength, float aVolume,
                           int32_t aChannels,
                           AudioDataValue* aOutput)
{
  switch (aSourceFormat) {
  case AUDIO_FORMAT_FLOAT32:
    InterleaveAndConvertBuffer(reinterpret_cast<const float**>(aSourceChannels),
                               aLength,
                               aVolume,
                               aChannels,
                               aOutput);
    break;
  case AUDIO_FORMAT_S16:
    InterleaveAndConvertBuffer(reinterpret_cast<const int16_t**>(aSourceChannels),
                               aLength,
                               aVolume,
                               aChannels,
                               aOutput);
    break;
  }
}
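
// Scales the segment by folding aVolume into each chunk's volume; the
// per-sample multiplication is deferred until the chunk is interleaved
// (see WriteTo below).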
void
AudioSegment::ApplyVolume(float aVolume)
{
  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    ci->mVolume *= aVolume;
  }
}

static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48kHz audio */
static const uint8_t gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*AUDIO_PROCESSING_FRAMES] = {0};
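
// Converts the planar input to float if necessary, downmixes it to
// aOutputChannels channels, then interleaves the result into aOutput,
// applying aVolume during the final interleaving pass.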
void
DownmixAndInterleave(const nsTArray<const void*>& aChannelData,
                     AudioSampleFormat aSourceFormat, int32_t aDuration,
                     float aVolume, uint32_t aOutputChannels,
                     AudioDataValue* aOutput)
{
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
  nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixConversionBuffer;
  nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixOutputBuffer;

  channelData.SetLength(aChannelData.Length());
  if (aSourceFormat != AUDIO_FORMAT_FLOAT32) {
    NS_ASSERTION(aSourceFormat == AUDIO_FORMAT_S16, "unknown format");
    downmixConversionBuffer.SetLength(aDuration*aChannelData.Length());
    for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
      float* conversionBuf = downmixConversionBuffer.Elements() + (i*aDuration);
      const int16_t* sourceBuf = static_cast<const int16_t*>(aChannelData[i]);
      for (uint32_t j = 0; j < (uint32_t)aDuration; ++j) {
        conversionBuf[j] = AudioSampleToFloat(sourceBuf[j]);
      }
      channelData[i] = conversionBuf;
    }
  } else {
    for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
      channelData[i] = aChannelData[i];
    }
  }

  downmixOutputBuffer.SetLength(aDuration*aOutputChannels);
  nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannelBuffers;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> outputChannelData;
  outputChannelBuffers.SetLength(aOutputChannels);
  outputChannelData.SetLength(aOutputChannels);
  for (uint32_t i = 0; i < (uint32_t)aOutputChannels; ++i) {
    outputChannelData[i] = outputChannelBuffers[i] =
      downmixOutputBuffer.Elements() + aDuration*i;
  }
  if (channelData.Length() > aOutputChannels) {
    AudioChannelsDownMix(channelData, outputChannelBuffers.Elements(),
                         aOutputChannels, aDuration);
  }
  InterleaveAndConvertBuffer(outputChannelData.Elements(), AUDIO_FORMAT_FLOAT32,
                             aDuration, aVolume, aOutputChannels, aOutput);
}
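
// Resamples every chunk in the segment in place, at the rate pair already
// configured on aResampler. All chunks are expected to share the format of
// the first chunk.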
void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler)
{
  uint32_t inRate, outRate;

  if (mChunks.IsEmpty()) {
    return;
  }

  speex_resampler_get_rate(aResampler, &inRate, &outRate);

  switch (mChunks[0].mBufferFormat) {
  case AUDIO_FORMAT_FLOAT32:
    Resample<float>(aResampler, inRate, outRate);
    break;
  case AUDIO_FORMAT_S16:
    Resample<int16_t>(aResampler, inRate, outRate);
    break;
  default:
    MOZ_ASSERT(false);
    break;
  }
}
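
// Interleaves the whole segment into one buffer, up- or down-mixing each
// chunk to the stream's channel count, then writes the buffer to aOutput
// and, if aMixer is non-null, feeds it to the mixer as well.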
void
AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
{
  uint32_t outputChannels = aOutput->GetChannels();
  nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;

  if (!GetDuration()) {
    return;
  }

  uint32_t outBufferLength = GetDuration() * outputChannels;
  buf.SetLength(outBufferLength);

  // Offset in the buffer that will eventually be sent to the AudioStream.
  uint32_t offset = 0;

  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    uint32_t frames = c.mDuration;

    // If we have written data in the past, or we have real (non-silent) data
    // to write, we can proceed. Otherwise, it means we just started the
    // AudioStream, and we don't have real data to write to it (just silence).
    // To avoid overbuffering in the AudioStream, we simply drop the silence
    // here. The stream will underrun and output silence anyway.
    if (c.mBuffer || aOutput->GetWritten()) {
      if (c.mBuffer) {
        channelData.SetLength(c.mChannelData.Length());
        for (uint32_t i = 0; i < channelData.Length(); ++i) {
          channelData[i] = c.mChannelData[i];
        }

        if (channelData.Length() < outputChannels) {
          // Up-mix. Note that this might actually make channelData have more
          // than outputChannels temporarily.
          AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
        }

        if (channelData.Length() > outputChannels) {
          // Down-mix.
          DownmixAndInterleave(channelData, c.mBufferFormat, frames,
                               c.mVolume, outputChannels, buf.Elements() + offset);
        } else {
          InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
                                     frames, c.mVolume,
                                     outputChannels,
                                     buf.Elements() + offset);
        }
      } else {
        // Silent chunk: assumes that an all-zero bit pattern == 0.0f.
        memset(buf.Elements() + offset, 0, outputChannels * frames * sizeof(AudioDataValue));
      }
    }

    offset += frames * outputChannels;

    if (!c.mTimeStamp.IsNull()) {
      TimeStamp now = TimeStamp::Now();
      // It would be more efficient to convert c.mTimeStamp to milliseconds
      // at creation time and pass that here.
      LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
              (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
    }
  }

  aOutput->Write(buf.Elements(), GetDuration(), &(mChunks[mChunks.Length() - 1].mTimeStamp));

  if (aMixer) {
    aMixer->Mix(buf.Elements(), outputChannels, GetDuration());
  }
  aOutput->Start();
}

}