Bug 886886: Remove 44100->44000 kludges r=derf

Randell Jesup 2013-07-21 03:47:40 -04:00
Parent 03b686d2df
Commit bc2879abc2
14 changed files with 61 additions and 267 deletions
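The kludge being removed stored sample rates in kHz, with 44 standing in for 44100 Hz; every consumer then had to special-case that value. After this commit the fields simply hold Hz. A minimal sketch of the two conventions (OldRateInHz/NewRateInHz are hypothetical helpers, not code from the tree):

    // Old convention: rate kept in kHz, with 44 really meaning 44100 Hz.
    int OldRateInHz(int sampling_freq_khz) {
        return (sampling_freq_khz == 44) ? 44100 : sampling_freq_khz * 1000;
    }

    // New convention: the field holds Hz, so 44100 is representable directly
    // and no special case is needed anywhere downstream.
    int NewRateInHz(int sampling_freq_hz) {
        return sampling_freq_hz;
    }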

View file

@ -134,8 +134,8 @@ AudioDeviceAndroidJni::AudioDeviceAndroidJni(const int32_t id) :
_playError(0), _recWarning(0), _recError(0), _delayPlayout(0),
_delayRecording(0),
_AGC(false),
_samplingFreqIn((N_REC_SAMPLES_PER_SEC/1000)),
_samplingFreqOut((N_PLAY_SAMPLES_PER_SEC/1000)),
_samplingFreqIn((N_REC_SAMPLES_PER_SEC)),
_samplingFreqOut((N_PLAY_SAMPLES_PER_SEC)),
_maxSpeakerVolume(0),
_loudSpeakerOn(false),
_recAudioSource(1), // 1 is AudioSource.MIC which is our default
@ -1379,17 +1379,10 @@ int32_t AudioDeviceAndroidJni::InitPlayout()
// get the method ID
jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
"(I)I");
int samplingFreq = 44100;
if (_samplingFreqOut != 44)
{
samplingFreq = _samplingFreqOut * 1000;
}
int retVal = -1;
// Call java sc object method
jint res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq);
jint res = env->CallIntMethod(_javaScObj, initPlaybackID, _samplingFreqOut);
if (res < 0)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@ -1398,7 +1391,7 @@ int32_t AudioDeviceAndroidJni::InitPlayout()
else
{
// Set the audio device buffer sampling rate
_ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut * 1000);
_ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut);
_playIsInitialized = true;
retVal = 0;
}
@ -1484,18 +1477,11 @@ int32_t AudioDeviceAndroidJni::InitRecording()
// get the method ID
jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
"(II)I");
int samplingFreq = 44100;
if (_samplingFreqIn != 44)
{
samplingFreq = _samplingFreqIn * 1000;
}
int retVal = -1;
// call java sc object method
jint res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
samplingFreq);
_samplingFreqIn);
if (res < 0)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@ -1504,10 +1490,10 @@ int32_t AudioDeviceAndroidJni::InitRecording()
else
{
// Set the audio device buffer sampling rate
_ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn * 1000);
_ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn);
// the init rec function returns a fixed delay
_delayRecording = res / _samplingFreqIn;
_delayRecording = (res * 1000) / _samplingFreqIn;
_recIsInitialized = true;
retVal = 0;
@ -2025,14 +2011,7 @@ int32_t AudioDeviceAndroidJni::SetRecordingSampleRate(
}
// set the recording sample rate to use
if (samplesPerSec == 44100)
{
_samplingFreqIn = 44;
}
else
{
_samplingFreqIn = samplesPerSec / 1000;
}
_samplingFreqIn = samplesPerSec;
// Update the AudioDeviceBuffer
_ptrAudioBuffer->SetRecordingSampleRate(samplesPerSec);
@ -2056,14 +2035,7 @@ int32_t AudioDeviceAndroidJni::SetPlayoutSampleRate(
}
// set the playout sample rate to use
if (samplesPerSec == 44100)
{
_samplingFreqOut = 44;
}
else
{
_samplingFreqOut = samplesPerSec / 1000;
}
_samplingFreqOut = samplesPerSec;
// Update the AudioDeviceBuffer
_ptrAudioBuffer->SetPlayoutSampleRate(samplesPerSec);
@ -2417,11 +2389,7 @@ int32_t AudioDeviceAndroidJni::InitSampleRate()
if (_samplingFreqIn > 0)
{
// read the configured sampling rate
samplingFreq = 44100;
if (_samplingFreqIn != 44)
{
samplingFreq = _samplingFreqIn * 1000;
}
samplingFreq = _samplingFreqIn;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
" Trying configured recording sampling rate %d",
samplingFreq);
@ -2462,14 +2430,7 @@ int32_t AudioDeviceAndroidJni::InitSampleRate()
}
// set the recording sample rate to use
if (samplingFreq == 44100)
{
_samplingFreqIn = 44;
}
else
{
_samplingFreqIn = samplingFreq / 1000;
}
_samplingFreqIn = samplingFreq;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
"Recording sample rate set to (%d)", _samplingFreqIn);
@ -2493,11 +2454,7 @@ int32_t AudioDeviceAndroidJni::InitSampleRate()
if (_samplingFreqOut > 0)
{
// read the configured sampling rate
samplingFreq = 44100;
if (_samplingFreqOut != 44)
{
samplingFreq = _samplingFreqOut * 1000;
}
samplingFreq = _samplingFreqOut;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
" Trying configured playback sampling rate %d",
samplingFreq);
@ -2551,15 +2508,7 @@ int32_t AudioDeviceAndroidJni::InitSampleRate()
}
// set the playback sample rate to use
if (samplingFreq == 44100)
{
_samplingFreqOut = 44;
}
else
{
_samplingFreqOut = samplingFreq / 1000;
}
_samplingFreqOut = samplingFreq;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
"Playback sample rate set to (%d)", _samplingFreqOut);
@ -2672,7 +2621,7 @@ bool AudioDeviceAndroidJni::PlayThreadProcess()
if (_playing)
{
int8_t playBuffer[2 * 480]; // Max 10 ms @ 48 kHz / 16 bit
uint32_t samplesToPlay = _samplingFreqOut * 10;
uint32_t samplesToPlay = _samplingFreqOut / 100;
// ask for new PCM data to be played out using the AudioDeviceBuffer
// ensure that this callback is executed without taking the
@ -2717,7 +2666,7 @@ bool AudioDeviceAndroidJni::PlayThreadProcess()
else if (res > 0)
{
// we are not recording and have got a delay value from playback
_delayPlayout = res / _samplingFreqOut;
_delayPlayout = (res * 1000) / _samplingFreqOut;
}
// If 0 is returned we are recording and then play delay is updated
// in RecordProcess
@ -2815,7 +2764,7 @@ bool AudioDeviceAndroidJni::RecThreadProcess()
if (_recording)
{
uint32_t samplesToRec = _samplingFreqIn * 10;
uint32_t samplesToRec = _samplingFreqIn / 100;
// Call java sc object method to record data to direct buffer
// Will block until data has been recorded (see java sc class),
@ -2832,7 +2781,7 @@ bool AudioDeviceAndroidJni::RecThreadProcess()
}
else
{
_delayPlayout = playDelayInSamples / _samplingFreqOut;
_delayPlayout = (playDelayInSamples * 1000) / _samplingFreqOut;
}
Lock();
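With _samplingFreqIn/_samplingFreqOut now in Hz, the derived quantities in this file change shape: a delay reported in samples becomes milliseconds via samples * 1000 / rate_hz (the old code divided by the kHz value), and a 10 ms block is rate_hz / 100 samples (the old code multiplied the kHz value by 10). A small self-contained check of that arithmetic, using an assumed delay value for illustration:

    #include <cassert>

    int main() {
        const int rate_hz = 44100;       // what _samplingFreqOut now holds
        const int delay_samples = 2205;  // hypothetical device-reported delay

        // Delay in ms: samples * 1000 / Hz.
        assert((delay_samples * 1000) / rate_hz == 50);

        // One 10 ms block: Hz / 100 samples.
        assert(rate_hz / 100 == 441);
        return 0;
    }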

View file

@ -742,11 +742,7 @@ int32_t AudioDeviceAndroidOpenSLES::InitPlayout() {
// Setup the data source structure for the buffer queue.
player_pcm_.formatType = SL_DATAFORMAT_PCM;
player_pcm_.numChannels = N_PLAY_CHANNELS;
if (speaker_sampling_rate_ == 44000) {
player_pcm_.samplesPerSec = 44100 * 1000;
} else {
player_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
}
player_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
player_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
player_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
if (1 == player_pcm_.numChannels) {
@ -866,11 +862,7 @@ int32_t AudioDeviceAndroidOpenSLES::InitRecording() {
// Setup the format of the content in the buffer queue
record_pcm_.formatType = SL_DATAFORMAT_PCM;
record_pcm_.numChannels = N_REC_CHANNELS;
if (speaker_sampling_rate_ == 44000) {
record_pcm_.samplesPerSec = 44100 * 1000;
} else {
record_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
}
record_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
record_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
record_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
if (1 == record_pcm_.numChannels) {
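The * 1000 that survives in this file is not part of the kludge: SL_DATAFORMAT_PCM expresses samplesPerSec in milliHertz, so a rate stored in Hz still needs scaling (44100 Hz becomes 44100000, i.e. SL_SAMPLINGRATE_44_1). A minimal sketch against the standard OpenSL ES headers; MakePcmFormat is a hypothetical helper:

    #include <SLES/OpenSLES.h>

    // Fill a mono 16-bit PCM descriptor from a rate given in Hz.
    SLDataFormat_PCM MakePcmFormat(SLuint32 rate_hz) {
        SLDataFormat_PCM pcm = {};
        pcm.formatType    = SL_DATAFORMAT_PCM;
        pcm.numChannels   = 1;
        pcm.samplesPerSec = rate_hz * 1000;  // OpenSL ES wants milliHz
        pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
        pcm.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
        pcm.channelMask   = SL_SPEAKER_FRONT_CENTER;
        pcm.endianness    = SL_BYTEORDER_LITTLEENDIAN;
        return pcm;
    }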

View file

@ -1332,7 +1332,7 @@ int32_t AudioDeviceIPhone::InitPlayOrRecord() {
// todo: Add 48 kHz (increase buffer sizes). Other fs?
if ((playoutDesc.mSampleRate > 44090.0)
&& (playoutDesc.mSampleRate < 44110.0)) {
_adbSampFreq = 44000;
_adbSampFreq = 44100;
} else if ((playoutDesc.mSampleRate > 15990.0)
&& (playoutDesc.mSampleRate < 16010.0)) {
_adbSampFreq = 16000;

View file

@ -19,8 +19,8 @@
namespace webrtc {
class ThreadWrapper;
const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
const uint32_t N_REC_CHANNELS = 1; // default is mono recording
const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout

View file

@ -79,7 +79,7 @@ AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) :
_outputDeviceIndex(0),
_inputDeviceIsSpecified(false),
_outputDeviceIsSpecified(false),
_samplingFreq(0),
sample_rate_hz_(0),
_recChannels(1),
_playChannels(1),
_playBufType(AudioDeviceModule::kFixedBufferSize),
@ -370,7 +370,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerIsAvailable(bool& available)
}
// Given that InitSpeaker was successful, we know that a valid speaker exists
//
available = true;
// Close the initialized output mixer
@ -804,13 +804,11 @@ int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
return 0;
}
#ifndef WEBRTC_PA_GTALK
// Check if the selected microphone can record stereo.
bool isAvailable(false);
error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
if (!error)
available = isAvailable;
#endif
// Close the initialized input mixer
if (!wasInitialized)
@ -824,12 +822,10 @@ int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
{
#ifndef WEBRTC_PA_GTALK
if (enable)
_recChannels = 2;
else
_recChannels = 1;
#endif
return 0;
}
@ -863,13 +859,11 @@ int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
return -1;
}
#ifndef WEBRTC_PA_GTALK
// Check if the selected speaker can play stereo.
bool isAvailable(false);
error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
if (!error)
available = isAvailable;
#endif
// Close the initialized input mixer
if (!wasInitialized)
@ -883,12 +877,10 @@ int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
{
#ifndef WEBRTC_PA_GTALK
if (enable)
_playChannels = 2;
else
_playChannels = 1;
#endif
return 0;
}
@ -1276,18 +1268,11 @@ int32_t AudioDeviceLinuxPulse::InitPlayout()
" InitSpeaker() failed");
}
// Set sampling rate to use
uint32_t samplingRate = _samplingFreq * 1000;
if (samplingRate == 44000)
{
samplingRate = 44100;
}
// Set the play sample specification
pa_sample_spec playSampleSpec;
playSampleSpec.channels = _playChannels;
playSampleSpec.format = PA_SAMPLE_S16LE;
playSampleSpec.rate = samplingRate;
playSampleSpec.rate = sample_rate_hz_;
// Create a new play stream
_playStream = LATE(pa_stream_new)(_paContext, "playStream",
@ -1307,7 +1292,7 @@ int32_t AudioDeviceLinuxPulse::InitPlayout()
if (_ptrAudioBuffer)
{
// Update audio buffer with the selected parameters
_ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreq * 1000);
_ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
_ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
}
@ -1356,7 +1341,7 @@ int32_t AudioDeviceLinuxPulse::InitPlayout()
}
// num samples in bytes * num channels
_playbackBufferSize = _samplingFreq * 10 * 2 * _playChannels;
_playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
_playbackBufferUnused = _playbackBufferSize;
_playBuffer = new int8_t[_playbackBufferSize];
@ -1402,18 +1387,11 @@ int32_t AudioDeviceLinuxPulse::InitRecording()
" InitMicrophone() failed");
}
// Set sampling rate to use
uint32_t samplingRate = _samplingFreq * 1000;
if (samplingRate == 44000)
{
samplingRate = 44100;
}
// Set the rec sample specification
pa_sample_spec recSampleSpec;
recSampleSpec.channels = _recChannels;
recSampleSpec.format = PA_SAMPLE_S16LE;
recSampleSpec.rate = samplingRate;
recSampleSpec.rate = sample_rate_hz_;
// Create a new rec stream
_recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec,
@ -1432,7 +1410,7 @@ int32_t AudioDeviceLinuxPulse::InitRecording()
if (_ptrAudioBuffer)
{
// Update audio buffer with the selected parameters
_ptrAudioBuffer->SetRecordingSampleRate(_samplingFreq * 1000);
_ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
_ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
}
@ -1475,7 +1453,7 @@ int32_t AudioDeviceLinuxPulse::InitRecording()
_configuredLatencyRec = latency;
}
_recordBufferSize = _samplingFreq * 10 * 2 * _recChannels;
_recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
_recordBufferUsed = 0;
_recBuffer = new int8_t[_recordBufferSize];
@ -1985,17 +1963,7 @@ void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(
void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(const pa_server_info *i)
{
// Use PA native sampling rate
uint32_t paSampleRate = i->sample_spec.rate;
if (paSampleRate == 44100)
{
#ifdef WEBRTC_PA_GTALK
paSampleRate = 48000;
#else
paSampleRate = 44000;
#endif
}
_samplingFreq = paSampleRate / 1000;
sample_rate_hz_ = i->sample_spec.rate;
// Copy the PA server version
strncpy(_paServerVersion, i->server_version, 31);
@ -2052,13 +2020,6 @@ void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p)
int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
{
/*int32_t index = 0;
int32_t partIndex = 0;
int32_t partNum = 1;
int32_t minVersion[3] = {0, 9, 15};
bool versionOk = false;
char str[8] = {0};*/
PaLock();
pa_operation* paOperation = NULL;
@ -2074,54 +2035,6 @@ int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
" checking PulseAudio version: %s", _paServerVersion);
/* Saved because it may turn out that we need to check the version in the future
while (true)
{
if (_paServerVersion[index] == '.')
{
index++;
str[partIndex] = '\0';
partIndex = 0;
if(partNum == 2)
{
if (atoi(str) < minVersion[1])
{
break;
}
partNum = 3;
}
else
{
if (atoi(str) > minVersion[0])
{
versionOk = true;
break;
}
partNum = 2;
}
}
else if (_paServerVersion[index] == '\0' || _paServerVersion[index] == '-')
{
str[partIndex] = '\0';
if (atoi(str) >= minVersion[2])
{
versionOk = true;
}
break;
}
str[partIndex] = _paServerVersion[index];
index++;
partIndex++;
}
if (!versionOk)
{
return -1;
}
*/
return 0;
}
@ -2131,7 +2044,7 @@ int32_t AudioDeviceLinuxPulse::InitSamplingFrequency()
pa_operation* paOperation = NULL;
// Get the server info and update _samplingFreq
// Get the server info and update sample_rate_hz_
paOperation = LATE(pa_context_get_server_info)(_paContext,
PaServerInfoCallback, this);
@ -2354,11 +2267,11 @@ int32_t AudioDeviceLinuxPulse::InitPulseAudio()
}
// Initialize sampling frequency
if (InitSamplingFrequency() < 0 || _samplingFreq == 0)
if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" failed to initialize sampling frequency, set to %d",
_samplingFreq);
" failed to initialize sampling frequency, set to %d Hz",
sample_rate_hz_);
return -1;
}
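PulseAudio's pa_sample_spec.rate is specified in Hz, which is why sample_rate_hz_ can now be copied into it directly, with no 44000-to-44100 fixup on the way. A minimal sketch with a hypothetical helper name:

    #include <cstdint>
    #include <pulse/sample.h>

    // Build a 16-bit little-endian sample spec straight from a rate in Hz.
    pa_sample_spec MakeSpec(uint32_t rate_hz, uint8_t channels) {
        pa_sample_spec spec;
        spec.format   = PA_SAMPLE_S16LE;
        spec.rate     = rate_hz;   // e.g. 44100; no translation needed
        spec.channels = channels;
        return spec;
    }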

View file

@ -17,9 +17,6 @@
#include <pulse/pulseaudio.h>
// Set this define to make the code behave like in GTalk/libjingle
//#define WEBRTC_PA_GTALK
// We define this flag if it's missing from our headers, because we want to be
// able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY
// if run against a recent version of the library.
@ -311,7 +308,7 @@ private:
bool _inputDeviceIsSpecified;
bool _outputDeviceIsSpecified;
uint32_t _samplingFreq;
int sample_rate_hz_;
uint8_t _recChannels;
uint8_t _playChannels;

View file

@ -1769,10 +1769,10 @@ TEST_F(AudioDeviceAPITest, RecordingSampleRate) {
EXPECT_EQ(48000, sampleRate);
#elif defined(ANDROID)
TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
#elif defined(WEBRTC_IOS)
TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
(sampleRate == 8000));
#endif
@ -1788,10 +1788,10 @@ TEST_F(AudioDeviceAPITest, PlayoutSampleRate) {
EXPECT_EQ(48000, sampleRate);
#elif defined(ANDROID)
TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
#elif defined(WEBRTC_IOS)
TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
(sampleRate == 8000));
#endif
}

View file

@ -347,12 +347,6 @@ int32_t AudioTransportImpl::NeedMorePlayData(
int32_t fsInHz(samplesPerSecIn);
int32_t fsOutHz(samplesPerSec);
if (fsInHz == 44100)
fsInHz = 44000;
if (fsOutHz == 44100)
fsOutHz = 44000;
if (nChannelsIn == 2 && nBytesPerSampleIn == 4)
{
// input is stereo => we will resample in stereo
@ -1244,7 +1238,7 @@ int32_t FuncTestManager::TestAudioTransport()
if (samplesPerSec == 48000) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile48.c_str()));
} else if (samplesPerSec == 44100 || samplesPerSec == 44000) {
} else if (samplesPerSec == 44100) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile44.c_str()));
} else if (samplesPerSec == 16000) {
@ -1477,7 +1471,7 @@ int32_t FuncTestManager::TestSpeakerVolume()
if (48000 == samplesPerSec) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile48.c_str()));
} else if (44100 == samplesPerSec || samplesPerSec == 44000) {
} else if (44100 == samplesPerSec) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile44.c_str()));
} else if (samplesPerSec == 16000) {
@ -1578,7 +1572,7 @@ int32_t FuncTestManager::TestSpeakerMute()
EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
if (48000 == samplesPerSec)
_audioTransport->SetFilePlayout(true, _playoutFile48.c_str());
else if (44100 == samplesPerSec || 44000 == samplesPerSec)
else if (44100 == samplesPerSec)
_audioTransport->SetFilePlayout(true, _playoutFile44.c_str());
else
{

View file

@ -430,8 +430,6 @@ AudioDeviceWindowsCore::AudioDeviceWindowsCore(const int32_t id) :
_playChannels(2),
_sndCardPlayDelay(0),
_sndCardRecDelay(0),
_sampleDriftAt48kHz(0),
_driftAccumulator(0),
_writtenSamples(0),
_readSamples(0),
_playAcc(0),
@ -2319,11 +2317,6 @@ int32_t AudioDeviceWindowsCore::InitPlayout()
_playSampleRate = Wfx.nSamplesPerSec;
_devicePlaySampleRate = Wfx.nSamplesPerSec; // The device itself continues to run at 44.1 kHz.
_devicePlayBlockSize = Wfx.nSamplesPerSec/100;
if (_playBlockSize == 441)
{
_playSampleRate = 44000; // we are actually running at 44000 Hz and *not* 44100 Hz
_playBlockSize = 440; // adjust to size we can handle
}
_playChannels = Wfx.nChannels;
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this rendering format:");
@ -2340,8 +2333,6 @@ int32_t AudioDeviceWindowsCore::InitPlayout()
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playChannels : %d", _playChannels);
}
_Get44kHzDrift();
// Create a rendering stream.
//
// ****************************************************************************
@ -2659,11 +2650,6 @@ int32_t AudioDeviceWindowsCore::InitRecording()
_recSampleRate = Wfx.nSamplesPerSec;
_recBlockSize = Wfx.nSamplesPerSec/100;
_recChannels = Wfx.nChannels;
if (_recBlockSize == 441)
{
_recSampleRate = 44000; // we are actually using 44000 Hz and *not* 44100 Hz
_recBlockSize = 440; // adjust to size we can handle
}
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this capturing format:");
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag : 0x%X (%u)", Wfx.wFormatTag, Wfx.wFormatTag);
@ -2679,8 +2665,6 @@ int32_t AudioDeviceWindowsCore::InitRecording()
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recChannels : %d", _recChannels);
}
_Get44kHzDrift();
// Create a capturing stream.
hr = _ptrClientIn->Initialize(
AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
@ -4104,15 +4088,9 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread()
if (_ptrAudioBuffer)
{
_ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer, _recBlockSize);
_driftAccumulator += _sampleDriftAt48kHz;
const int32_t clockDrift =
static_cast<int32_t>(_driftAccumulator);
_driftAccumulator -= clockDrift;
_ptrAudioBuffer->SetVQEData(sndCardPlayDelay,
sndCardRecDelay,
clockDrift);
0);
QueryPerformanceCounter(&t1); // measure time: START
@ -5145,29 +5123,6 @@ void AudioDeviceWindowsCore::_SetThreadName(DWORD dwThreadID, LPCSTR szThreadNam
}
}
// ----------------------------------------------------------------------------
// _Get44kHzDrift
// ----------------------------------------------------------------------------
void AudioDeviceWindowsCore::_Get44kHzDrift()
{
// We aren't able to resample at 44.1 kHz. Instead we run at 44 kHz and push/pull
// from the engine faster to compensate. If only one direction is set to 44.1 kHz
// the result is indistinguishable from clock drift to the AEC. We can compensate
// internally if we inform the AEC about the drift.
_sampleDriftAt48kHz = 0;
_driftAccumulator = 0;
if (_playSampleRate == 44000 && _recSampleRate != 44000)
{
_sampleDriftAt48kHz = 480.0f/440;
}
else if(_playSampleRate != 44000 && _recSampleRate == 44000)
{
_sampleDriftAt48kHz = -480.0f/441;
}
}
// ----------------------------------------------------------------------------
// WideToUTF8
// ----------------------------------------------------------------------------
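The removed _Get44kHzDrift documents the old workaround: when one direction ran at 44 000 Hz while the device stayed at 44 100 Hz, the roughly 0.23% rate mismatch was handed to the AEC as clock drift, expressed as samples per 10 ms at 48 kHz. With both paths now at 44 100 Hz the term is simply 0, which is what the new SetVQEData(..., 0) call passes. A rough reconstruction of the deleted constants, as an assumption-checking sketch:

    #include <cstdio>

    int main() {
        // Playout pulled at 44000 Hz from a device really running at 44100 Hz:
        // 480 samples per 10 ms at 48 kHz, scaled by the relative rate error.
        const float drift_play = 480.0f * (44100.0f - 44000.0f) / 44000.0f;  // == 480/440, ~1.09
        const float drift_rec  = 480.0f * (44000.0f - 44100.0f) / 44100.0f;  // == -480/441, ~-1.09
        std::printf("%.4f %.4f\n", drift_play, drift_rec);
        return 0;
    }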

View file

@ -67,7 +67,7 @@ class ScopedCOMInitializer {
}
bool succeeded() const { return SUCCEEDED(hr_); }
private:
void Initialize(COINIT init) {
hr_ = CoInitializeEx(NULL, init);
@ -268,8 +268,6 @@ private:
int32_t _GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice);
int32_t _GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice);
void _Get44kHzDrift();
// Converts from wide-char to UTF-8 if UNICODE is defined.
// Does nothing if UNICODE is undefined.
char* WideToUTF8(const TCHAR* src) const;
@ -336,9 +334,6 @@ private: // WASAPI
UINT64 _readSamples;
uint32_t _sndCardRecDelay;
float _sampleDriftAt48kHz;
float _driftAccumulator;
uint16_t _recChannelsPrioList[2];
uint16_t _playChannelsPrioList[2];

View file

@ -617,13 +617,13 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
// special cases?
if(_wavFormatObj.nSamplesPerSec == 44100)
{
_readSizeBytes = 440 * _wavFormatObj.nChannels *
_readSizeBytes = 441 * _wavFormatObj.nChannels *
(_wavFormatObj.nBitsPerSample / 8);
} else if(_wavFormatObj.nSamplesPerSec == 22050) {
_readSizeBytes = 220 * _wavFormatObj.nChannels *
_readSizeBytes = 220 * _wavFormatObj.nChannels * // XXX inexact!
(_wavFormatObj.nBitsPerSample / 8);
} else if(_wavFormatObj.nSamplesPerSec == 11025) {
_readSizeBytes = 110 * _wavFormatObj.nChannels *
_readSizeBytes = 110 * _wavFormatObj.nChannels * // XXX inexact!
(_wavFormatObj.nBitsPerSample / 8);
} else {
_readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
@ -685,22 +685,22 @@ int32_t ModuleFileUtility::InitWavCodec(uint32_t samplesPerSec,
{
strcpy(codec_info_.plname, "L16");
_codecId = kCodecL16_16kHz;
codec_info_.pacsize = 110;
codec_info_.plfreq = 11000;
codec_info_.pacsize = 110; // XXX inexact!
codec_info_.plfreq = 11000; // XXX inexact!
}
else if(samplesPerSec == 22050)
{
strcpy(codec_info_.plname, "L16");
_codecId = kCodecL16_16kHz;
codec_info_.pacsize = 220;
codec_info_.plfreq = 22000;
codec_info_.pacsize = 220; // XXX inexact!
codec_info_.plfreq = 22000; // XXX inexact!
}
else if(samplesPerSec == 44100)
{
strcpy(codec_info_.plname, "L16");
_codecId = kCodecL16_16kHz;
codec_info_.pacsize = 440;
codec_info_.plfreq = 44000;
codec_info_.pacsize = 441;
codec_info_.plfreq = 44100;
}
else if(samplesPerSec == 48000)
{
@ -1133,8 +1133,6 @@ int32_t ModuleFileUtility::WriteWavHeader(
{
// Frame size in bytes for 10 ms of audio.
// TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
// be taken into consideration here!
int32_t frameSize = (freq / 100) * bytesPerSample * channels;
// Calculate the number of full frames that the wave file contain.
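The frame sizes touched here are 10 ms worth of samples: 44100 / 100 = 441 exactly, while 22050 / 100 and 11025 / 100 do not divide evenly, which is what the added "XXX inexact!" comments flag (the code rounds down to 220 and 110). A small check, assuming 16-bit mono:

    #include <cassert>

    int main() {
        const int bytes_per_sample = 2;  // 16-bit PCM
        const int channels = 1;

        // 44.1 kHz divides evenly into 10 ms frames: 441 samples, 882 bytes mono.
        assert(44100 / 100 * channels * bytes_per_sample == 882);

        // 22.05 kHz and 11.025 kHz do not divide evenly; integer division
        // rounds down to 220 and 110 samples.
        assert(22050 % 100 != 0 && 22050 / 100 == 220);
        assert(11025 % 100 != 0 && 11025 / 100 == 110);
        return 0;
    }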

View file

@ -94,7 +94,7 @@ int32_t FilePlayerImpl::Frequency() const
{
return 32000;
}
else if(_codec.plfreq == 44000)
else if(_codec.plfreq == 44100 || _codec.plfreq == 44000 ) // XXX just 44100?
{
return 32000;
}

View file

@ -96,11 +96,12 @@ public:
const int16_t speechData10ms[], int lengthSamples,
int samplingFreqHz, int current_delay_ms) = 0;
// This function gets audio for an external playout sink.
// During transmission, this function should be called every ~10 ms
// to obtain a new 10 ms frame of audio. The length of the block will
// be 160, 320, 440 or 480 samples (for 16, 32, 44 or 48 kHz sampling
// rates respectively).
// be 160, 320, 440 or 480 samples (for 16000, 32000, 44100 or 48000
// Hz sampling rates respectively).
virtual int ExternalPlayoutGetData(
int16_t speechData10ms[], int samplingFreqHz,
int current_delay_ms, int& lengthSamples) = 0;
@ -108,7 +109,7 @@ public:
// Pulls an audio frame from the specified |channel| for external mixing.
// If the |desired_sample_rate_hz| is 0, the signal will be returned with
// its native frequency, otherwise it will be resampled. Valid frequencies
// are 16, 22, 32, 44 or 48 kHz.
// are 16000, 22050, 32000, 44100 or 48000 Hz.
virtual int GetAudioFrame(int channel, int desired_sample_rate_hz,
AudioFrame* frame) = 0;

View file

@ -191,7 +191,7 @@ int VoEExternalMediaImpl::ExternalRecordingInsertData(
return -1;
}
if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
(48000 != samplingFreqHz) && (44000 != samplingFreqHz))
(48000 != samplingFreqHz) && (44100 != samplingFreqHz))
{
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetExternalRecordingStatus() invalid sample rate");
@ -300,7 +300,7 @@ int VoEExternalMediaImpl::ExternalPlayoutGetData(
return -1;
}
if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
(48000 != samplingFreqHz) && (44000 != samplingFreqHz))
(48000 != samplingFreqHz) && (44100 != samplingFreqHz))
{
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"ExternalPlayoutGetData() invalid sample rate");