Mirror of https://github.com/mozilla/gecko-dev.git
Backed out changeset b44e089bcfe7 (bug 1397793). r=backout a=backout
--HG-- extra : histedit_source : 2ccb7b753a4be80fb820ed9f1a0e6bccab1320b0
Parent
fe440c72a7
Commit
281d99b3d3
@@ -74,6 +74,13 @@ WebrtcAudioConduit::~WebrtcAudioConduit()
     delete codec;
   }
 
+  // The first one of a pair to be deleted shuts down media for both
+  if(mPtrVoEXmedia)
+  {
+    mPtrVoEXmedia->SetExternalRecordingStatus(false);
+    mPtrVoEXmedia->SetExternalPlayoutStatus(false);
+  }
+
   //Deal with the transport
   if(mPtrVoENetwork)
   {
@@ -350,9 +357,8 @@ MediaConduitErrorCode WebrtcAudioConduit::Init()
     return kMediaConduitSessionNotInited;
   }
 
-  // Init the engine with a fake audio device (we're using cubeb for audio input
-  // and output anyways).
-  if(mPtrVoEBase->Init(mFakeAudioDevice.get()) == -1)
+  // init the engine with our audio device layer
+  if(mPtrVoEBase->Init() == -1)
   {
     CSFLogError(LOGTAG, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
     return kMediaConduitSessionNotInited;
@@ -417,7 +423,21 @@ MediaConduitErrorCode WebrtcAudioConduit::Init()
     return kMediaConduitTransportRegistrationFail;
   }
 
-  CSFLogDebug(LOGTAG, "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
+  if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
+  {
+    CSFLogError(LOGTAG, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
+                mPtrVoEBase->LastError());
+    return kMediaConduitExternalPlayoutError;
+  }
+
+  if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
+  {
+    CSFLogError(LOGTAG, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
+                mPtrVoEBase->LastError());
+    return kMediaConduitExternalRecordingError;
+  }
+
+  CSFLogDebug(LOGTAG , "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
   return kMediaConduitNoError;
 }
 
@@ -697,7 +717,7 @@ WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
   }
 
   capture_delay = mCaptureDelay;
-  // Insert the samples
+  //Insert the samples
   mPtrVoEBase->audio_transport()->PushCaptureData(mChannel, audio_data,
                                                   sizeof(audio_data[0])*8, // bits
                                                   samplingFreqHz,
@@ -709,9 +729,9 @@ WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
 
 MediaConduitErrorCode
 WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
-                                  int32_t samplingFreqHz,
-                                  int32_t capture_delay,
-                                  int& lengthSamples)
+                                  int32_t samplingFreqHz,
+                                  int32_t capture_delay,
+                                  int& lengthSamples)
 {
 
   CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
@@ -752,9 +772,11 @@ WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
 
   lengthSamples = 0; //output paramter
 
-  if (mPtrVoEXmedia->GetAudioFrame(mChannel,
-                                   samplingFreqHz,
-                                   &mAudioFrame) != 0) {
+  if(mPtrVoEXmedia->ExternalPlayoutGetData( speechData,
+                                            samplingFreqHz,
+                                            capture_delay,
+                                            lengthSamples) == -1)
+  {
     int error = mPtrVoEBase->LastError();
     CSFLogError(LOGTAG, "%s Getting audio data Failed %d", __FUNCTION__, error);
     if(error == VE_RUNTIME_PLAY_ERROR)
@@ -764,11 +786,6 @@ WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
     return kMediaConduitUnknownError;
   }
 
-  // XXX Annoying, have to copy to our buffers -- refactor?
-  lengthSamples = mAudioFrame.samples_per_channel_ * mAudioFrame.num_channels_;
-  PodCopy(speechData, mAudioFrame.data_,
-          lengthSamples);
-
   // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
   mSamples += lengthSamples;
   if (mSamples >= mLastSyncLog + samplingFreqHz) {
@@ -947,13 +964,6 @@ WebrtcAudioConduit::StartReceiving()
     return kMediaConduitUnknownError;
   }
 
-  // we can't call GetAudioFrame() if we don't enable "external" mixing
-  if(mPtrVoEXmedia->SetExternalMixing(mChannel, true) == -1)
-  {
-    CSFLogError(LOGTAG, "%s SetExternalMixing Failed", __FUNCTION__);
-    return kMediaConduitPlayoutError;
-  }
-
   if(mPtrVoEBase->StartPlayout(mChannel) == -1)
   {
     CSFLogError(LOGTAG, "%s Starting playout Failed", __FUNCTION__);
 
@@ -1020,7 +1020,8 @@ static void StartTrack(MediaStream* aSource,
       segment_(aSegment) {}
 
   void Run() override {
-    TrackRate track_rate = mStream->GraphRate();
+    TrackRate track_rate = segment_->GetType() == MediaSegment::AUDIO ?
+                           WEBRTC_DEFAULT_SAMPLE_RATE : mStream->GraphRate();
     StreamTime current_end = mStream->GetTracksEnd();
     MOZ_MTLOG(ML_DEBUG, "current_end = " << current_end);
     TrackTicks current_ticks =
@@ -1042,7 +1043,7 @@ static void StartTrack(MediaStream* aSource,
       MOZ_MTLOG(ML_DEBUG, "Calling AddAudioTrack");
       mStream->AsSourceStream()->AddAudioTrack(
           kAudioTrack,
-          track_rate,
+          WEBRTC_DEFAULT_SAMPLE_RATE,
           0,
           static_cast<AudioSegment*>(segment_.forget()));
     } else {
 
@@ -8,6 +8,27 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+// In some cases it is desirable to use an audio source or sink which may
+// not be available to the VoiceEngine, such as a DV camera. This sub-API
+// contains functions that allow for the use of such external recording
+// sources and playout sinks. It also describes how recorded data, or data
+// to be played out, can be modified outside the VoiceEngine.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEMediaProcess media = VoEMediaProcess::GetInterface(voe);
+// base->Init();
+// ...
+// media->SetExternalRecordingStatus(true);
+// ...
+// base->Terminate();
+// base->Release();
+// media->Release();
+// VoiceEngine::Delete(voe);
+//
 #ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
 #define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
 
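For orientation, here is a minimal sketch of the pull-based playout loop this header enables. It assumes a VoEExternalMedia* named media obtained like the interfaces in the usage example above, with playout already started; KeepPlaying() and SleepMs() are placeholders for caller logic, not code from this changeset:

    // Hypothetical caller of ExternalPlayoutGetData (illustrative only).
    int16_t frame[2 * 480];  // one 10 ms frame, worst case: 48000 Hz stereo
    int lengthSamples = 0;
    while (KeepPlaying()) {
      // Pull the next 10 ms frame at 16000 Hz with no extra sink delay.
      if (media->ExternalPlayoutGetData(frame, 16000, 0, lengthSamples) == -1) {
        break;  // base->LastError() has the reason
      }
      // ... hand lengthSamples samples to the external sink ...
      SleepMs(10);  // pace at roughly one frame per 10 ms
    }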
@@ -64,6 +85,38 @@ class WEBRTC_DLLEXPORT VoEExternalMedia {
   // media for the specified |channel| and |type|.
   virtual int DeRegisterExternalMediaProcessing(int channel,
                                                 ProcessingTypes type) = 0;
+
+  // Toogles state of external recording.
+  virtual int SetExternalRecordingStatus(bool enable) = 0;
+
+  // Toogles state of external playout.
+  virtual int SetExternalPlayoutStatus(bool enable) = 0;
+
+  // This function accepts externally recorded audio. During transmission,
+  // this method should be called at as regular an interval as possible
+  // with frames of corresponding size.
+  virtual int ExternalRecordingInsertData(
+      const int16_t speechData10ms[], int lengthSamples,
+      int samplingFreqHz, int current_delay_ms) = 0;
+
+
+  // This function inserts audio written to the OS audio drivers for use
+  // as the far-end signal for AEC processing. The length of the block
+  // must be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or
+  // 48000 kHz sampling rates respectively).
+  virtual int ExternalPlayoutData(
+      int16_t speechData10ms[], int samplingFreqHz, int num_channels,
+      int& lengthSamples) = 0;
+
+  // This function gets audio for an external playout sink.
+  // During transmission, this function should be called every ~10 ms
+  // to obtain a new 10 ms frame of audio. The length of the block will
+  // be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or
+  // 48000 kHz sampling rates respectively).
+  virtual int ExternalPlayoutGetData(
+      int16_t speechData10ms[], int samplingFreqHz,
+      int current_delay_ms, int& lengthSamples) = 0;
+
   // Pulls an audio frame from the specified |channel| for external mixing.
   // If the |desired_sample_rate_hz| is 0, the signal will be returned with
   // its native frequency, otherwise it will be resampled. Valid frequencies
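As a quick check of the frame lengths quoted in the comments above (a worked example, not part of the diff): each legal block length is simply samplingFreqHz / 100, i.e. 10 ms of samples per channel:

    // Illustrative helper: samples per channel in one 10 ms block.
    constexpr int SamplesPer10ms(int samplingFreqHz) {
      return samplingFreqHz / 100;  // 160, 320, 441, 480 for the rates above
    }
    static_assert(SamplesPer10ms(44100) == 441, "matches the list above");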
@@ -29,6 +29,43 @@ class ExternalMediaTest : public AfterStreamingFixture {
   }
 };
 
+TEST_F(ExternalMediaTest, ManualCanRecordAndPlaybackUsingExternalPlayout) {
+  SwitchToManualMicrophone();
+
+  EXPECT_EQ(0, voe_base_->StopSend(channel_));
+  EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+  EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(true));
+  EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+  EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+  TEST_LOG("Recording data for 2 seconds starting now: please speak.\n");
+  int16_t recording[32000];
+  for (int i = 0; i < 200; i++) {
+    int sample_length = 0;
+    EXPECT_EQ(0, voe_xmedia_->ExternalPlayoutGetData(
+        &(recording[i * 160]), 16000, 100, sample_length));
+    EXPECT_EQ(160, sample_length);
+    Sleep(10);
+  }
+
+  EXPECT_EQ(0, voe_base_->StopSend(channel_));
+  EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+  EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(false));
+  EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+  EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(true));
+  EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+  TEST_LOG("Playing back recording, you should hear what you said earlier.\n");
+  for (int i = 0; i < 200; i++) {
+    EXPECT_EQ(0, voe_xmedia_->ExternalRecordingInsertData(
+        &(recording[i * 160]), 160, 16000, 20));
+    Sleep(10);
+  }
+
+  EXPECT_EQ(0, voe_base_->StopSend(channel_));
+  EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(false));
+}
+
 TEST_F(ExternalMediaTest,
        ManualRegisterExternalMediaProcessingOnAllChannelsAffectsPlayout) {
   TEST_LOG("Enabling external media processing: audio should be affected.\n");
 
@@ -31,6 +31,9 @@ VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine) {
 
 VoEExternalMediaImpl::VoEExternalMediaImpl(voe::SharedData* shared)
     :
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+      playout_delay_ms_(0),
+#endif
       shared_(shared) {
   WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
                "VoEExternalMediaImpl() - ctor");
@ -113,6 +116,267 @@ int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing(
|
|||
return -1;
|
||||
}
|
||||
|
||||
int VoEExternalMediaImpl::SetExternalRecordingStatus(bool enable)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
|
||||
"SetExternalRecordingStatus(enable=%d)", enable);
|
||||
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
|
||||
if (shared_->audio_device() && shared_->audio_device()->Recording())
|
||||
{
|
||||
shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
|
||||
"SetExternalRecordingStatus() cannot set state while sending");
|
||||
return -1;
|
||||
}
|
||||
shared_->set_ext_recording(enable);
|
||||
return 0;
|
||||
#else
|
||||
shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
|
||||
"SetExternalRecordingStatus() external recording is not supported");
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
int VoEExternalMediaImpl::ExternalRecordingInsertData(
|
||||
const int16_t speechData10ms[],
|
||||
int lengthSamples,
|
||||
int samplingFreqHz,
|
||||
int current_delay_ms)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
|
||||
"ExternalRecordingInsertData(speechData10ms=0x%x,"
|
||||
" lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)",
|
||||
&speechData10ms[0], lengthSamples, samplingFreqHz,
|
||||
current_delay_ms);
|
||||
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
|
||||
if (!shared_->statistics().Initialized())
|
||||
{
|
||||
shared_->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
if (!shared_->ext_recording())
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
|
||||
"ExternalRecordingInsertData() external recording is not enabled");
|
||||
return -1;
|
||||
}
|
||||
if (shared_->NumOfSendingChannels() == 0)
|
||||
{
|
||||
shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
|
||||
"SetExternalRecordingStatus() no channel is sending");
|
||||
return -1;
|
||||
}
|
||||
if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
|
||||
(48000 != samplingFreqHz) && (44100 != samplingFreqHz))
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"SetExternalRecordingStatus() invalid sample rate");
|
||||
return -1;
|
||||
}
|
||||
if ((0 == lengthSamples) ||
|
||||
((lengthSamples % (samplingFreqHz / 100)) != 0))
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"SetExternalRecordingStatus() invalid buffer size");
|
||||
return -1;
|
||||
}
|
||||
if (current_delay_ms < 0)
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"SetExternalRecordingStatus() invalid delay)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
uint16_t blockSize = samplingFreqHz / 100;
|
||||
// We know the number of samples for 10ms of audio, so we can derive the
|
||||
// number of channels here:
|
||||
uint32_t channels = lengthSamples * 100 / samplingFreqHz;
|
||||
uint32_t nBlocks = lengthSamples / blockSize / channels;
|
||||
int16_t totalDelayMS = 0;
|
||||
uint16_t playoutDelayMS = 0;
|
||||
|
||||
for (uint32_t i = 0; i < nBlocks; i++)
|
||||
{
|
||||
if (!shared_->ext_playout())
|
||||
{
|
||||
// Use real playout delay if external playout is not enabled.
|
||||
if (shared_->audio_device()->PlayoutDelay(&playoutDelayMS) != 0) {
|
||||
shared_->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
|
||||
"PlayoutDelay() unable to get the playout delay");
|
||||
}
|
||||
totalDelayMS = current_delay_ms + playoutDelayMS;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Use stored delay value given the last call
|
||||
// to ExternalPlayoutGetData.
|
||||
totalDelayMS = current_delay_ms + playout_delay_ms_;
|
||||
// Compensate for block sizes larger than 10ms
|
||||
totalDelayMS -= (int16_t)(i*10);
|
||||
if (totalDelayMS < 0)
|
||||
totalDelayMS = 0;
|
||||
}
|
||||
shared_->transmit_mixer()->PrepareDemux(
|
||||
(const int8_t*)(&speechData10ms[i*blockSize]),
|
||||
blockSize,
|
||||
channels,
|
||||
samplingFreqHz,
|
||||
totalDelayMS,
|
||||
0,
|
||||
0,
|
||||
false); // Typing detection not supported
|
||||
|
||||
shared_->transmit_mixer()->DemuxAndMix();
|
||||
shared_->transmit_mixer()->EncodeAndSend();
|
||||
}
|
||||
return 0;
|
||||
#else
|
||||
shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
|
||||
"ExternalRecordingInsertData() external recording is not supported");
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
|
||||
"SetExternalPlayoutStatus(enable=%d)", enable);
|
||||
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
|
||||
if (shared_->audio_device() && shared_->audio_device()->Playing())
|
||||
{
|
||||
shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
|
||||
"SetExternalPlayoutStatus() cannot set state while playing");
|
||||
return -1;
|
||||
}
|
||||
shared_->set_ext_playout(enable);
|
||||
return 0;
|
||||
#else
|
||||
shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
|
||||
"SetExternalPlayoutStatus() external playout is not supported");
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
// This inserts a copy of the raw audio sent to the output drivers to use
|
||||
// as the "far end" signal for the AEC. Currently only 10ms chunks are
|
||||
// supported unfortunately. Since we have to rechunk to 10ms to call this,
|
||||
// thre isn't much gained by allowing N*10ms here; external code can loop
|
||||
// if needed.
|
||||
int VoEExternalMediaImpl::ExternalPlayoutData(
|
||||
int16_t speechData10ms[],
|
||||
int samplingFreqHz,
|
||||
int num_channels,
|
||||
int& lengthSamples)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
|
||||
"ExternalPlayoutData(speechData10ms=0x%x,"
|
||||
" lengthSamples=%u, samplingFreqHz=%d)",
|
||||
&speechData10ms[0], lengthSamples, samplingFreqHz);
|
||||
|
||||
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
|
||||
if (!shared_->statistics().Initialized())
|
||||
{
|
||||
shared_->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
// FIX(jesup) - check if this is enabled?
|
||||
if (shared_->NumOfSendingChannels() == 0)
|
||||
{
|
||||
shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
|
||||
"SetExternalRecordingStatus() no channel is sending");
|
||||
return -1;
|
||||
}
|
||||
if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
|
||||
(48000 != samplingFreqHz) && (44100 != samplingFreqHz))
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"SetExternalRecordingStatus() invalid sample rate");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Far-end data is inserted without going through neteq/etc.
|
||||
// Only supports 10ms chunks; AnalyzeReverseStream() enforces that
|
||||
// lower down.
|
||||
AudioFrame audioFrame;
|
||||
audioFrame.UpdateFrame(-1, 0xFFFFFFFF,
|
||||
speechData10ms,
|
||||
lengthSamples,
|
||||
samplingFreqHz,
|
||||
AudioFrame::kNormalSpeech,
|
||||
AudioFrame::kVadUnknown,
|
||||
num_channels);
|
||||
|
||||
shared_->output_mixer()->APMAnalyzeReverseStream(audioFrame);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
int VoEExternalMediaImpl::ExternalPlayoutGetData(
|
||||
int16_t speechData10ms[],
|
||||
int samplingFreqHz,
|
||||
int current_delay_ms,
|
||||
int& lengthSamples)
|
||||
{
|
||||
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
|
||||
"ExternalPlayoutGetData(speechData10ms=0x%x, samplingFreqHz=%d"
|
||||
", current_delay_ms=%d)", &speechData10ms[0], samplingFreqHz,
|
||||
current_delay_ms);
|
||||
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
|
||||
if (!shared_->statistics().Initialized())
|
||||
{
|
||||
shared_->SetLastError(VE_NOT_INITED, kTraceError);
|
||||
return -1;
|
||||
}
|
||||
if (!shared_->ext_playout())
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
|
||||
"ExternalPlayoutGetData() external playout is not enabled");
|
||||
return -1;
|
||||
}
|
||||
if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
|
||||
(48000 != samplingFreqHz) && (44100 != samplingFreqHz))
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"ExternalPlayoutGetData() invalid sample rate");
|
||||
return -1;
|
||||
}
|
||||
if (current_delay_ms < 0)
|
||||
{
|
||||
shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
|
||||
"ExternalPlayoutGetData() invalid delay)");
|
||||
return -1;
|
||||
}
|
||||
|
||||
AudioFrame audioFrame;
|
||||
|
||||
uint32_t channels = shared_->output_mixer()->GetOutputChannelCount();
|
||||
// If we have not received any data yet, consider it's mono since it's the
|
||||
// most common case.
|
||||
if (channels == 0) {
|
||||
channels = 1;
|
||||
}
|
||||
|
||||
// Retrieve mixed output at the specified rate
|
||||
shared_->output_mixer()->MixActiveChannels();
|
||||
shared_->output_mixer()->DoOperationsOnCombinedSignal(true);
|
||||
shared_->output_mixer()->GetMixedAudio(samplingFreqHz, channels, &audioFrame);
|
||||
|
||||
// Deliver audio (PCM) samples to the external sink
|
||||
memcpy(speechData10ms,
|
||||
audioFrame.data_,
|
||||
sizeof(int16_t)*audioFrame.samples_per_channel_*channels);
|
||||
lengthSamples = audioFrame.samples_per_channel_ * channels;
|
||||
|
||||
// Store current playout delay (to be used by ExternalRecordingInsertData).
|
||||
playout_delay_ms_ = current_delay_ms;
|
||||
|
||||
return 0;
|
||||
#else
|
||||
shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
|
||||
"ExternalPlayoutGetData() external playout is not supported");
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz,
|
||||
AudioFrame* frame) {
|
||||
if (!shared_->statistics().Initialized()) {
|
||||
|
|
|
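To make the channel/block derivation in ExternalRecordingInsertData above concrete, a worked example with illustrative values (not code from this changeset):

    // One 10 ms interleaved stereo frame at 16000 Hz:
    int lengthSamples  = 320;                              // 160 per channel * 2
    int samplingFreqHz = 16000;
    int blockSize = samplingFreqHz / 100;                  // 160 samples per 10 ms
    int channels  = lengthSamples * 100 / samplingFreqHz;  // 320*100/16000 = 2
    int nBlocks   = lengthSamples / blockSize / channels;  // 320/160/2 = 1
    // The derivation assumes the buffer spans exactly 10 ms per channel; a
    // 20 ms mono buffer of 320 samples would also compute channels == 2.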
@@ -26,6 +26,28 @@ class VoEExternalMediaImpl : public VoEExternalMedia {
   int DeRegisterExternalMediaProcessing(int channel,
                                         ProcessingTypes type) override;
 
+  virtual int SetExternalRecordingStatus(bool enable) override;
+
+  virtual int SetExternalPlayoutStatus(bool enable) override;
+
+  virtual int ExternalRecordingInsertData(
+      const int16_t speechData10ms[],
+      int lengthSamples,
+      int samplingFreqHz,
+      int current_delay_ms) override;
+
+  // Insertion of far-end data as actually played out to the OS audio driver
+  virtual int ExternalPlayoutData(
+      int16_t speechData10ms[],
+      int samplingFreqHz,
+      int num_channels,
+      int& lengthSamples) override;
+
+  virtual int ExternalPlayoutGetData(int16_t speechData10ms[],
+                                     int samplingFreqHz,
+                                     int current_delay_ms,
+                                     int& lengthSamples) override;
+
   int GetAudioFrame(int channel,
                     int desired_sample_rate_hz,
                     AudioFrame* frame) override;
@@ -37,6 +59,9 @@ class VoEExternalMediaImpl : public VoEExternalMedia {
   ~VoEExternalMediaImpl() override;
 
  private:
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+  int playout_delay_ms_;
+#endif
   voe::SharedData* shared_;
 };
 