/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaEngineWebRTC.h"
#include <stdio.h>
#include <algorithm>
#include "mozilla/Assertions.h"
#include "MediaTrackConstraints.h"
#include "mtransport/runnable_utils.h"
#include "nsAutoPtr.h"

// scoped_ptr.h uses FF
#ifdef FF
#undef FF
#endif
#include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"

#define CHANNELS 1
#define ENCODING "L16"
#define DEFAULT_PORT 5555

#define SAMPLE_RATE(freq) ((freq)*2*8) // bps, 16-bit samples
#define SAMPLE_LENGTH(freq) (((freq)*10)/1000)

// These are restrictions from the webrtc.org code
#define MAX_CHANNELS 2
#define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100

#define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10
static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH");

namespace mozilla {

#ifdef LOG
#undef LOG
#endif

extern LogModule* GetMediaManagerLog();
#define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
#define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)

LogModule* AudioLogModule() {
  static mozilla::LazyLogModule log("AudioLatency");
  return static_cast<LogModule*>(log);
}

/**
 * WebRTC microphone source.
 */
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)

int MediaEngineWebRTCMicrophoneSource::sChannelsOpen = 0;
ScopedCustomReleasePtr<webrtc::VoEBase> MediaEngineWebRTCMicrophoneSource::mVoEBase;
ScopedCustomReleasePtr<webrtc::VoEExternalMedia> MediaEngineWebRTCMicrophoneSource::mVoERender;
ScopedCustomReleasePtr<webrtc::VoENetwork> MediaEngineWebRTCMicrophoneSource::mVoENetwork;
ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> MediaEngineWebRTCMicrophoneSource::mVoEProcessing;
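
// AudioOutputObserver buffers the graph's mixed playout ("far end") audio as
// 10ms chunks in a single-reader/single-writer FIFO, so the echo canceller
// can consume a reference signal from the capture side.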
AudioOutputObserver::AudioOutputObserver()
  : mPlayoutFreq(0)
  , mPlayoutChannels(0)
  , mChunkSize(0)
  , mSaved(nullptr)
  , mSamplesSaved(0)
{
  // Buffers of 10ms chunks
  mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10);
}

AudioOutputObserver::~AudioOutputObserver()
{
  Clear();
  free(mSaved);
  mSaved = nullptr;
}

void
AudioOutputObserver::Clear()
{
  while (mPlayoutFifo->size() > 0) {
    free(mPlayoutFifo->Pop());
  }
  // we'd like to touch mSaved here, but we can't if we might still be getting callbacks
}

FarEndAudioChunk *
AudioOutputObserver::Pop()
{
  return (FarEndAudioChunk *) mPlayoutFifo->Pop();
}

uint32_t
AudioOutputObserver::Size()
{
  return mPlayoutFifo->size();
}
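
// Called with each block of mixed playout audio. The first call locks in the
// far-end channel count and sample rate; a later mismatch deliberately
// crashes. Data is re-chunked into 10ms FarEndAudioChunks and pushed into the
// FIFO for the AEC to drain in Process().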
// static
void
AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrames, bool aOverran,
                                  int aFreq, int aChannels)
{
  if (mPlayoutChannels != 0) {
    if (mPlayoutChannels != static_cast<uint32_t>(aChannels)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aChannels <= MAX_CHANNELS);
    mPlayoutChannels = static_cast<uint32_t>(aChannels);
  }
  if (mPlayoutFreq != 0) {
    if (mPlayoutFreq != static_cast<uint32_t>(aFreq)) {
      MOZ_CRASH();
    }
  } else {
    MOZ_ASSERT(aFreq <= MAX_SAMPLING_FREQ);
    MOZ_ASSERT(!(aFreq % 100), "Sampling rate for far end data should be multiple of 100.");
    mPlayoutFreq = aFreq;
    mChunkSize = aFreq/100; // 10ms
  }

#ifdef LOG_FAREND_INSERTION
  static FILE *fp = fopen("insertfarend.pcm","wb");
#endif

  if (mSaved) {
    // flag overrun as soon as possible, and only once
    mSaved->mOverrun = aOverran;
    aOverran = false;
  }
  // Rechunk to 10ms.
  // The AnalyzeReverseStream() and WebRtcAec_BufferFarend() functions insist on 10ms
  // samples per call. Annoying...
  while (aFrames) {
    if (!mSaved) {
      mSaved = (FarEndAudioChunk *) moz_xmalloc(sizeof(FarEndAudioChunk) +
                                                (mChunkSize * aChannels - 1)*sizeof(int16_t));
      mSaved->mSamples = mChunkSize;
      mSaved->mOverrun = aOverran;
      aOverran = false;
    }
    uint32_t to_copy = mChunkSize - mSamplesSaved;
    if (to_copy > aFrames) {
      to_copy = aFrames;
    }

    int16_t *dest = &(mSaved->mData[mSamplesSaved * aChannels]);
    ConvertAudioSamples(aBuffer, dest, to_copy * aChannels);

#ifdef LOG_FAREND_INSERTION
    if (fp) {
      fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp);
    }
#endif

    aFrames -= to_copy;
    mSamplesSaved += to_copy;
    aBuffer += to_copy * aChannels;

    if (mSamplesSaved >= mChunkSize) {
      int free_slots = mPlayoutFifo->capacity() - mPlayoutFifo->size();
      if (free_slots <= 0) {
        // XXX We should flag an overrun for the reader. We can't drop data from it due to
        // thread safety issues.
        break;
      } else {
        mPlayoutFifo->Push((int8_t *) mSaved); // takes ownership
        mSaved = nullptr;
        mSamplesSaved = 0;
      }
    }
  }
}

MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
    webrtc::VoiceEngine* aVoiceEnginePtr,
    mozilla::AudioInput* aAudioInput,
    int aIndex,
    const char* name,
    const char* uuid)
  : MediaEngineAudioSource(kReleased)
  , mVoiceEngine(aVoiceEnginePtr)
  , mAudioInput(aAudioInput)
  , mMonitor("WebRTCMic.Monitor")
  , mCapIndex(aIndex)
  , mChannel(-1)
  , mTrackID(TRACK_NONE)
  , mStarted(false)
  , mSampleFrequency(MediaEngine::DEFAULT_SAMPLE_RATE)
  , mTotalFrames(0)
  , mLastLogFrames(0)
  , mPlayoutDelay(0)
  , mNullTransport(nullptr)
  , mSkipProcessing(false)
{
  MOZ_ASSERT(aVoiceEnginePtr);
  MOZ_ASSERT(aAudioInput);
  mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
  mDeviceUUID.Assign(uuid);
  mListener = new mozilla::WebRTCAudioDataListener(this);
  mSettings.mEchoCancellation.Construct(0);
  mSettings.mAutoGainControl.Construct(0);
  mSettings.mNoiseSuppression.Construct(0);
  mSettings.mChannelCount.Construct(0);
  // We'll init lazily as needed
}

void
MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName) const
{
  aName.Assign(mDeviceName);
}

void
MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID) const
{
  aUUID.Assign(mDeviceUUID);
}

// GetBestFitnessDistance returns the best distance the capture device can offer
// as a whole, given an accumulated number of ConstraintSets.
// Ideal values are considered in the first ConstraintSet only.
// Plain values are treated as Ideal in the first ConstraintSet.
// Plain values are treated as Exact in subsequent ConstraintSets.
// Infinity = UINT32_MAX e.g. device cannot satisfy accumulated ConstraintSets.
// A finite result may be used to calculate this device's ranking as a choice.

uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
    const nsString& aDeviceId) const
{
  uint32_t distance = 0;

  for (const auto* cs : aConstraintSets) {
    distance = GetMinimumFitnessDistance(*cs, aDeviceId);
    break; // distance is read from first entry only
  }
  return distance;
}

nsresult
MediaEngineWebRTCMicrophoneSource::Restart(AllocationHandle* aHandle,
                                           const dom::MediaTrackConstraints& aConstraints,
                                           const MediaEnginePrefs &aPrefs,
                                           const nsString& aDeviceId,
                                           const char** aOutBadConstraint)
{
  AssertIsOnOwningThread();
  MOZ_ASSERT(aHandle);
  NormalizedConstraints constraints(aConstraints);
  return ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId,
                              aOutBadConstraint);
}
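
// Byte-wise comparison is used as a conservative equality check; it assumes
// MediaEnginePrefs is a plain struct of scalar fields. Note that memcmp also
// compares any padding bytes, so semantically equal prefs could in principle
// compare unequal.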
bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b)
{
  return !memcmp(&a, &b, sizeof(MediaEnginePrefs));
}
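
// Resolve the flattened constraints and prefs against what the device can do
// (AEC/AGC/NS toggles and a validated channel count), then allocate or
// reconfigure the shared engine channel accordingly, and publish the applied
// values through SetLastPrefs().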
nsresult
MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
    const AllocationHandle* aHandle,
    const NormalizedConstraints& aNetConstraints,
    const MediaEnginePrefs& aPrefs,
    const nsString& aDeviceId,
    const char** aOutBadConstraint)
{
  FlattenedConstraints c(aNetConstraints);

  MediaEnginePrefs prefs = aPrefs;
  prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
  prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn);
  prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn);
  uint32_t maxChannels = 1;
  if (mAudioInput->GetMaxAvailableChannels(maxChannels) != 0) {
    return NS_ERROR_FAILURE;
  }
  // Check channelCount violation
  if (static_cast<int32_t>(maxChannels) < c.mChannelCount.mMin ||
      static_cast<int32_t>(maxChannels) > c.mChannelCount.mMax) {
    *aOutBadConstraint = "channelCount";
    return NS_ERROR_FAILURE;
  }
  // Clamp channelCount to a valid value
  if (prefs.mChannels <= 0) {
    prefs.mChannels = static_cast<int32_t>(maxChannels);
  }
  prefs.mChannels = c.mChannelCount.Get(std::min(prefs.mChannels,
                                                 static_cast<int32_t>(maxChannels)));
  // Clamp channelCount to a valid value
  prefs.mChannels = std::max(1, std::min(prefs.mChannels, static_cast<int32_t>(maxChannels)));

  LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d, channels: %d",
       prefs.mAecOn ? prefs.mAec : -1,
       prefs.mAgcOn ? prefs.mAgc : -1,
       prefs.mNoiseOn ? prefs.mNoise : -1,
       prefs.mPlayoutDelay,
       prefs.mChannels));

  mPlayoutDelay = prefs.mPlayoutDelay;

  switch (mState) {
    case kReleased:
      MOZ_ASSERT(aHandle);
      if (sChannelsOpen == 0) {
        if (!InitEngine()) {
          LOG(("Audio engine is not initialized"));
          return NS_ERROR_FAILURE;
        }
      } else {
        // Until we fix (or wallpaper) support for multiple mic input
        // (Bug 1238038) fail allocation for a second device
        return NS_ERROR_FAILURE;
      }
      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
        return NS_ERROR_FAILURE;
      }
      mAudioInput->SetUserChannelCount(prefs.mChannels);
      if (!AllocChannel()) {
        FreeChannel();
        LOG(("Audio device is not initialized"));
        return NS_ERROR_FAILURE;
      }
      LOG(("Audio device %d allocated", mCapIndex));
      {
        // Update with the actual applied channelCount in order
        // to store it in settings.
        uint32_t channelCount = 0;
        mAudioInput->GetChannelCount(channelCount);
        MOZ_ASSERT(channelCount > 0);
        prefs.mChannels = channelCount;
      }
      break;

    case kStarted:
      if (prefs == mLastPrefs) {
        return NS_OK;
      }

      if (prefs.mChannels != mLastPrefs.mChannels) {
        MOZ_ASSERT(mSources.Length() > 0);
        auto& source = mSources.LastElement();
        mAudioInput->SetUserChannelCount(prefs.mChannels);
        // Get the validated channel count
        uint32_t channelCount = 0;
        mAudioInput->GetChannelCount(channelCount);
        MOZ_ASSERT(channelCount > 0 && mLastPrefs.mChannels > 0);
        // If the validated channel count differs from the previous one,
        // open a new audio callback driver
        if (static_cast<uint32_t>(mLastPrefs.mChannels) != channelCount
            && !source->OpenNewAudioCallbackDriver(mListener)) {
          return NS_ERROR_FAILURE;
        }
        // Update settings
        prefs.mChannels = channelCount;
      }

      if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
        MonitorAutoLock lock(mMonitor);
        if (mSources.IsEmpty()) {
          LOG(("Audio device %d reallocated", mCapIndex));
        } else {
          LOG(("Audio device %d allocated shared", mCapIndex));
        }
      }
      break;

    default:
      LOG(("Audio device %d in ignored state %d", mCapIndex, mState));
      break;
  }

  if (sChannelsOpen > 0) {
    int error;

    error = mVoEProcessing->SetEcStatus(prefs.mAecOn, (webrtc::EcModes)prefs.mAec);
    if (error) {
      LOG(("%s Error setting Echo Status: %d ", __FUNCTION__, error));
      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
      if (prefs.mAecOn) {
        error = mVoEProcessing->SetEcMetricsStatus(true);
        if (error) {
          LOG(("%s Error setting Echo Metrics: %d ", __FUNCTION__, error));
        }
      }
    }
    error = mVoEProcessing->SetAgcStatus(prefs.mAgcOn, (webrtc::AgcModes)prefs.mAgc);
    if (error) {
      LOG(("%s Error setting AGC Status: %d ", __FUNCTION__, error));
    }
    error = mVoEProcessing->SetNsStatus(prefs.mNoiseOn, (webrtc::NsModes)prefs.mNoise);
    if (error) {
      LOG(("%s Error setting NoiseSuppression Status: %d ", __FUNCTION__, error));
    }
  }

  mSkipProcessing = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
  if (mSkipProcessing) {
    mSampleFrequency = MediaEngine::USE_GRAPH_RATE;
    mAudioOutputObserver = nullptr;
  } else {
    // make sure we route a copy of the mixed audio output of this MSG to the
    // AEC
    mAudioOutputObserver = new AudioOutputObserver();
  }
  SetLastPrefs(prefs);
  return NS_OK;
}
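
// Remember the prefs that were actually applied, and mirror them into
// mSettings on the main thread, where the track settings are read.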
void
MediaEngineWebRTCMicrophoneSource::SetLastPrefs(
    const MediaEnginePrefs& aPrefs)
{
  mLastPrefs = aPrefs;

  RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;

  NS_DispatchToMainThread(media::NewRunnableFrom([that, aPrefs]() mutable {
    that->mSettings.mEchoCancellation.Value() = aPrefs.mAecOn;
    that->mSettings.mAutoGainControl.Value() = aPrefs.mAgcOn;
    that->mSettings.mNoiseSuppression.Value() = aPrefs.mNoiseOn;
    that->mSettings.mChannelCount.Value() = aPrefs.mChannels;
    return NS_OK;
  }));
}

nsresult
MediaEngineWebRTCMicrophoneSource::Deallocate(AllocationHandle* aHandle)
{
  AssertIsOnOwningThread();

  Super::Deallocate(aHandle);

  if (!mRegisteredHandles.Length()) {
    // If empty, no callbacks to deliver data should be occurring
    if (mState != kStopped && mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }

    FreeChannel();
    LOG(("Audio device %d deallocated", mCapIndex));
  } else {
    LOG(("Audio device %d deallocated but still in use", mCapIndex));
  }
  return NS_OK;
}

nsresult
MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
                                         TrackID aID,
                                         const PrincipalHandle& aPrincipalHandle)
{
  AssertIsOnOwningThread();
  if (sChannelsOpen == 0 || !aStream) {
    return NS_ERROR_FAILURE;
  }

  {
    MonitorAutoLock lock(mMonitor);
    mSources.AppendElement(aStream);
    mPrincipalHandles.AppendElement(aPrincipalHandle);
    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
  }

  AudioSegment* segment = new AudioSegment();
  if (mSampleFrequency == MediaEngine::USE_GRAPH_RATE) {
    mSampleFrequency = aStream->GraphRate();
  }
  aStream->AddAudioTrack(aID, mSampleFrequency, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);

  // XXX Make this based on the pref.
  aStream->RegisterForAudioMixing();
  LOG(("Start audio for stream %p", aStream));

  if (!mListener) {
    mListener = new mozilla::WebRTCAudioDataListener(this);
  }
  if (mState == kStarted) {
    MOZ_ASSERT(aID == mTrackID);
    // Make sure we're associated with this stream
    mAudioInput->StartRecording(aStream, mListener);
    return NS_OK;
  }
  mState = kStarted;
  mTrackID = aID;

  // Make sure logger starts before capture
  AsyncLatencyLogger::Get(true);

  MOZ_ASSERT(mAudioOutputObserver);
  mAudioOutputObserver->Clear();

  if (mVoEBase->StartReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }

  // Must be *before* StartSend() so it will notice we selected external input (full_duplex)
  mAudioInput->StartRecording(aStream, mListener);

  if (mVoEBase->StartSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }

  // Attach external media processor, so this::Process will be called.
  mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);

  return NS_OK;
}

nsresult
MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
  AssertIsOnOwningThread();
  {
    MonitorAutoLock lock(mMonitor);

    size_t sourceIndex = mSources.IndexOf(aSource);
    if (sourceIndex == mSources.NoIndex) {
      // Already stopped - this is allowed
      return NS_OK;
    }
    mSources.RemoveElementAt(sourceIndex);
    mPrincipalHandles.RemoveElementAt(sourceIndex);
    MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());

    aSource->EndTrack(aID);

    if (!mSources.IsEmpty()) {
      mAudioInput->StopRecording(aSource);
      return NS_OK;
    }
    if (mState != kStarted) {
      return NS_ERROR_FAILURE;
    }
    if (!mVoEBase) {
      return NS_ERROR_FAILURE;
    }

    mState = kStopped;
  }
  if (mListener) {
    // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
    mListener->Shutdown();
    mListener = nullptr;
  }

  mAudioInput->StopRecording(aSource);

  mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);

  if (mVoEBase->StopSend(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StopReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}

void
MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
                                              SourceMediaStream *aSource,
                                              TrackID aID,
                                              StreamTime aDesiredTime,
                                              const PrincipalHandle& aPrincipalHandle)
{
  // Ignore - we push audio data
  LOG_FRAMES(("NotifyPull, desired = %" PRId64, (int64_t) aDesiredTime));
}
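
// Mixed speaker data callback from the graph. When processing is enabled we
// queue it as far-end data for the AEC; in pass-through mode there is no
// observer and the data is dropped.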
void
MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
                                                    AudioDataValue* aBuffer,
                                                    size_t aFrames,
                                                    TrackRate aRate,
                                                    uint32_t aChannels)
{
  if (mAudioOutputObserver) {
    mAudioOutputObserver->InsertFarEnd(aBuffer, aFrames, false,
                                       aRate, aChannels);
  }
}
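
// A packet is 10ms of audio at aRate, i.e. aRate/100 frames; the packetizer
// is (re)created whenever the rate or channel count changes.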
void
MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraph* aGraph,
                                                       const AudioDataValue* aBuffer,
                                                       size_t aFrames,
                                                       TrackRate aRate,
                                                       uint32_t aChannels)
{
  // This will call Process() with data coming out of the AEC/NS/AGC/etc chain
  if (!mPacketizer ||
      mPacketizer->PacketSize() != aRate/100u ||
      mPacketizer->Channels() != aChannels) {
    // It's ok to drop the audio still in the packetizer here.
    mPacketizer =
      new AudioPacketizer<AudioDataValue, int16_t>(aRate/100, aChannels);
  }

  mPacketizer->Input(aBuffer, static_cast<uint32_t>(aFrames));

  while (mPacketizer->PacketsAvailable()) {
    uint32_t samplesPerPacket = mPacketizer->PacketSize() *
                                mPacketizer->Channels();
    if (mInputBuffer.Length() < samplesPerPacket) {
      mInputBuffer.SetLength(samplesPerPacket);
    }
    int16_t* packet = mInputBuffer.Elements();
    mPacketizer->Output(packet);

    mVoERender->ExternalRecordingInsertData(packet, samplesPerPacket, aRate, 0);
  }
}
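
// Copy one block of interleaved microphone data into every registered
// SourceMediaStream. Mono is copied as-is; stereo is deinterleaved into
// planar channel pointers backed by a single SharedBuffer.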
template<typename T>
void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
    mTotalFrames += aFrames;
    if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
      MOZ_LOG(AudioLogModule(), LogLevel::Debug,
              ("%p: Inserting %" PRIuSIZE " samples into graph, total frames = %" PRIu64,
               (void*)this, aFrames, mTotalFrames));
      mLastLogFrames = mTotalFrames;
    }
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    // Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1 || aChannels == 2,
               "GraphDriver only supports mono and stereo audio for now");

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    AutoTArray<const T*, 8> channels;
    if (aChannels == 1) {
      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
      channels.AppendElement(static_cast<T*>(buffer->Data()));
    } else {
      channels.SetLength(aChannels);
      AutoTArray<T*, 8> write_channels;
      write_channels.SetLength(aChannels);
      T* samples = static_cast<T*>(buffer->Data());

      size_t offset = 0;
      for (uint32_t ch = 0; ch < aChannels; ++ch) {
        channels[ch] = write_channels[ch] = samples + offset;
        offset += aFrames;
      }

      DeinterleaveAndConvertBuffer(aBuffer,
                                   aFrames,
                                   aChannels,
                                   write_channels.Elements());
    }

    MOZ_ASSERT(aChannels == channels.Length());
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                          mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    mSources[i]->AppendToTrack(mTrackID, segment);
  }
}

// Called back on GraphDriver thread!
// Note this can be called back after ::Shutdown()
void
MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraph* aGraph,
                                                   const AudioDataValue* aBuffer,
                                                   size_t aFrames,
                                                   TrackRate aRate,
                                                   uint32_t aChannels)
{
  // If some processing is necessary, packetize and insert in the WebRTC.org
  // code. Otherwise, directly insert the mic data in the MSG, bypassing all processing.
  if (PassThrough()) {
    InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
  } else {
    PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
  }
}
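
// Reads the current status of one webrtc.org processing module (Agc/Ec/Ns)
// and, if the module is enabled, toggles it off and back on so its internal
// state is rebuilt against the new device. Relies on a local `enabled` bool
// being in scope at the expansion site.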
#define ResetProcessingIfNeeded(_processing)                          \
do {                                                                  \
  webrtc::_processing##Modes mode;                                    \
  int rv = mVoEProcessing->Get##_processing##Status(enabled, mode);   \
  if (rv) {                                                           \
    NS_WARNING("Could not get the status of the "                     \
               #_processing " on device change.");                    \
    return;                                                           \
  }                                                                   \
                                                                      \
  if (enabled) {                                                      \
    rv = mVoEProcessing->Set##_processing##Status(!enabled);          \
    if (rv) {                                                         \
      NS_WARNING("Could not reset the status of the "                 \
                 #_processing " on device change.");                  \
      return;                                                         \
    }                                                                 \
                                                                      \
    rv = mVoEProcessing->Set##_processing##Status(enabled);           \
    if (rv) {                                                         \
      NS_WARNING("Could not reset the status of the "                 \
                 #_processing " on device change.");                  \
      return;                                                         \
    }                                                                 \
  }                                                                   \
} while(0)

void
MediaEngineWebRTCMicrophoneSource::DeviceChanged() {
  // Reset some processing
  bool enabled;
  ResetProcessingIfNeeded(Agc);
  ResetProcessingIfNeeded(Ec);
  ResetProcessingIfNeeded(Ns);
}
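
// Acquire the voice-engine interfaces shared by all microphone sources; on
// any failure, tear down whatever was already obtained via DeInitEngine().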
bool
MediaEngineWebRTCMicrophoneSource::InitEngine()
{
  MOZ_ASSERT(!mVoEBase);
  mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);

  mVoEBase->Init();

  mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
  if (mVoERender) {
    mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
    if (mVoENetwork) {
      mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
      if (mVoEProcessing) {
        mNullTransport = new NullTransport();
        return true;
      }
    }
  }
  DeInitEngine();
  return false;
}

// This shuts down the engine when no channel is open
void
MediaEngineWebRTCMicrophoneSource::DeInitEngine()
{
  if (mVoEBase) {
    mVoEBase->Terminate();
    delete mNullTransport;
    mNullTransport = nullptr;

    mVoEProcessing = nullptr;
    mVoENetwork = nullptr;
    mVoERender = nullptr;
    mVoEBase = nullptr;
  }
}

// Releases the channel if one is allocated, and shuts down the engine when
// the last open channel goes away.
// mState records if a channel is allocated (slightly redundantly to mChannel)
void
MediaEngineWebRTCMicrophoneSource::FreeChannel()
{
  if (mState != kReleased) {
    if (mChannel != -1) {
      MOZ_ASSERT(mVoENetwork && mVoEBase);
      if (mVoENetwork) {
        mVoENetwork->DeRegisterExternalTransport(mChannel);
      }
      if (mVoEBase) {
        mVoEBase->DeleteChannel(mChannel);
      }
      mChannel = -1;
    }
    mState = kReleased;

    MOZ_ASSERT(sChannelsOpen > 0);
    if (--sChannelsOpen == 0) {
      DeInitEngine();
    }
  }
}
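
// Create and configure a voice-engine channel for this device: register the
// NullTransport (no RTP is ever sent), verify the recording device is
// available, and set a PCM/L16 send codec at mSampleFrequency. On failure the
// channel is deleted and, if it was the last user, the engine is shut down.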
bool
MediaEngineWebRTCMicrophoneSource::AllocChannel()
{
  MOZ_ASSERT(mVoEBase);

  mChannel = mVoEBase->CreateChannel();
  if (mChannel >= 0) {
    if (!mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
      mSampleFrequency = MediaEngine::DEFAULT_SAMPLE_RATE;
      LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));

      // Check for availability.
      if (!mAudioInput->SetRecordingDevice(mCapIndex)) {
#ifndef MOZ_B2G
        // Because of the permission mechanism of B2G, we need to skip the status
        // check here.
        bool avail = false;
        mAudioInput->GetRecordingDeviceStatus(avail);
        if (!avail) {
          if (sChannelsOpen == 0) {
            DeInitEngine();
          }
          return false;
        }
#endif // MOZ_B2G

        // Set "codec" to PCM at mSampleFrequency on the device's channels
        ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
        if (ptrVoECodec) {
          webrtc::CodecInst codec;
          strcpy(codec.plname, ENCODING);
          codec.channels = CHANNELS;
          uint32_t maxChannels = 0;
          if (mAudioInput->GetMaxAvailableChannels(maxChannels) == 0) {
            codec.channels = maxChannels;
          }
          MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
          codec.rate = SAMPLE_RATE(mSampleFrequency);
          codec.plfreq = mSampleFrequency;
          codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
          codec.pltype = 0; // Default payload type

          if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
            mState = kAllocated;
            sChannelsOpen++;
            return true;
          }
        }
      }
    }
  }
  mVoEBase->DeleteChannel(mChannel);
  mChannel = -1;
  if (sChannelsOpen == 0) {
    DeInitEngine();
  }
  return false;
}

void
MediaEngineWebRTCMicrophoneSource::Shutdown()
{
  Super::Shutdown();
  if (mListener) {
    // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
    mListener->Shutdown();
    // Don't release the webrtc.org pointers yet until the Listener is (async) shutdown
    mListener = nullptr;
  }

  if (mState == kStarted) {
    SourceMediaStream *source;
    bool empty;

    while (1) {
      {
        MonitorAutoLock lock(mMonitor);
        empty = mSources.IsEmpty();
        if (empty) {
          break;
        }
        source = mSources[0];
      }
      Stop(source, kAudioTrack); // XXX change to support multiple tracks
    }
    MOZ_ASSERT(mState == kStopped);
  }

  while (mRegisteredHandles.Length()) {
    MOZ_ASSERT(mState == kAllocated || mState == kStopped);
    // on last Deallocate(), FreeChannel()s and DeInit()s if all channels are released
    Deallocate(mRegisteredHandles[0].get());
  }
  MOZ_ASSERT(mState == kReleased);

  mAudioInput = nullptr;
}

typedef int16_t sample;
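
// Callback from the voice engine with a 10ms block of processed (AEC/AGC/NS)
// capture audio. Queued far-end chunks are fed to the engine first so the
// echo canceller has up-to-date playout data, then the block is inserted into
// the graph.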
void
MediaEngineWebRTCMicrophoneSource::Process(int channel,
                                           webrtc::ProcessingTypes type,
                                           sample *audio10ms, size_t length,
                                           int samplingFreq, bool isStereo)
{
  MOZ_ASSERT(!PassThrough(), "This should be bypassed when in PassThrough mode.");
  // On initial capture, throw away all far-end data except the most recent sample
  // since it's already irrelevant and we want to avoid confusing the AEC far-end
  // input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (mAudioOutputObserver->Size() > 1) {
      free(mAudioOutputObserver->Pop()); // only call if size() > 0
    }
  }

  while (mAudioOutputObserver->Size() > 0) {
    FarEndAudioChunk *buffer = mAudioOutputObserver->Pop(); // only call if size() > 0
    if (buffer) {
      int chunkSamples = buffer->mSamples;
      int res = mVoERender->ExternalPlayoutData(buffer->mData,
                                                mAudioOutputObserver->PlayoutFrequency(),
                                                mAudioOutputObserver->PlayoutChannels(),
                                                mPlayoutDelay,
                                                chunkSamples);
      free(buffer);
      if (res == -1) {
        return;
      }
    }
  }

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  uint32_t channels = isStereo ? 2 : 1;
  InsertInGraph<int16_t>(audio10ms, length, channels);
}

void
MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName) const
{
  aName.AssignLiteral("AudioCapture");
}
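
// An AudioCapture source has no stable device identity; each call returns a
// freshly generated UUID with the surrounding braces and the terminator
// stripped.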
void
MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID) const
{
  nsID uuid;
  char uuidBuffer[NSID_LENGTH];
  nsCString asciiString;
  ErrorResult rv;

  rv = nsContentUtils::GenerateUUIDInPlace(uuid);
  if (rv.Failed()) {
    aUUID.AssignLiteral("");
    return;
  }

  uuid.ToProvidedString(uuidBuffer);
  asciiString.AssignASCII(uuidBuffer);

  // Remove {} and the null terminator
  aUUID.Assign(Substring(asciiString, 1, NSID_LENGTH - 3));
}

nsresult
MediaEngineWebRTCAudioCaptureSource::Start(SourceMediaStream *aMediaStream,
                                           TrackID aId,
                                           const PrincipalHandle& aPrincipalHandle)
{
  AssertIsOnOwningThread();
  aMediaStream->AddTrack(aId, 0, new AudioSegment());
  return NS_OK;
}

nsresult
MediaEngineWebRTCAudioCaptureSource::Stop(SourceMediaStream *aMediaStream,
                                          TrackID aId)
{
  AssertIsOnOwningThread();
  aMediaStream->EndAllTrackAndFinish();
  return NS_OK;
}

nsresult
MediaEngineWebRTCAudioCaptureSource::Restart(
    AllocationHandle* aHandle,
    const dom::MediaTrackConstraints& aConstraints,
    const MediaEnginePrefs &aPrefs,
    const nsString& aDeviceId,
    const char** aOutBadConstraint)
{
  MOZ_ASSERT(!aHandle);
  return NS_OK;
}

uint32_t
MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
    const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
    const nsString& aDeviceId) const
{
  // There is only one way of capturing audio for now, and it's always adequate.
  return 0;
}

} // namespace mozilla