Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1221587: allow getUserMedia to use full-duplex cubeb streams r=padenot

--HG--
extra : commitid : AH6pOM2E3J2

Parent: 3abc9ead67
Commit: 61fbf51469
@@ -364,7 +364,7 @@ AudioStream::OpenCubeb(cubeb_stream_params &aParams)

 {
   cubeb_stream* stream;
-  if (cubeb_stream_init(cubebContext, &stream, "AudioStream", aParams,
+  if (cubeb_stream_init(cubebContext, &stream, "AudioStream", nullptr, &aParams,
                         latency, DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
     MonitorAutoLock mon(mMonitor);
     MOZ_ASSERT(mState != SHUTDOWN);
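Note on the API change above: the full-duplex form of cubeb_stream_init used in this patch takes two cubeb_stream_params pointers, one per direction, and an output-only consumer such as AudioStream passes nullptr for the capture side. A minimal sketch under those assumptions (this interim signature predates the released cubeb full-duplex API, which also takes device handles; the argument order simply mirrors the call in the hunk above):

#include "cubeb/cubeb.h"

// Hedged sketch: open an output-only stream against the full-duplex init,
// passing nullptr for the unused (input) side as AudioStream does above.
static cubeb_stream*
OpenOutputOnlyStream(cubeb* aContext, cubeb_stream_params* aOutParams,
                     unsigned int aLatency, cubeb_data_callback aDataCb,
                     cubeb_state_callback aStateCb, void* aUser)
{
  cubeb_stream* stream = nullptr;
  if (cubeb_stream_init(aContext, &stream, "AudioStream", nullptr, aOutParams,
                        aLatency, aDataCb, aStateCb, aUser) != CUBEB_OK) {
    return nullptr;
  }
  return stream;
}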
@@ -304,9 +304,9 @@ protected:
 private:
   nsresult OpenCubeb(cubeb_stream_params &aParams);

-  static long DataCallback_S(cubeb_stream*, void* aThis, void* aBuffer, long aFrames)
+  static long DataCallback_S(cubeb_stream*, void* aThis, void* /* aInputBuffer */, void* aOutputBuffer, long aFrames)
   {
-    return static_cast<AudioStream*>(aThis)->DataCallback(aBuffer, aFrames);
+    return static_cast<AudioStream*>(aThis)->DataCallback(aOutputBuffer, aFrames);
   }

   static void StateCallback_S(cubeb_stream*, void* aThis, cubeb_state aState)
@@ -557,21 +557,23 @@ AudioCallbackDriver::~AudioCallbackDriver()
 void
 AudioCallbackDriver::Init()
 {
-  cubeb_stream_params params;
+  cubeb_stream_params out_params;
+  cubeb_stream_params in_params;
   uint32_t latency;

   MOZ_ASSERT(!NS_IsMainThread(),
              "This is blocking and should never run on the main thread.");

-  mSampleRate = params.rate = CubebUtils::PreferredSampleRate();
+  out_params.devid = nullptr; // XXX take from config for the graph
+  mSampleRate = out_params.rate = CubebUtils::PreferredSampleRate();

 #if defined(__ANDROID__)
 #if defined(MOZ_B2G)
-  params.stream_type = CubebUtils::ConvertChannelToCubebType(mAudioChannel);
+  out_params.stream_type = CubebUtils::ConvertChannelToCubebType(mAudioChannel);
 #else
-  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
+  out_params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
 #endif
-  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
+  if (out_params.stream_type == CUBEB_STREAM_TYPE_MAX) {
     NS_WARNING("Bad stream type");
     return;
   }
@@ -579,21 +581,27 @@ AudioCallbackDriver::Init()
   (void)mAudioChannel;
 #endif

-  params.channels = mGraphImpl->AudioChannelCount();
+  out_params.channels = mGraphImpl->AudioChannelCount();
   if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
-    params.format = CUBEB_SAMPLE_S16NE;
+    out_params.format = CUBEB_SAMPLE_S16NE;
   } else {
-    params.format = CUBEB_SAMPLE_FLOAT32NE;
+    out_params.format = CUBEB_SAMPLE_FLOAT32NE;
   }

-  if (cubeb_get_min_latency(CubebUtils::GetCubebContext(), params, &latency) != CUBEB_OK) {
+  if (cubeb_get_min_latency(CubebUtils::GetCubebContext(), out_params, &latency) != CUBEB_OK) {
     NS_WARNING("Could not get minimal latency from cubeb.");
     return;
   }

+  in_params = out_params;
+  in_params.channels = 1; // change to support optional stereo capture
+
   cubeb_stream* stream;
+  // XXX Only pass input in_params if we have an input listener. Always
+  // set up output because it's easier, and it will just get silence.
+  // XXX Add support for adding/removing an input listener later.
   if (cubeb_stream_init(CubebUtils::GetCubebContext(), &stream,
-                        "AudioCallbackDriver", params, latency,
+                        "AudioCallbackDriver", &out_params, &in_params, latency,
                         DataCallback_s, StateCallback_s, this) == CUBEB_OK) {
     mAudioStream.own(stream);
   } else {
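The Init() change above reuses the playback parameters for the capture side and then narrows capture to mono. A minimal sketch of that pattern (format and rate values are placeholders, not taken from the patch):

cubeb_stream_params out_params;
out_params.format = CUBEB_SAMPLE_FLOAT32NE; // placeholder: the driver picks S16NE or FLOAT32NE
out_params.rate = 48000;                    // placeholder: the driver uses CubebUtils::PreferredSampleRate()
out_params.channels = 2;

// Capture inherits rate/format from playback, then forces one channel,
// matching the in_params = out_params; in_params.channels = 1; lines above.
cubeb_stream_params in_params = out_params;
in_params.channels = 1;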
@@ -723,11 +731,12 @@ AudioCallbackDriver::WakeUp()

 /* static */ long
 AudioCallbackDriver::DataCallback_s(cubeb_stream* aStream,
-                                    void* aUser, void* aBuffer,
+                                    void* aUser, void* aInputBuffer, void* aOutputBuffer,
                                     long aFrames)
 {
   AudioCallbackDriver* driver = reinterpret_cast<AudioCallbackDriver*>(aUser);
-  return driver->DataCallback(static_cast<AudioDataValue*>(aBuffer), aFrames);
+  return driver->DataCallback(static_cast<AudioDataValue*>(aInputBuffer),
+                              static_cast<AudioDataValue*>(aOutputBuffer), aFrames);
 }

 /* static */ void
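With the full-duplex callback shape introduced above, the input and output buffers arrive in a single callback and therefore stay in lockstep. A hypothetical loopback callback under assumed mono-in/stereo-out float formats (none of this is from the patch):

// Copies the microphone buffer to both output channels; writes silence when
// the stream was opened without an input side (aInputBuffer == nullptr).
static long
LoopbackCallback_s(cubeb_stream* /* aStream */, void* /* aUser */,
                   void* aInputBuffer, void* aOutputBuffer, long aFrames)
{
  float* in = static_cast<float*>(aInputBuffer);
  float* out = static_cast<float*>(aOutputBuffer);
  for (long i = 0; i < aFrames; ++i) {
    float sample = in ? in[i] : 0.0f;
    out[2 * i] = sample;
    out[2 * i + 1] = sample;
  }
  return aFrames; // returning exactly aFrames keeps the stream running
}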
@@ -794,13 +803,14 @@ AudioCallbackDriver::OSXDeviceSwitchingWorkaround()
 #endif // XP_MACOSX

 long
-AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
+AudioCallbackDriver::DataCallback(AudioDataValue* aInputBuffer,
+                                  AudioDataValue* aOutputBuffer, long aFrames)
 {
   bool stillProcessing;

 #ifdef XP_MACOSX
   if (OSXDeviceSwitchingWorkaround()) {
-    PodZero(aBuffer, aFrames * mGraphImpl->AudioChannelCount());
+    PodZero(aOutputBuffer, aFrames * mGraphImpl->AudioChannelCount());
     return aFrames;
   }
 #endif
@@ -820,7 +830,7 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
   // driver is the first one for this graph), and the graph would exit. Simply
   // return here until we have messages.
   if (!mGraphImpl->MessagesQueued()) {
-    PodZero(aBuffer, aFrames * mGraphImpl->AudioChannelCount());
+    PodZero(aOutputBuffer, aFrames * mGraphImpl->AudioChannelCount());
     return aFrames;
   }
   mGraphImpl->SwapMessageQueues();
@@ -837,7 +847,7 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
     mIterationDurationMS /= 4;
   }

-  mBuffer.SetBuffer(aBuffer, aFrames);
+  mBuffer.SetBuffer(aOutputBuffer, aFrames);
   // fill part or all with leftover data from last iteration (since we
   // align to Audio blocks)
   mScratchBuffer.Empty(mBuffer);
@@ -153,10 +153,6 @@ public:
     return mIterationEnd;
   }

-  virtual void GetAudioBuffer(float** aBuffer, long& aFrames) {
-    MOZ_CRASH("This is not an Audio GraphDriver!");
-  }
-
   virtual AudioCallbackDriver* AsAudioCallbackDriver() {
     return nullptr;
   }
@@ -391,7 +387,7 @@ public:

   /* Static wrapper function cubeb calls back. */
   static long DataCallback_s(cubeb_stream * aStream,
-                             void * aUser, void * aBuffer,
+                             void * aUser, void * aInputBuffer, void * aOutputBuffer,
                              long aFrames);
   static void StateCallback_s(cubeb_stream* aStream, void * aUser,
                               cubeb_state aState);
@@ -401,7 +397,7 @@ public:
    * audio. If the return value is exactly aFrames, this function will get
    * called again. If it is less than aFrames, the stream will go in draining
    * mode, and this function will not be called again. */
-  long DataCallback(AudioDataValue* aBuffer, long aFrames);
+  long DataCallback(AudioDataValue* aInputBuffer, AudioDataValue* aOutputBuffer, long aFrames);
   /* This function is called by the underlying audio backend, but is only used
    * for informational purposes at the moment. */
   void StateCallback(cubeb_state aState);
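The comment block above defines the drain contract for DataCallback. A hypothetical illustration of that contract (plain float buffers; AudioDataValue and channel handling are elided for the sketch):

#include <algorithm>
#include <cstring>

// Returning exactly aFrames keeps callbacks coming; returning fewer puts the
// stream into draining mode, after which this callback is not called again.
static long
DataCallbackWithDrain(float* aOutputBuffer, long aFrames, long& aFramesLeft)
{
  long toWrite = std::min(aFrames, aFramesLeft);
  std::memset(aOutputBuffer, 0, toWrite * sizeof(float)); // silence as stand-in audio
  aFramesLeft -= toWrite;
  return toWrite; // becomes < aFrames on the final call, triggering drain
}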
@@ -47,6 +47,7 @@ namespace mozilla {
 MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
   : mMutex("mozilla::MediaEngineWebRTC"),
     mVoiceEngine(nullptr),
+    mAudioInput(nullptr),
     mAudioEngineInit(false)
 {
 #ifndef MOZ_B2G_CAMERA
@@ -239,7 +240,6 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
                                          nsTArray<RefPtr<MediaEngineAudioSource> >* aASources)
 {
   ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
-  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
   // We spawn threads to handle gUM runnables, so we must protect the member vars
   MutexAutoLock lock(mMutex);

@@ -283,13 +283,16 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
     mAudioEngineInit = true;
   }

-  ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
-  if (!ptrVoEHw) {
-    return;
+  if (!mAudioInput) {
+    if (true /*platform_supports_full_duplex*/) {
+      mAudioInput = new mozilla::AudioInputCubeb(mVoiceEngine);
+    } else {
+      mAudioInput = new mozilla::AudioInputWebRTC(mVoiceEngine);
+    }
   }

   int nDevices = 0;
-  ptrVoEHw->GetNumOfRecordingDevices(nDevices);
+  mAudioInput->GetNumOfRecordingDevices(nDevices);
   int i;
 #if defined(MOZ_WIDGET_ANDROID) || defined(MOZ_WIDGET_GONK)
   i = 0; // Bug 1037025 - let the OS handle defaulting for now on android/b2g
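After the change above, EnumerateAudioDevices selects a capture backend once and then talks only to the common AudioInput interface (the classes are added to MediaEngineWebRTC.h further down); the platform_supports_full_duplex test is hard-coded to true for now. A hedged usage sketch, with voiceEngine standing in for the engine pointer the class holds:

RefPtr<mozilla::AudioInput> audioInput;
if (true /* platform_supports_full_duplex, hard-coded for now */) {
  audioInput = new mozilla::AudioInputCubeb(voiceEngine);  // full-duplex cubeb capture
} else {
  audioInput = new mozilla::AudioInputWebRTC(voiceEngine); // legacy VoEHardware path
}

int nDevices = 0;
audioInput->GetNumOfRecordingDevices(nDevices); // identical call either way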
@@ -305,10 +308,9 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
     deviceName[0] = '\0';
     uniqueId[0] = '\0';

-    int error = ptrVoEHw->GetRecordingDeviceName(i, deviceName, uniqueId);
+    int error = mAudioInput->GetRecordingDeviceName(i, deviceName, uniqueId);
     if (error) {
-      LOG((" VoEHardware:GetRecordingDeviceName: Failed %d",
-           ptrVoEBase->LastError() ));
+      LOG((" VoEHardware:GetRecordingDeviceName: Failed %d", error));
       continue;
     }

@@ -324,8 +326,8 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
       // We've already seen this device, just append.
       aASources->AppendElement(aSource.get());
     } else {
-      aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
-                                                      deviceName, uniqueId);
+      aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, mAudioInput,
+                                                      i, deviceName, uniqueId);
       mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
       aASources->AppendElement(aSource);
     }
@@ -7,6 +7,7 @@

 #include "prcvar.h"
 #include "prthread.h"
+#include "prprf.h"
 #include "nsIThread.h"
 #include "nsIRunnable.h"

@@ -26,6 +27,8 @@
 #include "AudioSegment.h"
 #include "StreamBuffer.h"
 #include "MediaStreamGraph.h"
+#include "cubeb/cubeb.h"
+#include "CubebUtils.h"

 #include "MediaEngineWrapper.h"
 #include "mozilla/dom/MediaStreamTrackBinding.h"
@@ -119,6 +122,159 @@ protected:
   nsCString mUUID;
 };

+// Small subset of VoEHardware
+class AudioInput
+{
+public:
+  AudioInput(webrtc::VoiceEngine* aVoiceEngine) : mVoiceEngine(aVoiceEngine) {};
+  virtual ~AudioInput() {}
+
+  NS_INLINE_DECL_REFCOUNTING(AudioInput)
+
+  virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
+  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                                     char aStrGuidUTF8[128]) = 0;
+  virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
+  virtual void StartRecording(MediaStreamGraph *aGraph) = 0;
+  virtual void StopRecording(MediaStreamGraph *aGraph) = 0;
+  virtual int SetRecordingDevice(int aIndex) = 0;
+
+protected:
+  webrtc::VoiceEngine* mVoiceEngine;
+};
+
+class AudioInputCubeb : public AudioInput,
+                        public MediaStreamListener
+{
+public:
+  AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine) :
+    AudioInput(aVoiceEngine), mDevices(nullptr) {}
+  virtual ~AudioInputCubeb()
+  {
+    if (mDevices) {
+      cubeb_device_collection_destroy(mDevices);
+      mDevices = nullptr;
+    }
+  }
+
+  virtual int GetNumOfRecordingDevices(int& aDevices)
+  {
+    // devices = cubeb_get_num_devices(...)
+    if (CUBEB_OK != cubeb_enumerate_devices(CubebUtils::GetCubebContext(),
+                                            CUBEB_DEVICE_TYPE_INPUT,
+                                            &mDevices)) {
+      return 0;
+    }
+    aDevices = 0;
+    for (uint32_t i = 0; i < mDevices->count; i++) {
+      if (mDevices->device[i]->type == CUBEB_DEVICE_TYPE_INPUT && // paranoia
+          mDevices->device[i]->state == CUBEB_DEVICE_STATE_ENABLED)
+      {
+        aDevices++;
+        // XXX to support device changes, we need to identify by name/UUID not index
+      }
+    }
+    return 0;
+  }
+
+  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                                     char aStrGuidUTF8[128])
+  {
+    if (!mDevices) {
+      return 1;
+    }
+    int devindex = aIndex == -1 ? 0 : aIndex;
+    PR_snprintf(aStrNameUTF8, 128, "%s%s", aIndex == -1 ? "default: " : "",
+                mDevices->device[devindex]->friendly_name);
+    aStrGuidUTF8[0] = '\0';
+    return 0;
+  }
+
+  virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
+  {
+    // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT
+    aIsAvailable = true;
+    return 0;
+  }
+
+  virtual void StartRecording(MediaStreamGraph *aGraph)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
+    ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
+    if (ptrVoERender) {
+      ptrVoERender->SetExternalRecordingStatus(true);
+    }
+    aGraph->OpenAudioInput(nullptr, this);
+  }
+
+  virtual void StopRecording(MediaStreamGraph *aGraph)
+  {
+    aGraph->CloseAudioInput(this);
+  }
+
+  virtual int SetRecordingDevice(int aIndex)
+  {
+    // Not relevant to cubeb
+    return 1;
+  }
+
+private:
+  cubeb_device_collection* mDevices;
+};
+
+class AudioInputWebRTC : public AudioInput
+{
+public:
+  AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
+  virtual ~AudioInputWebRTC() {}
+
+  virtual int GetNumOfRecordingDevices(int& aDevices)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    return ptrVoEHw->GetNumOfRecordingDevices(aDevices);
+  }
+
+  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                                     char aStrGuidUTF8[128])
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    return ptrVoEHw->GetRecordingDeviceName(aIndex, aStrNameUTF8,
+                                            aStrGuidUTF8);
+  }
+
+  virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
+    return 0;
+  }
+
+  virtual void StartRecording(MediaStreamGraph *aGraph) {}
+  virtual void StopRecording(MediaStreamGraph *aGraph) {}
+
+  virtual int SetRecordingDevice(int aIndex)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    return ptrVoEHw->SetRecordingDevice(aIndex);
+  }
+};
+
 class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
                                           public webrtc::VoEMediaProcess,
                                           private MediaConstraintsHelper
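AudioInputCubeb above relies on cubeb's device enumeration: cubeb_enumerate_devices fills a cubeb_device_collection that the caller owns and must release with cubeb_device_collection_destroy, as the destructor does. A minimal standalone sketch of the same pattern, using the cubeb API as it appears in this patch:

#include <cstdint>
#include "cubeb/cubeb.h"

// Counts input devices that are currently enabled; returns 0 on failure.
// Unlike AudioInputCubeb, this sketch releases the collection immediately
// instead of caching it for later GetRecordingDeviceName() lookups.
static int
CountEnabledInputDevices(cubeb* aContext)
{
  cubeb_device_collection* devices = nullptr;
  if (cubeb_enumerate_devices(aContext, CUBEB_DEVICE_TYPE_INPUT,
                              &devices) != CUBEB_OK) {
    return 0;
  }
  int enabled = 0;
  for (uint32_t i = 0; i < devices->count; i++) {
    if (devices->device[i]->state == CUBEB_DEVICE_STATE_ENABLED) {
      enabled++;
    }
  }
  cubeb_device_collection_destroy(devices);
  return enabled;
}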
@@ -126,11 +282,13 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
 public:
   MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
                                     webrtc::VoiceEngine* aVoiceEnginePtr,
+                                    mozilla::AudioInput* aAudioInput,
                                     int aIndex,
                                     const char* name,
                                     const char* uuid)
     : MediaEngineAudioSource(kReleased)
     , mVoiceEngine(aVoiceEnginePtr)
+    , mAudioInput(aAudioInput)
     , mMonitor("WebRTCMic.Monitor")
     , mThread(aThread)
     , mCapIndex(aIndex)
@@ -146,6 +304,7 @@ public:
     , mPlayoutDelay(0)
     , mNullTransport(nullptr) {
     MOZ_ASSERT(aVoiceEnginePtr);
+    MOZ_ASSERT(aAudioInput);
     mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
     mDeviceUUID.Assign(uuid);
     Init();
@@ -207,6 +366,8 @@ private:
   void Init();

   webrtc::VoiceEngine* mVoiceEngine;
+  RefPtr<mozilla::AudioInput> mAudioInput;
+
   ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
@@ -265,6 +426,7 @@ private:
   // gUM runnables can e.g. Enumerate from multiple threads
   Mutex mMutex;
   webrtc::VoiceEngine* mVoiceEngine;
+  RefPtr<mozilla::AudioInput> mAudioInput;
   bool mAudioEngineInit;

   bool mHasTabVideoSource;
@@ -288,8 +288,7 @@ MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aC
   AssertIsOnOwningThread();
   if (mState == kReleased) {
     if (mInitDone) {
-      ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
-      if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
+      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
         return NS_ERROR_FAILURE;
       }
       mState = kAllocated;
@@ -382,6 +381,8 @@ MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
   // Attach external media processor, so this::Process will be called.
   mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);

+  mAudioInput->StartRecording(aStream->Graph());
+
   return NS_OK;
 }

@@ -412,6 +413,8 @@ MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
     mState = kStopped;
   }

+  mAudioInput->StopRecording(aSource->Graph());
+
   mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);

   if (mVoEBase->StopSend(mChannel)) {
@@ -475,8 +478,7 @@ MediaEngineWebRTCMicrophoneSource::Init()
   LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));

   // Check for availability.
-  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
-  if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
+  if (mAudioInput->SetRecordingDevice(mCapIndex)) {
     return;
   }

@@ -484,7 +486,7 @@ MediaEngineWebRTCMicrophoneSource::Init()
   // Because of the permission mechanism of B2G, we need to skip the status
   // check here.
   bool avail = false;
-  ptrVoEHw->GetRecordingDeviceStatus(avail);
+  mAudioInput->GetRecordingDeviceStatus(avail);
   if (!avail) {
     return;
   }
@@ -639,6 +641,7 @@ MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName)
 {
   aName.AssignLiteral("AudioCapture");
 }
+
 void
 MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID)
 {
@@ -696,4 +699,5 @@ MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
   // There is only one way of capturing audio for now, and it's always adequate.
   return 0;
 }
+
 }