Bug 1221587: allow getUserMedia to use full-duplex cubeb streams r=padenot

--HG--
extra : commitid : AH6pOM2E3J2
Paul Adenot 2016-01-21 11:51:36 -05:00
Parent 3abc9ead67
Commit 61fbf51469
7 changed files: 214 additions and 40 deletions
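The change this commit makes across all seven files: cubeb streams go from output-only to full duplex, so a single stream (and a single callback) carries both the microphone input and the MediaStreamGraph output, keeping the two directions in lockstep for getUserMedia. Concretely, cubeb_stream_init now takes a parameter struct per direction (nullptr for a side that is unused), and the data callback gains an input-buffer argument. A sketch of the callback shape the call sites below assume, paraphrased from this diff rather than quoted from the in-tree header:

  // Full-duplex data callback, as implied by the call sites in this patch.
  // aInputBuffer holds aFrames of capture data (null when the stream has no
  // input side); aOutputBuffer must be filled with aFrames of playback data.
  // Returning fewer than aFrames frames puts the stream into draining mode.
  typedef long (*data_callback)(cubeb_stream* aStream, void* aUser,
                                void* aInputBuffer, void* aOutputBuffer,
                                long aFrames);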

View file: dom/media/AudioStream.cpp

@@ -364,7 +364,7 @@ AudioStream::OpenCubeb(cubeb_stream_params &aParams)
 {
   cubeb_stream* stream;
-  if (cubeb_stream_init(cubebContext, &stream, "AudioStream", aParams,
+  if (cubeb_stream_init(cubebContext, &stream, "AudioStream", nullptr, &aParams,
                         latency, DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
     MonitorAutoLock mon(mMonitor);
     MOZ_ASSERT(mState != SHUTDOWN);
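AudioStream only renders audio, so it opts out of the input side entirely; judging by this call site, the unused input parameters are passed as nullptr and everything else stays as before. An annotated sketch of that output-only pattern, with the context, params, and latency values assumed in scope:

  cubeb_stream* stream;
  // Playback-only: no input params, so the callback's input buffer stays null.
  if (cubeb_stream_init(cubebContext, &stream, "AudioStream",
                        nullptr,   // input side unused
                        &aParams,  // output side: rate/channels/format
                        latency, DataCallback_S, StateCallback_S,
                        this) == CUBEB_OK) {
    // Stream is live; pair with cubeb_stream_destroy() on teardown.
  }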

View file: dom/media/AudioStream.h

@@ -304,9 +304,9 @@ protected:
 private:
   nsresult OpenCubeb(cubeb_stream_params &aParams);

-  static long DataCallback_S(cubeb_stream*, void* aThis, void* aBuffer, long aFrames)
+  static long DataCallback_S(cubeb_stream*, void* aThis, void* /* aInputBuffer */, void* aOutputBuffer, long aFrames)
   {
-    return static_cast<AudioStream*>(aThis)->DataCallback(aBuffer, aFrames);
+    return static_cast<AudioStream*>(aThis)->DataCallback(aOutputBuffer, aFrames);
   }

   static void StateCallback_S(cubeb_stream*, void* aThis, cubeb_state aState)
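The header shows the trampoline idiom used at every cubeb boundary: cubeb is a C API, so a static member function recovers the C++ object from the user pointer and forwards the call, and a playback-only consumer simply ignores the new input-buffer argument. A stripped-down sketch with a hypothetical Player class:

  class Player {
  public:
    static long DataCallback_S(cubeb_stream*, void* aThis,
                               void* /* aInputBuffer */, void* aOutputBuffer,
                               long aFrames)
    {
      // aThis is the pointer handed to cubeb_stream_init(); cast it back.
      return static_cast<Player*>(aThis)->DataCallback(aOutputBuffer, aFrames);
    }
  private:
    long DataCallback(void* aOutputBuffer, long aFrames); // fills aOutputBuffer
  };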

View file: dom/media/GraphDriver.cpp

@@ -557,21 +557,23 @@ AudioCallbackDriver::~AudioCallbackDriver()
 void
 AudioCallbackDriver::Init()
 {
-  cubeb_stream_params params;
+  cubeb_stream_params out_params;
+  cubeb_stream_params in_params;
   uint32_t latency;

   MOZ_ASSERT(!NS_IsMainThread(),
              "This is blocking and should never run on the main thread.");

-  mSampleRate = params.rate = CubebUtils::PreferredSampleRate();
+  out_params.devid = nullptr; // XXX take from config for the graph
+  mSampleRate = out_params.rate = CubebUtils::PreferredSampleRate();

 #if defined(__ANDROID__)
 #if defined(MOZ_B2G)
-  params.stream_type = CubebUtils::ConvertChannelToCubebType(mAudioChannel);
+  out_params.stream_type = CubebUtils::ConvertChannelToCubebType(mAudioChannel);
 #else
-  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
+  out_params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
 #endif
-  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
+  if (out_params.stream_type == CUBEB_STREAM_TYPE_MAX) {
     NS_WARNING("Bad stream type");
     return;
   }
@@ -579,21 +581,27 @@ AudioCallbackDriver::Init()
   (void)mAudioChannel;
 #endif

-  params.channels = mGraphImpl->AudioChannelCount();
+  out_params.channels = mGraphImpl->AudioChannelCount();
   if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
-    params.format = CUBEB_SAMPLE_S16NE;
+    out_params.format = CUBEB_SAMPLE_S16NE;
   } else {
-    params.format = CUBEB_SAMPLE_FLOAT32NE;
+    out_params.format = CUBEB_SAMPLE_FLOAT32NE;
   }

-  if (cubeb_get_min_latency(CubebUtils::GetCubebContext(), params, &latency) != CUBEB_OK) {
+  if (cubeb_get_min_latency(CubebUtils::GetCubebContext(), out_params, &latency) != CUBEB_OK) {
     NS_WARNING("Could not get minimal latency from cubeb.");
     return;
   }

+  in_params = out_params;
+  in_params.channels = 1; // change to support optional stereo capture
+
   cubeb_stream* stream;
+  // XXX Only pass input in_params if we have an input listener.  Always
+  // set up output because it's easier, and it will just get silence.
+  // XXX Add support for adding/removing an input listener later.
   if (cubeb_stream_init(CubebUtils::GetCubebContext(), &stream,
-                        "AudioCallbackDriver", params, latency,
+                        "AudioCallbackDriver", &out_params, &in_params, latency,
                         DataCallback_s, StateCallback_s, this) == CUBEB_OK) {
     mAudioStream.own(stream);
   } else {
@@ -723,11 +731,12 @@ AudioCallbackDriver::WakeUp()

 /* static */ long
 AudioCallbackDriver::DataCallback_s(cubeb_stream* aStream,
-                                    void* aUser, void* aBuffer,
+                                    void* aUser, void* aInputBuffer, void* aOutputBuffer,
                                     long aFrames)
 {
   AudioCallbackDriver* driver = reinterpret_cast<AudioCallbackDriver*>(aUser);
-  return driver->DataCallback(static_cast<AudioDataValue*>(aBuffer), aFrames);
+  return driver->DataCallback(static_cast<AudioDataValue*>(aInputBuffer),
+                              static_cast<AudioDataValue*>(aOutputBuffer), aFrames);
 }

 /* static */ void
@@ -794,13 +803,14 @@ AudioCallbackDriver::OSXDeviceSwitchingWorkaround()
 #endif // XP_MACOSX

 long
-AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
+AudioCallbackDriver::DataCallback(AudioDataValue* aInputBuffer,
+                                  AudioDataValue* aOutputBuffer, long aFrames)
 {
   bool stillProcessing;

 #ifdef XP_MACOSX
   if (OSXDeviceSwitchingWorkaround()) {
-    PodZero(aBuffer, aFrames * mGraphImpl->AudioChannelCount());
+    PodZero(aOutputBuffer, aFrames * mGraphImpl->AudioChannelCount());
     return aFrames;
   }
 #endif
@@ -820,7 +830,7 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
   // driver is the first one for this graph), and the graph would exit. Simply
   // return here until we have messages.
   if (!mGraphImpl->MessagesQueued()) {
-    PodZero(aBuffer, aFrames * mGraphImpl->AudioChannelCount());
+    PodZero(aOutputBuffer, aFrames * mGraphImpl->AudioChannelCount());
     return aFrames;
   }
   mGraphImpl->SwapMessageQueues();
@@ -837,7 +847,7 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
     mIterationDurationMS /= 4;
   }

-  mBuffer.SetBuffer(aBuffer, aFrames);
+  mBuffer.SetBuffer(aOutputBuffer, aFrames);
   // fill part or all with leftover data from last iteration (since we
   // align to Audio blocks)
   mScratchBuffer.Empty(mBuffer);
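Init() derives the input parameters from the output parameters, so the two directions share a rate and sample format, with capture pinned to mono for now, and every write in DataCallback() now targets aOutputBuffer explicitly. A minimal sketch of the contract a full-duplex callback has to honor under this scheme (mChannels and mInputChannels are hypothetical stand-ins for the graph's channel counts):

  long DataCallback(AudioDataValue* aInputBuffer,
                    AudioDataValue* aOutputBuffer, long aFrames)
  {
    // Capture side: aFrames * mInputChannels samples are valid in
    // aInputBuffer here; hand them to any input listener.
    // Render side: always produce audio, silence if nothing is ready,
    // since the backend plays whatever ends up in the buffer.
    PodZero(aOutputBuffer, aFrames * mChannels);
    return aFrames; // returning < aFrames would put the stream into draining
  }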

View file: dom/media/GraphDriver.h

@@ -153,10 +153,6 @@ public:
     return mIterationEnd;
   }

-  virtual void GetAudioBuffer(float** aBuffer, long& aFrames) {
-    MOZ_CRASH("This is not an Audio GraphDriver!");
-  }
-
   virtual AudioCallbackDriver* AsAudioCallbackDriver() {
     return nullptr;
   }
@@ -391,7 +387,7 @@ public:
   /* Static wrapper function cubeb calls back. */
   static long DataCallback_s(cubeb_stream * aStream,
-                             void * aUser, void * aBuffer,
+                             void * aUser, void * aInputBuffer, void * aOutputBuffer,
                              long aFrames);
   static void StateCallback_s(cubeb_stream* aStream, void * aUser,
                               cubeb_state aState);
@@ -401,7 +397,7 @@ public:
    * audio. If the return value is exactly aFrames, this function will get
    * called again. If it is less than aFrames, the stream will go in draining
    * mode, and this function will not be called again. */
-  long DataCallback(AudioDataValue* aBuffer, long aFrames);
+  long DataCallback(AudioDataValue* aInputBuffer, AudioDataValue* aOutputBuffer, long aFrames);
   /* This function is called by the underlying audio backend, but is only used
    * for informational purposes at the moment. */
   void StateCallback(cubeb_state aState);

View file: dom/media/webrtc/MediaEngineWebRTC.cpp

@@ -47,6 +47,7 @@ namespace mozilla {
 MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
   : mMutex("mozilla::MediaEngineWebRTC"),
     mVoiceEngine(nullptr),
+    mAudioInput(nullptr),
     mAudioEngineInit(false)
 {
 #ifndef MOZ_B2G_CAMERA
@@ -239,7 +240,6 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
                                          nsTArray<RefPtr<MediaEngineAudioSource> >* aASources)
 {
   ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
-  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;

   // We spawn threads to handle gUM runnables, so we must protect the member vars
   MutexAutoLock lock(mMutex);
@@ -283,13 +283,16 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
     mAudioEngineInit = true;
   }

-  ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
-  if (!ptrVoEHw) {
-    return;
+  if (!mAudioInput) {
+    if (true /*platform_supports_full_duplex*/) {
+      mAudioInput = new mozilla::AudioInputCubeb(mVoiceEngine);
+    } else {
+      mAudioInput = new mozilla::AudioInputWebRTC(mVoiceEngine);
+    }
   }

   int nDevices = 0;
-  ptrVoEHw->GetNumOfRecordingDevices(nDevices);
+  mAudioInput->GetNumOfRecordingDevices(nDevices);
   int i;
 #if defined(MOZ_WIDGET_ANDROID) || defined(MOZ_WIDGET_GONK)
   i = 0; // Bug 1037025 - let the OS handle defaulting for now on android/b2g
@@ -305,10 +308,9 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
     deviceName[0] = '\0';
     uniqueId[0] = '\0';

-    int error = ptrVoEHw->GetRecordingDeviceName(i, deviceName, uniqueId);
+    int error = mAudioInput->GetRecordingDeviceName(i, deviceName, uniqueId);
     if (error) {
-      LOG((" VoEHardware:GetRecordingDeviceName: Failed %d",
-           ptrVoEBase->LastError() ));
+      LOG((" VoEHardware:GetRecordingDeviceName: Failed %d", error));
       continue;
     }
@@ -324,8 +326,8 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
       // We've already seen this device, just append.
       aASources->AppendElement(aSource.get());
     } else {
-      aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
-                                                      deviceName, uniqueId);
+      aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, mAudioInput,
+                                                      i, deviceName, uniqueId);
       mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
       aASources->AppendElement(aSource);
     }
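Enumeration no longer touches webrtc::VoEHardware directly: one AudioInput backend is created lazily per engine (the cubeb one where full duplex is available; the placeholder `true /*platform_supports_full_duplex*/` stands in for a real capability check) and is then shared by every microphone source. Roughly how a caller walks the devices through the new interface, error handling trimmed:

  int nDevices = 0;
  mAudioInput->GetNumOfRecordingDevices(nDevices);
  for (int i = 0; i < nDevices; i++) {
    char deviceName[128];
    char uniqueId[128];
    if (mAudioInput->GetRecordingDeviceName(i, deviceName, uniqueId) == 0) {
      // Wrap (i, deviceName, uniqueId) in a MediaEngineWebRTCMicrophoneSource.
    }
  }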

View file: dom/media/webrtc/MediaEngineWebRTC.h

@@ -7,6 +7,7 @@
 #include "prcvar.h"
 #include "prthread.h"
+#include "prprf.h"

 #include "nsIThread.h"
 #include "nsIRunnable.h"
@@ -26,6 +27,8 @@
 #include "AudioSegment.h"
 #include "StreamBuffer.h"
 #include "MediaStreamGraph.h"
+#include "cubeb/cubeb.h"
+#include "CubebUtils.h"
 #include "MediaEngineWrapper.h"
 #include "mozilla/dom/MediaStreamTrackBinding.h"
@@ -119,6 +122,159 @@ protected:
   nsCString mUUID;
 };

+// Small subset of VoEHardware
+class AudioInput
+{
+public:
+  AudioInput(webrtc::VoiceEngine* aVoiceEngine) : mVoiceEngine(aVoiceEngine) {};
+  virtual ~AudioInput() {}
+
+  NS_INLINE_DECL_REFCOUNTING(AudioInput)
+
+  virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
+  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                                     char aStrGuidUTF8[128]) = 0;
+  virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
+  virtual void StartRecording(MediaStreamGraph *aGraph) = 0;
+  virtual void StopRecording(MediaStreamGraph *aGraph) = 0;
+  virtual int SetRecordingDevice(int aIndex) = 0;
+
+protected:
+  webrtc::VoiceEngine* mVoiceEngine;
+};
+
+class AudioInputCubeb : public AudioInput,
+                        public MediaStreamListener
+{
+public:
+  AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine) :
+    AudioInput(aVoiceEngine), mDevices(nullptr) {}
+  virtual ~AudioInputCubeb()
+  {
+    if (mDevices) {
+      cubeb_device_collection_destroy(mDevices);
+      mDevices = nullptr;
+    }
+  }
+
+  virtual int GetNumOfRecordingDevices(int& aDevices)
+  {
+    // devices = cubeb_get_num_devices(...)
+    if (CUBEB_OK != cubeb_enumerate_devices(CubebUtils::GetCubebContext(),
+                                            CUBEB_DEVICE_TYPE_INPUT,
+                                            &mDevices)) {
+      return 0;
+    }
+    aDevices = 0;
+    for (uint32_t i = 0; i < mDevices->count; i++) {
+      if (mDevices->device[i]->type == CUBEB_DEVICE_TYPE_INPUT && // paranoia
+          mDevices->device[i]->state == CUBEB_DEVICE_STATE_ENABLED)
+      {
+        aDevices++;
+        // XXX to support device changes, we need to identify by name/UUID not index
+      }
+    }
+    return 0;
+  }
+
+  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                                     char aStrGuidUTF8[128])
+  {
+    if (!mDevices) {
+      return 1;
+    }
+    int devindex = aIndex == -1 ? 0 : aIndex;
+    PR_snprintf(aStrNameUTF8, 128, "%s%s", aIndex == -1 ? "default: " : "",
+                mDevices->device[devindex]->friendly_name);
+    aStrGuidUTF8[0] = '\0';
+    return 0;
+  }
+
+  virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
+  {
+    // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT
+    aIsAvailable = true;
+    return 0;
+  }
+
+  virtual void StartRecording(MediaStreamGraph *aGraph)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
+    ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
+    if (ptrVoERender) {
+      ptrVoERender->SetExternalRecordingStatus(true);
+    }
+    aGraph->OpenAudioInput(nullptr, this);
+  }
+
+  virtual void StopRecording(MediaStreamGraph *aGraph)
+  {
+    aGraph->CloseAudioInput(this);
+  }
+
+  virtual int SetRecordingDevice(int aIndex)
+  {
+    // Not relevant to cubeb
+    return 1;
+  }
+
+private:
+  cubeb_device_collection* mDevices;
+};
+
+class AudioInputWebRTC : public AudioInput
+{
+public:
+  AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
+  virtual ~AudioInputWebRTC() {}
+
+  virtual int GetNumOfRecordingDevices(int& aDevices)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    return ptrVoEHw->GetNumOfRecordingDevices(aDevices);
+  }
+
+  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                                     char aStrGuidUTF8[128])
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    return ptrVoEHw->GetRecordingDeviceName(aIndex, aStrNameUTF8,
+                                            aStrGuidUTF8);
+  }
+
+  virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
+    return 0;
+  }
+
+  virtual void StartRecording(MediaStreamGraph *aGraph) {}
+  virtual void StopRecording(MediaStreamGraph *aGraph) {}
+
+  virtual int SetRecordingDevice(int aIndex)
+  {
+    ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
+    ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
+    if (!ptrVoEHw) {
+      return 1;
+    }
+    return ptrVoEHw->SetRecordingDevice(aIndex);
+  }
+};
+
 class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
                                           public webrtc::VoEMediaProcess,
                                           private MediaConstraintsHelper
@@ -126,11 +282,13 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
 public:
   MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
                                     webrtc::VoiceEngine* aVoiceEnginePtr,
+                                    mozilla::AudioInput* aAudioInput,
                                     int aIndex,
                                     const char* name,
                                     const char* uuid)
     : MediaEngineAudioSource(kReleased)
     , mVoiceEngine(aVoiceEnginePtr)
+    , mAudioInput(aAudioInput)
     , mMonitor("WebRTCMic.Monitor")
     , mThread(aThread)
     , mCapIndex(aIndex)
@@ -146,6 +304,7 @@ public:
     , mPlayoutDelay(0)
     , mNullTransport(nullptr) {
     MOZ_ASSERT(aVoiceEnginePtr);
+    MOZ_ASSERT(aAudioInput);
     mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
     mDeviceUUID.Assign(uuid);
     Init();
@@ -207,6 +366,8 @@ private:
   void Init();

   webrtc::VoiceEngine* mVoiceEngine;
+  RefPtr<mozilla::AudioInput> mAudioInput;
+
   ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
@@ -265,6 +426,7 @@ private:
   // gUM runnables can e.g. Enumerate from multiple threads
   Mutex mMutex;
   webrtc::VoiceEngine* mVoiceEngine;
+  RefPtr<mozilla::AudioInput> mAudioInput;
   bool mAudioEngineInit;
   bool mHasTabVideoSource;
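AudioInput is a deliberately small strategy interface, just the slice of VoEHardware that gUM needs. AudioInputCubeb answers enumeration from cubeb's device collection and routes capture through the graph (OpenAudioInput/CloseAudioInput), while AudioInputWebRTC forwards each call to VoEHardware unchanged. A sketch of the intended call sequence against either backend, with graph and voiceEngine assumed in scope:

  RefPtr<AudioInput> input = new AudioInputCubeb(voiceEngine);
  int nDevices = 0;
  input->GetNumOfRecordingDevices(nDevices); // cubeb: counts enabled input devices
  input->StartRecording(graph);              // cubeb: graph->OpenAudioInput(nullptr, input)
  // ... capture flows through the graph's full-duplex driver ...
  input->StopRecording(graph);               // cubeb: graph->CloseAudioInput(input)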

View file: dom/media/webrtc/MediaEngineWebRTCAudio.cpp

@@ -288,8 +288,7 @@ MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aC
   AssertIsOnOwningThread();
   if (mState == kReleased) {
     if (mInitDone) {
-      ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
-      if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
+      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
         return NS_ERROR_FAILURE;
       }
       mState = kAllocated;
@@ -382,6 +381,8 @@ MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
   // Attach external media processor, so this::Process will be called.
   mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);

+  mAudioInput->StartRecording(aStream->Graph());
+
   return NS_OK;
 }
@@ -412,6 +413,8 @@ MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
     mState = kStopped;
   }

+  mAudioInput->StopRecording(aSource->Graph());
+
   mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);

   if (mVoEBase->StopSend(mChannel)) {
@@ -475,8 +478,7 @@ MediaEngineWebRTCMicrophoneSource::Init()
   LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));

   // Check for availability.
-  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
-  if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
+  if (mAudioInput->SetRecordingDevice(mCapIndex)) {
     return;
   }
@@ -484,7 +486,7 @@ MediaEngineWebRTCMicrophoneSource::Init()
   // Because of the permission mechanism of B2G, we need to skip the status
   // check here.
   bool avail = false;
-  ptrVoEHw->GetRecordingDeviceStatus(avail);
+  mAudioInput->GetRecordingDeviceStatus(avail);
   if (!avail) {
     return;
   }
@@ -639,6 +641,7 @@ MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName)
 {
   aName.AssignLiteral("AudioCapture");
 }
+
 void
 MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID)
 {
@@ -696,4 +699,5 @@ MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
   // There is only one way of capturing audio for now, and it's always adequate.
   return 0;
 }
+
 }
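The microphone source holds a RefPtr to the shared AudioInput and brackets capture with it: Start() registers the external-media processor before opening the graph's audio input, and Stop() closes the input before deregistering, so no callback ever runs without a consumer. The lifecycle this file now implements, condensed:

  // Start(): processor first, then input, so the first callback has a consumer.
  mVoERender->RegisterExternalMediaProcessing(mChannel,
                                              webrtc::kRecordingPerChannel, *this);
  mAudioInput->StartRecording(aStream->Graph());

  // Stop(): mirror order, input first, then the processor.
  mAudioInput->StopRecording(aSource->Graph());
  mVoERender->DeRegisterExternalMediaProcessing(mChannel,
                                                webrtc::kRecordingPerChannel);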