Bug 802411: Refactor MediaEngine to use GIPS singletons; r=jesup

Anant Narayanan 2012-10-16 17:53:55 -07:00
Parent 75ecc91244
Commit edf634a604
8 changed files with 234 additions and 120 deletions

View file

@@ -76,8 +76,20 @@ public:
/* Stop the device and release the corresponding MediaStream */
virtual nsresult Stop() = 0;
/* Return false if device is currently allocated or started */
bool IsAvailable() {
return !(mState == kAllocated || mState == kStarted);
}
/* It is an error to call Start() before an Allocate(), and Stop() before
* a Start(). Only Allocate() may be called after a Deallocate(). */
protected:
MediaEngineState mState;
};
/**
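The contract above amounts to a small four-state machine. A minimal sketch of the legal transitions, assuming (per IsAvailable above) that a source is available only in kReleased or kStopped; whether Start() may follow Stop() directly is an assumption here, and SourceModel is an illustrative stand-in, not part of the tree:

```cpp
#include <cassert>

enum MediaEngineState { kReleased, kAllocated, kStarted, kStopped };

// Illustrative model: Allocate() only from kReleased, Start() only after
// an Allocate() (or, assumed here, a prior Stop()), Stop() only after a
// Start(), and Deallocate() only while not started.
struct SourceModel {
  MediaEngineState mState = kReleased;
  bool IsAvailable() { return !(mState == kAllocated || mState == kStarted); }
  void Allocate()   { assert(mState == kReleased); mState = kAllocated; }
  void Start()      { assert(mState == kAllocated || mState == kStopped); mState = kStarted; }
  void Stop()       { assert(mState == kStarted); mState = kStopped; }
  void Deallocate() { assert(mState == kAllocated || mState == kStopped); mState = kReleased; }
};
```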

View file

@@ -35,8 +35,10 @@ const MediaEngineVideoOptions MediaEngineDefaultVideoSource::mOpts = {
};
MediaEngineDefaultVideoSource::MediaEngineDefaultVideoSource()
: mTimer(nullptr), mState(kReleased)
{}
: mTimer(nullptr)
{
mState = kReleased;
}
MediaEngineDefaultVideoSource::~MediaEngineDefaultVideoSource()
{}
@@ -205,6 +207,15 @@ NS_IMPL_THREADSAFE_ISUPPORTS1(MediaEngineDefaultAudioSource, nsITimerCallback)
/**
* Default audio source.
*/
MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
: mTimer(nullptr)
{
mState = kReleased;
}
MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource()
{}
void
MediaEngineDefaultAudioSource::GetName(nsAString& aName)
{
@@ -312,13 +323,43 @@ MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
void
MediaEngineDefault::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources) {
aVSources->AppendElement(mVSource);
bool found = false;
int32_t len = mVSources.Length();
for (int32_t i = 0; i < len; i++) {
nsRefPtr<MediaEngineVideoSource> source = mVSources.ElementAt(i);
aVSources->AppendElement(source);
if (source->IsAvailable()) {
found = true;
}
}
// All streams are currently busy, just make a new one.
if (!found) {
nsRefPtr<MediaEngineVideoSource> newSource =
new MediaEngineDefaultVideoSource();
mVSources.AppendElement(newSource);
aVSources->AppendElement(newSource);
}
return;
}
void
MediaEngineDefault::EnumerateAudioDevices(nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources) {
aASources->AppendElement(mASource);
int32_t len = mASources.Length();
for (int32_t i = 0; i < len; i++) {
nsRefPtr<MediaEngineAudioSource> source = mASources.ElementAt(i);
if (source->IsAvailable()) {
aASources->AppendElement(source);
}
}
// All streams are currently busy, just make a new one.
if (aASources->Length() == 0) {
nsRefPtr<MediaEngineAudioSource> newSource =
new MediaEngineDefaultAudioSource();
mASources.AppendElement(newSource);
aASources->AppendElement(newSource);
}
return;
}
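With this change, enumeration returns every known fake source and lazily creates a fresh one when all existing sources are busy, so a second getUserMedia call gets its own source rather than failing. A sketch of the resulting behavior, using the types above (error handling omitted):

```cpp
// Two successive enumerations while the first source is in use.
MediaEngineDefault engine;

nsTArray<nsRefPtr<MediaEngineVideoSource> > first;
engine.EnumerateVideoDevices(&first);    // no sources yet: creates source #1
first[0]->Allocate();                    // source #1 is now busy

nsTArray<nsRefPtr<MediaEngineVideoSource> > second;
engine.EnumerateVideoDevices(&second);   // #1 is busy, so a new #2 is appended
// second now holds #1 (busy) and #2 (available).
```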

View file

@@ -41,7 +41,6 @@ public:
virtual const MediaEngineVideoOptions *GetOptions();
virtual nsresult Allocate();
virtual nsresult Deallocate();
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop();
@@ -60,7 +59,6 @@ protected:
nsCOMPtr<nsITimer> mTimer;
nsRefPtr<layers::ImageContainer> mImageContainer;
MediaEngineState mState;
SourceMediaStream* mSource;
layers::PlanarYCbCrImage* mImage;
static const MediaEngineVideoOptions mOpts;
@@ -70,14 +68,13 @@ class MediaEngineDefaultAudioSource : public nsITimerCallback,
public MediaEngineAudioSource
{
public:
MediaEngineDefaultAudioSource() : mTimer(nullptr), mState(kReleased) {}
~MediaEngineDefaultAudioSource(){};
MediaEngineDefaultAudioSource();
~MediaEngineDefaultAudioSource();
virtual void GetName(nsAString&);
virtual void GetUUID(nsAString&);
virtual nsresult Allocate();
virtual nsresult Deallocate();
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop();
@@ -90,25 +87,21 @@ protected:
TrackID mTrackID;
nsCOMPtr<nsITimer> mTimer;
MediaEngineState mState;
SourceMediaStream* mSource;
};
class MediaEngineDefault : public MediaEngine
{
public:
MediaEngineDefault() {
mVSource = new MediaEngineDefaultVideoSource();
mASource = new MediaEngineDefaultAudioSource();
}
MediaEngineDefault() {}
~MediaEngineDefault() {}
virtual void EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
virtual void EnumerateAudioDevices(nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
private:
nsRefPtr<MediaEngineVideoSource> mVSource;
nsRefPtr<MediaEngineAudioSource> mASource;
nsTArray<nsRefPtr<MediaEngineVideoSource> > mVSources;
nsTArray<nsRefPtr<MediaEngineAudioSource> > mASources;
};
}

View file

@@ -53,13 +53,20 @@ MediaEngineWebRTC::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSourc
return;
}
/**
* We still enumerate every time, in case a new device was plugged in since
* the last call. TODO: Verify that WebRTC actually does deal with hotplugging
* new devices (with or without new engine creation) and accordingly adjust.
Enumeration is not necessary if GIPS reports the same set of devices
for a given instance of the engine. Likewise, if a device is unplugged,
* mVideoSources must be updated.
*/
int num = ptrViECapture->NumberOfCaptureDevices();
if (num <= 0) {
return;
}
for (int i = 0; i < num; i++) {
#ifdef DEBUG
const unsigned int kMaxDeviceNameLength = 128; // XXX FIX!
const unsigned int kMaxUniqueIdLength = 256;
char deviceName[kMaxDeviceNameLength];
@@ -71,8 +78,10 @@ MediaEngineWebRTC::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSourc
int error = ptrViECapture->GetCaptureDevice(i, deviceName,
sizeof(deviceName), uniqueId,
sizeof(uniqueId));
#ifdef DEBUG
if (error) {
LOG((" VieCapture:GetCaptureDevice: Failed %d",
LOG((" VieCapture:GetCaptureDevice: Failed %d",
ptrViEBase->LastError() ));
continue;
}
@@ -82,7 +91,7 @@ MediaEngineWebRTC::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSourc
int numCaps = ptrViECapture->NumberOfCapabilities(uniqueId, kMaxUniqueIdLength);
LOG(("Number of Capabilities %d", numCaps));
for (int j = 0; j < numCaps; j++) {
if (ptrViECapture->GetCaptureCapability(uniqueId, kMaxUniqueIdLength,
j, cap ) != 0 ) {
break;
}
@@ -91,8 +100,16 @@ MediaEngineWebRTC::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSourc
}
#endif
nsRefPtr<MediaEngineVideoSource> vSource = new MediaEngineWebRTCVideoSource(mVideoEngine, i);
aVSources->AppendElement(vSource.forget());
nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
NS_ConvertUTF8toUTF16 uuid(uniqueId);
if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
// We've already seen this device, just append.
aVSources->AppendElement(vSource.get());
} else {
vSource = new MediaEngineWebRTCVideoSource(mVideoEngine, i);
mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
aVSources->AppendElement(vSource);
}
}
ptrViEBase->Release();
@@ -136,31 +153,41 @@ MediaEngineWebRTC::EnumerateAudioDevices(nsTArray<nsRefPtr<MediaEngineAudioSourc
for (int i = 0; i < nDevices; i++) {
// We use constants here because GetRecordingDeviceName takes char[128].
char deviceName[128];
char uniqueID[128];
char uniqueId[128];
// paranoia; jingle doesn't bother with this
deviceName[0] = '\0';
uniqueID[0] = '\0';
uniqueId[0] = '\0';
ptrVoEHw->GetRecordingDeviceName(i, deviceName, uniqueID);
nsRefPtr<MediaEngineAudioSource> aSource = new MediaEngineWebRTCAudioSource(
mVoiceEngine, i, deviceName, uniqueID
);
aASources->AppendElement(aSource.forget());
ptrVoEHw->GetRecordingDeviceName(i, deviceName, uniqueId);
nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
NS_ConvertUTF8toUTF16 uuid(uniqueId);
if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
// We've already seen this device, just append.
aASources->AppendElement(aSource.get());
} else {
aSource = new MediaEngineWebRTCAudioSource(
mVoiceEngine, i, deviceName, uniqueId
);
mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
aASources->AppendElement(aSource);
}
}
ptrVoEHw->Release();
ptrVoEBase->Release();
}
void
MediaEngineWebRTC::Shutdown()
{
if (mVideoEngine) {
mVideoSources.Clear();
webrtc::VideoEngine::Delete(mVideoEngine);
}
if (mVoiceEngine) {
mAudioSources.Clear();
webrtc::VoiceEngine::Delete(mVoiceEngine);
}

View file

@@ -60,9 +60,23 @@ public:
virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
virtual int DeliverFrame(unsigned char*, int, uint32_t, int64_t);
MediaEngineWebRTCVideoSource(webrtc::VideoEngine* videoEnginePtr,
int index, int aMinFps = DEFAULT_MIN_VIDEO_FPS);
~MediaEngineWebRTCVideoSource();
MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr,
int aIndex, int aMinFps = DEFAULT_MIN_VIDEO_FPS)
: mVideoEngine(aVideoEnginePtr)
, mCaptureIndex(aIndex)
, mCapabilityChosen(false)
, mWidth(640)
, mHeight(480)
, mMonitor("WebRTCCamera.Monitor")
, mFps(DEFAULT_VIDEO_FPS)
, mMinFps(aMinFps)
, mInitDone(false)
, mInSnapshotMode(false)
, mSnapshotPath(NULL) {
mState = kReleased;
Init();
}
~MediaEngineWebRTCVideoSource() { Shutdown(); }
virtual void GetName(nsAString&);
virtual void GetUUID(nsAString&);
@@ -115,7 +129,6 @@ private:
int mWidth, mHeight;
TrackID mTrackID;
MediaEngineState mState;
mozilla::ReentrantMonitor mMonitor; // Monitor for processing WebRTC frames.
SourceMediaStream* mSource;
@@ -148,15 +161,12 @@ public:
, mMonitor("WebRTCMic.Monitor")
, mCapIndex(aIndex)
, mChannel(-1)
, mInitDone(false)
, mState(kReleased) {
mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
, mInitDone(false) {
mState = kReleased;
mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
mInitDone = true;
Init();
}
~MediaEngineWebRTCAudioSource() { Shutdown(); }
virtual void GetName(nsAString&);
@@ -192,7 +202,6 @@ private:
int mChannel;
TrackID mTrackID;
bool mInitDone;
MediaEngineState mState;
nsString mDeviceName;
nsString mDeviceUUID;
@@ -207,8 +216,10 @@ public:
: mVideoEngine(NULL)
, mVoiceEngine(NULL)
, mVideoEngineInit(false)
, mAudioEngineInit(false) {}
, mAudioEngineInit(false) {
mVideoSources.Init();
mAudioSources.Init();
}
~MediaEngineWebRTC() { Shutdown(); }
// Clients should ensure they clean up video/audio sources
@@ -225,6 +236,11 @@ private:
// Need this to avoid unnecessary WebRTC calls while enumerating.
bool mVideoEngineInit;
bool mAudioEngineInit;
// Store devices we've already seen in a hashtable for quick return.
// Maps UUID to MediaEngineSource (one set for audio, one for video).
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
};
}
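One consequence of hoisting mState into the MediaEngine base classes is visible in every constructor this patch touches: the state is assigned in the constructor body rather than the initializer list, because C++ does not allow a derived class to initialize an inherited member there. A minimal illustration:

```cpp
struct Base {
protected:
  int mState;
};

struct Derived : Base {
  // Derived() : mState(0) {}  // error: mState is not a member of Derived
  Derived() { mState = 0; }    // OK: assign the inherited member in the body
};
```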

View file

@@ -53,49 +53,6 @@ MediaEngineWebRTCAudioSource::Allocate()
return NS_ERROR_FAILURE;
}
mVoEBase->Init();
mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
if (!mVoERender) {
return NS_ERROR_FAILURE;
}
mChannel = mVoEBase->CreateChannel();
if (mChannel < 0) {
return NS_ERROR_FAILURE;
}
// Check for availability.
webrtc::VoEHardware* ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
if (ptrVoEHw->SetRecordingDevice(mCapIndex)) {
return NS_ERROR_FAILURE;
}
bool avail = false;
ptrVoEHw->GetRecordingDeviceStatus(avail);
if (!avail) {
return NS_ERROR_FAILURE;
}
// Set "codec" to PCM, 32kHz on 1 channel
webrtc::VoECodec* ptrVoECodec;
webrtc::CodecInst codec;
ptrVoECodec = webrtc::VoECodec::GetInterface(mVoiceEngine);
if (!ptrVoECodec) {
return NS_ERROR_FAILURE;
}
strcpy(codec.plname, ENCODING);
codec.channels = CHANNELS;
codec.rate = SAMPLE_RATE;
codec.plfreq = SAMPLE_FREQUENCY;
codec.pacsize = SAMPLE_LENGTH;
codec.pltype = 0; // Default payload type
if (ptrVoECodec->SetSendCodec(mChannel, codec)) {
return NS_ERROR_FAILURE;
}
// Audio doesn't play through unless we set a receiver and destination, so
// we set up a dummy local destination and do a loopback.
mVoEBase->SetLocalReceiver(mChannel, DEFAULT_PORT);
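For context, a sketch of the loopback wiring this comment describes, using the old VoiceEngine API; only the SetLocalReceiver call with DEFAULT_PORT appears in the patch, so the destination and start calls below are assumptions about the surrounding setup:

```cpp
// Dummy local destination plus loopback so captured audio is delivered
// back to this process (assumed sequence; not verbatim from the tree).
int channel = mVoEBase->CreateChannel();
mVoEBase->SetLocalReceiver(channel, DEFAULT_PORT);
mVoEBase->SetSendDestination(channel, DEFAULT_PORT, "127.0.0.1");
mVoEBase->StartReceive(channel);
mVoEBase->StartSend(channel);
```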
@@ -112,9 +69,6 @@ MediaEngineWebRTCAudioSource::Deallocate()
return NS_ERROR_FAILURE;
}
mVoEBase->Terminate();
mVoERender->Release();
mState = kReleased;
return NS_OK;
}
@@ -180,6 +134,56 @@ MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
return NS_ERROR_NOT_IMPLEMENTED;
}
void
MediaEngineWebRTCAudioSource::Init()
{
mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
mVoEBase->Init();
mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
if (!mVoERender) {
return;
}
mChannel = mVoEBase->CreateChannel();
if (mChannel < 0) {
return;
}
// Check for availability.
webrtc::VoEHardware* ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
if (ptrVoEHw->SetRecordingDevice(mCapIndex)) {
return;
}
bool avail = false;
ptrVoEHw->GetRecordingDeviceStatus(avail);
if (!avail) {
return;
}
// Set "codec" to PCM, 32kHz on 1 channel
webrtc::VoECodec* ptrVoECodec;
webrtc::CodecInst codec;
ptrVoECodec = webrtc::VoECodec::GetInterface(mVoiceEngine);
if (!ptrVoECodec) {
return;
}
strcpy(codec.plname, ENCODING);
codec.channels = CHANNELS;
codec.rate = SAMPLE_RATE;
codec.plfreq = SAMPLE_FREQUENCY;
codec.pacsize = SAMPLE_LENGTH;
codec.pltype = 0; // Default payload type
if (ptrVoECodec->SetSendCodec(mChannel, codec)) {
return;
}
mInitDone = true;
}
void
MediaEngineWebRTCAudioSource::Shutdown()
@@ -196,6 +200,8 @@ MediaEngineWebRTCAudioSource::Shutdown()
Deallocate();
}
mVoEBase->Terminate();
mVoERender->Release();
mVoEBase->Release();
mState = kReleased;

View file

@@ -21,29 +21,6 @@ extern PRLogModuleInfo* gMediaManagerLog;
*/
NS_IMPL_THREADSAFE_ISUPPORTS1(MediaEngineWebRTCVideoSource, nsIRunnable)
MediaEngineWebRTCVideoSource::MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr,
int aIndex, int aMinFps)
: mVideoEngine(aVideoEnginePtr)
, mCaptureIndex(aIndex)
, mCapabilityChosen(false)
, mWidth(640)
, mHeight(480)
, mState(kReleased)
, mMonitor("WebRTCCamera.Monitor")
, mFps(DEFAULT_VIDEO_FPS)
, mMinFps(aMinFps)
, mInitDone(false)
, mInSnapshotMode(false)
, mSnapshotPath(NULL)
{
Init();
}
MediaEngineWebRTCVideoSource::~MediaEngineWebRTCVideoSource()
{
Shutdown();
}
// ViEExternalRenderer Callback.
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
@@ -191,10 +168,6 @@ MediaEngineWebRTCVideoSource::Allocate()
return NS_ERROR_FAILURE;
}
if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
return NS_ERROR_FAILURE;
}
mState = kAllocated;
return NS_OK;
}
@@ -206,7 +179,6 @@ MediaEngineWebRTCVideoSource::Deallocate()
return NS_ERROR_FAILURE;
}
mViECapture->StopCapture(mCaptureIndex);
mViECapture->ReleaseCaptureDevice(mCaptureIndex);
mState = kReleased;
return NS_OK;
@@ -254,6 +226,10 @@ MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
return NS_ERROR_FAILURE;
}
if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
return NS_ERROR_FAILURE;
}
mState = kStarted;
return NS_OK;
}
@@ -270,6 +246,7 @@ MediaEngineWebRTCVideoSource::Stop()
mViERender->StopRender(mCaptureIndex);
mViERender->RemoveRenderer(mCaptureIndex);
mViECapture->StopCapture(mCaptureIndex);
mState = kStopped;
return NS_OK;

View file

@@ -443,6 +443,7 @@ public:
nsresult
SelectDevice()
{
bool found = false;
uint32_t count;
if (mPicture || mVideo) {
nsTArray<nsRefPtr<MediaEngineVideoSource> > videoSources;
@@ -455,7 +456,22 @@ public:
));
return NS_ERROR_FAILURE;
}
mVideoDevice = new MediaDevice(videoSources[0]);
// Pick the first available device.
for (uint32_t i = 0; i < count; i++) {
nsRefPtr<MediaEngineVideoSource> vSource = videoSources[i];
if (vSource->IsAvailable()) {
found = true;
mVideoDevice = new MediaDevice(vSource);
break; // stop at the first available device, as the comment says
}
}
if (!found) {
NS_DispatchToMainThread(new ErrorCallbackRunnable(
mSuccess, mError, NS_LITERAL_STRING("HARDWARE_UNAVAILABLE"), mWindowID
));
return NS_ERROR_FAILURE;
}
LOG(("Selected video device"));
}
if (mAudio) {
@@ -469,7 +485,21 @@ public:
));
return NS_ERROR_FAILURE;
}
mAudioDevice = new MediaDevice(audioSources[0]);
// Reset so a video match can't mask a missing audio device.
found = false;
for (uint32_t i = 0; i < count; i++) {
nsRefPtr<MediaEngineAudioSource> aSource = audioSources[i];
if (aSource->IsAvailable()) {
found = true;
mAudioDevice = new MediaDevice(aSource);
break; // take the first available device
}
}
if (!found) {
NS_DispatchToMainThread(new ErrorCallbackRunnable(
mSuccess, mError, NS_LITERAL_STRING("HARDWARE_UNAVAILABLE"), mWindowID
));
return NS_ERROR_FAILURE;
}
LOG(("Selected audio device"));
}
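The video and audio branches above repeat the same scan; a hypothetical helper that captures it (FirstAvailable is an invented name, not part of this patch):

```cpp
// Return the first source whose IsAvailable() is true, or nullptr.
template <class Source>
already_AddRefed<Source>
FirstAvailable(nsTArray<nsRefPtr<Source> >& aSources)
{
  for (uint32_t i = 0; i < aSources.Length(); i++) {
    if (aSources[i]->IsAvailable()) {
      nsRefPtr<Source> source = aSources[i];
      return source.forget();
    }
  }
  return nullptr;
}
```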
@@ -596,11 +626,23 @@ public:
nsTArray<nsCOMPtr<nsIMediaDevice> > *devices =
new nsTArray<nsCOMPtr<nsIMediaDevice> >;
/**
* We only display available devices in the UI for now. We can easily
* change this later, when we implement a more sophisticated UI that
* lets the user revoke a device currently held by another tab (or
* we decide to provide a stream from a device already allocated).
*/
for (i = 0; i < videoCount; i++) {
devices->AppendElement(new MediaDevice(videoSources[i]));
nsRefPtr<MediaEngineVideoSource> vSource = videoSources[i];
if (vSource->IsAvailable()) {
devices->AppendElement(new MediaDevice(vSource));
}
}
for (i = 0; i < audioCount; i++) {
devices->AppendElement(new MediaDevice(audioSources[i]));
nsRefPtr<MediaEngineAudioSource> aSource = audioSources[i];
if (aSource->IsAvailable()) {
devices->AppendElement(new MediaDevice(aSource));
}
}
NS_DispatchToMainThread(new DeviceSuccessCallbackRunnable(