Bug 1156472 - Part 2 - Rename MediaEngineWebRTCAudioSource to MediaEngineWebRTCMicrophoneSource. r=jesup

There are now two different possible audio sources, so this was getting confusing.
Paul Adenot 2015-07-24 14:28:16 +02:00
Parent bae1e652bf
Commit f6609f50c3
3 changed files: 39 additions and 34 deletions
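Context for the rename: within bug 1156472 the media engine gains a second kind of audio source (one that captures rendered audio rather than a microphone), so "audio source" alone no longer identifies the microphone-backed class. A minimal standalone sketch of the resulting naming, assuming the second class is the audio-capture source added in the other parts of this bug; everything here except MediaEngineWebRTCMicrophoneSource is illustrative, not the real mozilla-central declarations:

#include <string>

// Both concrete sources implement the same abstract audio-source interface.
class MediaEngineAudioSource {
public:
  virtual ~MediaEngineAudioSource() = default;
  virtual void GetName(std::string& aName) = 0; // simplified signature
};

// Renamed by this commit; pulls data from a physical capture device.
class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource {
public:
  void GetName(std::string& aName) override { aName = "microphone"; }
};

// Assumed second source (added elsewhere in bug 1156472): captures the audio
// the content process renders, not a microphone.
class MediaEngineWebRTCAudioCaptureSource : public MediaEngineAudioSource {
public:
  void GetName(std::string& aName) override { aName = "AudioCapture"; }
};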

View file

@@ -358,15 +358,14 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
       strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
     }
 
-    nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
+    nsRefPtr<MediaEngineWebRTCMicrophoneSource> aSource;
     NS_ConvertUTF8toUTF16 uuid(uniqueId);
     if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
       // We've already seen this device, just append.
       aASources->AppendElement(aSource.get());
     } else {
-      aSource = new MediaEngineWebRTCAudioSource(
-        mThread, mVoiceEngine, i, deviceName, uniqueId
-      );
+      aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
+                                                      deviceName, uniqueId);
       mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
       aASources->AppendElement(aSource);
     }
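The hunk above is the interesting caller: enumeration caches one source object per device UUID, so a later EnumerateAudioDevices call hands back the same instance instead of constructing a new one. A standalone analogue of that get-or-create pattern, using std::unordered_map and std::shared_ptr in place of nsRefPtrHashtable/nsRefPtr (all names below are hypothetical):

#include <memory>
#include <string>
#include <unordered_map>

// Stand-in for MediaEngineWebRTCMicrophoneSource.
struct MicrophoneSource {
  int index;
  std::string deviceName;
};

class DeviceCache {
public:
  // Mirrors the diff's logic: return the cached source for a UUID if we have
  // already seen the device, otherwise create one and let the map keep it
  // alive (the "Hashtable takes ownership" comment above).
  std::shared_ptr<MicrophoneSource> GetOrCreate(const std::string& uuid,
                                                int index,
                                                const std::string& name) {
    auto it = mSources.find(uuid);
    if (it != mSources.end()) {
      return it->second; // already seen this device, just reuse it
    }
    auto source = std::make_shared<MicrophoneSource>();
    source->index = index;
    source->deviceName = name;
    mSources.emplace(uuid, source);
    return source;
  }

private:
  std::unordered_map<std::string, std::shared_ptr<MicrophoneSource>> mSources;
};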

View file

@@ -133,13 +133,16 @@ private:
   void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) override;
 };
 
-class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
-                                     public webrtc::VoEMediaProcess,
-                                     private MediaConstraintsHelper
+class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
+                                          public webrtc::VoEMediaProcess,
+                                          private MediaConstraintsHelper
 {
 public:
-  MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
-                               int aIndex, const char* name, const char* uuid)
+  MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
+                                    webrtc::VoiceEngine* aVoiceEnginePtr,
+                                    int aIndex,
+                                    const char* name,
+                                    const char* uuid)
     : MediaEngineAudioSource(kReleased)
     , mVoiceEngine(aVoiceEnginePtr)
     , mMonitor("WebRTCMic.Monitor")
@@ -207,7 +210,7 @@ public:
   virtual void Shutdown() override;
 
 protected:
-  ~MediaEngineWebRTCAudioSource() { Shutdown(); }
+  ~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
 
 private:
   void Init();
@@ -294,7 +297,8 @@ private:
   // Store devices we've already seen in a hashtable for quick return.
   // Maps UUID to MediaEngineSource (one set for audio, one for video).
   nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCMicrophoneSource>
+    mAudioSources;
 };
 
 }

View file

@@ -41,9 +41,9 @@ extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 /**
- * Webrtc audio source.
+ * Webrtc microphone source source.
  */
-NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
 
 // XXX temp until MSG supports registration
 StaticRefPtr<AudioOutputObserver> gFarendObserver;
@@ -177,7 +177,7 @@ AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrame
 }
 
 void
-MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
+MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
 {
   if (mInitDone) {
     aName.Assign(mDeviceName);
@@ -187,7 +187,7 @@ MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
 }
 
 void
-MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
+MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
 {
   if (mInitDone) {
     aUUID.Assign(mDeviceUUID);
@@ -197,10 +197,10 @@ MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
-                                     bool aAgcOn, uint32_t aAGC,
-                                     bool aNoiseOn, uint32_t aNoise,
-                                     int32_t aPlayoutDelay)
+MediaEngineWebRTCMicrophoneSource::Config(bool aEchoOn, uint32_t aEcho,
+                                          bool aAgcOn, uint32_t aAGC,
+                                          bool aNoiseOn, uint32_t aNoise,
+                                          int32_t aPlayoutDelay)
 {
   LOG(("Audio config: aec: %d, agc: %d, noise: %d",
        aEchoOn ? aEcho : -1,
@@ -281,9 +281,9 @@ uint32_t MediaEngineWebRTCAudioSource::GetBestFitnessDistance(
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
-                                       const MediaEnginePrefs &aPrefs,
-                                       const nsString& aDeviceId)
+MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
+                                            const MediaEnginePrefs &aPrefs,
+                                            const nsString& aDeviceId)
 {
   if (mState == kReleased) {
     if (mInitDone) {
@@ -309,7 +309,7 @@ MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstr
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Deallocate()
+MediaEngineWebRTCMicrophoneSource::Deallocate()
 {
   bool empty;
   {
@@ -331,7 +331,8 @@ MediaEngineWebRTCAudioSource::Deallocate()
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
+                                         TrackID aID)
 {
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
@@ -384,7 +385,7 @@ MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
+MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
   {
     MonitorAutoLock lock(mMonitor);
@@ -421,17 +422,17 @@ MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
 }
 
 void
-MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
-                                         SourceMediaStream *aSource,
-                                         TrackID aID,
-                                         StreamTime aDesiredTime)
+MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
+                                              SourceMediaStream *aSource,
+                                              TrackID aID,
+                                              StreamTime aDesiredTime)
 {
   // Ignore - we push audio data
   LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
 }
 
 void
-MediaEngineWebRTCAudioSource::Init()
+MediaEngineWebRTCMicrophoneSource::Init()
 {
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
@@ -496,7 +497,7 @@ MediaEngineWebRTCAudioSource::Init()
 }
 
 void
-MediaEngineWebRTCAudioSource::Shutdown()
+MediaEngineWebRTCMicrophoneSource::Shutdown()
 {
   if (!mInitDone) {
     // duplicate these here in case we failed during Init()
@@ -551,9 +552,10 @@ MediaEngineWebRTCAudioSource::Shutdown()
 typedef int16_t sample;
 
 void
-MediaEngineWebRTCAudioSource::Process(int channel,
-  webrtc::ProcessingTypes type, sample* audio10ms,
-  int length, int samplingFreq, bool isStereo)
+MediaEngineWebRTCMicrophoneSource::Process(int channel,
+                                           webrtc::ProcessingTypes type,
+                                           sample *audio10ms, int length,
+                                           int samplingFreq, bool isStereo)
 {
   // On initial capture, throw away all far-end data except the most recent sample
   // since it's already irrelevant and we want to keep avoid confusing the AEC far-end
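For reference, the Process override renamed above is the webrtc::VoEMediaProcess callback, which VoiceEngine invokes with 10 ms chunks of capture audio; length is conventionally the per-channel sample count, so buffer sizing follows directly from the sampling rate and channel count. A small standalone sketch of that arithmetic, with assumed example values:

#include <cstdint>
#include <cstdio>

typedef int16_t sample;

// For a 10 ms callback, samples per channel is samplingFreq / 100; the
// int16_t buffer then holds length * (isStereo ? 2 : 1) values.
int main() {
  const int samplingFreq = 48000;                 // e.g. 48 kHz capture
  const bool isStereo = false;
  const int length = samplingFreq / 100;          // 480 samples per channel
  const int bufferValues = length * (isStereo ? 2 : 1);
  std::printf("10 ms @ %d Hz -> length = %d, buffer holds %d int16_t values\n",
              samplingFreq, length, bufferValues);
  return 0;
}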