diff --git a/content/media/webrtc/MediaEngine.h b/content/media/webrtc/MediaEngine.h
index f4c7eed37837..ce66ba11aa04 100644
--- a/content/media/webrtc/MediaEngine.h
+++ b/content/media/webrtc/MediaEngine.h
@@ -89,11 +89,6 @@ public:
   /* Stop the device and release the corresponding MediaStream */
   virtual nsresult Stop(SourceMediaStream *aSource, TrackID aID) = 0;
 
-  /* Change device configuration. */
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) = 0;
-
   /* Return false if device is currently allocated or started */
   bool IsAvailable() {
     if (mState == kAllocated || mState == kStarted) {
diff --git a/content/media/webrtc/MediaEngineDefault.h b/content/media/webrtc/MediaEngineDefault.h
index bd1149adf4e1..e34b7b1b95d1 100644
--- a/content/media/webrtc/MediaEngineDefault.h
+++ b/content/media/webrtc/MediaEngineDefault.h
@@ -44,9 +44,6 @@ public:
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime);
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
@@ -89,9 +86,6 @@ public:
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime);
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
diff --git a/content/media/webrtc/MediaEngineWebRTC.h b/content/media/webrtc/MediaEngineWebRTC.h
index 8f553befec17..34ac056a078a 100644
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -35,7 +35,6 @@
 #include "voice_engine/include/voe_audio_processing.h"
 #include "voice_engine/include/voe_volume_control.h"
 #include "voice_engine/include/voe_external_media.h"
-#include "voice_engine/include/voe_audio_processing.h"
 
 // Video Engine
 #include "video_engine/include/vie_base.h"
@@ -91,9 +90,6 @@ public:
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
@@ -181,10 +177,6 @@ public:
     , mCapIndex(aIndex)
     , mChannel(-1)
     , mInitDone(false)
-    , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
-    , mEchoCancel(webrtc::kEcDefault)
-    , mAGC(webrtc::kAgcDefault)
-    , mNoiseSuppress(webrtc::kNsDefault)
     , mNullTransport(nullptr) {
     MOZ_ASSERT(aVoiceEnginePtr);
     mState = kReleased;
@@ -202,10 +194,6 @@ public:
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise);
-
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
@@ -230,7 +218,6 @@ private:
   webrtc::VoEBase* mVoEBase;
   webrtc::VoEExternalMedia* mVoERender;
   webrtc::VoENetwork* mVoENetwork;
-  webrtc::VoEAudioProcessing *mVoEProcessing;
 
   // mMonitor protects mSources[] access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack()).
@@ -246,11 +233,6 @@ private:
   nsString mDeviceName;
   nsString mDeviceUUID;
 
-  bool mEchoOn, mAgcOn, mNoiseOn;
-  webrtc::EcModes mEchoCancel;
-  webrtc::AgcModes mAGC;
-  webrtc::NsModes mNoiseSuppress;
-
   NullTransport *mNullTransport;
 };
 
diff --git a/content/media/webrtc/MediaEngineWebRTCAudio.cpp b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
index 4da1e2af56aa..85981ca6da24 100644
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -46,60 +46,6 @@ MediaEngineWebRTCAudioSource::GetUUID(nsAString& aUUID)
   return;
 }
 
-nsresult
-MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
-                                     bool aAgcOn, uint32_t aAGC,
-                                     bool aNoiseOn, uint32_t aNoise)
-{
-  LOG(("Audio config: aec: %d, agc: %d, noise: %d",
-       aEchoOn ? aEcho : -1,
-       aAgcOn ? aAGC : -1,
-       aNoiseOn ? aNoise : -1));
-
-  bool update_agc = (mAgcOn == aAgcOn);
-  bool update_noise = (mNoiseOn == aNoiseOn);
-  mAgcOn = aAgcOn;
-  mNoiseOn = aNoiseOn;
-
-  if ((webrtc::AgcModes) aAGC != webrtc::kAgcUnchanged) {
-    if (mAGC != (webrtc::AgcModes) aAGC) {
-      update_agc = true;
-      mAGC = (webrtc::AgcModes) aAGC;
-    }
-  }
-  if ((webrtc::NsModes) aNoise != webrtc::kNsUnchanged) {
-    if (mNoiseSuppress != (webrtc::NsModes) aNoise) {
-      update_noise = true;
-      mNoiseSuppress = (webrtc::NsModes) aNoise;
-    }
-  }
-
-  if (mInitDone) {
-    int error;
-#if 0
-    // Until we can support feeding our full output audio from the browser
-    // through the MediaStream, this won't work.  Or we need to move AEC to
-    // below audio input and output, perhaps invoked from here.
-    mEchoOn = aEchoOn;
-    if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged)
-      mEchoCancel = (webrtc::EcModes) aEcho;
-    mVoEProcessing->SetEcStatus(mEchoOn, aEcho);
-#else
-    (void) aEcho; (void) aEchoOn; // suppress warnings
-#endif
-
-    if (update_agc &&
-        0 != (error = mVoEProcessing->SetAgcStatus(mAgcOn, (webrtc::AgcModes) aAGC))) {
-      LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
-    }
-    if (update_noise &&
-        0 != (error = mVoEProcessing->SetNsStatus(mNoiseOn, (webrtc::NsModes) aNoise))) {
-      LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
-    }
-  }
-  return NS_OK;
-}
-
 nsresult
 MediaEngineWebRTCAudioSource::Allocate()
 {
@@ -160,11 +106,6 @@ MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
   }
   mState = kStarted;
 
-  // Configure audio processing in webrtc code
-  Config(mEchoOn, webrtc::kEcUnchanged,
-         mAgcOn, webrtc::kAgcUnchanged,
-         mNoiseOn, webrtc::kNsUnchanged);
-
   if (mVoEBase->StartReceive(mChannel)) {
     return NS_ERROR_FAILURE;
  }
@@ -251,11 +192,6 @@ MediaEngineWebRTCAudioSource::Init()
     return;
   }
 
-  mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
-  if (!mVoEProcessing) {
-    return;
-  }
-
   mChannel = mVoEBase->CreateChannel();
   if (mChannel < 0) {
     return;
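
For context on the removal above: MediaEngineWebRTCAudioSource::Config() was a thin wrapper over VoiceEngine's audio-processing interface — it forwarded the AGC and noise-suppression flags to VoEAudioProcessing, while the AEC branch was already compiled out under #if 0. The sketch below restates that underlying call pattern as a standalone function; the function name and the error-handling shape are illustrative only and not part of this patch, but the VoEAudioProcessing calls are the same ones the removed code used.

    #include "voice_engine/include/voe_audio_processing.h"

    // Illustrative sketch: apply AGC/NS settings the way the removed Config() did.
    static void ApplyAudioProcessing(webrtc::VoiceEngine* aEngine,
                                     bool aAgcOn, webrtc::AgcModes aAgcMode,
                                     bool aNoiseOn, webrtc::NsModes aNsMode)
    {
      webrtc::VoEAudioProcessing* proc =
        webrtc::VoEAudioProcessing::GetInterface(aEngine);
      if (!proc) {
        return;
      }
      // Each setter returns 0 on success; the removed code only logged failures.
      if (proc->SetAgcStatus(aAgcOn, aAgcMode) != 0) {
        // error path: log and continue, as the removed code did
      }
      if (proc->SetNsStatus(aNoiseOn, aNsMode) != 0) {
        // error path: log and continue, as the removed code did
      }
      proc->Release();
    }
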
diff --git a/dom/media/MediaManager.cpp b/dom/media/MediaManager.cpp
index bb63d68d12f4..97a6c1eeed07 100644
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -11,8 +11,6 @@
 #include "nsIScriptGlobalObject.h"
 #include "nsIPopupWindowManager.h"
 #include "nsISupportsArray.h"
-#include "nsIPrefService.h"
-#include "nsIPrefBranch.h"
 
 // For PR_snprintf
 #include "prprf.h"
@@ -384,30 +382,6 @@ public:
       mAudioSource, mVideoSource, false));
     mediaThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
 
-    nsresult rv;
-    nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
-    if (NS_SUCCEEDED(rv)) {
-      nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
-
-      if (branch) {
-        int32_t aec = (int32_t) webrtc::kEcUnchanged;
-        int32_t agc = (int32_t) webrtc::kAgcUnchanged;
-        int32_t noise = (int32_t) webrtc::kNsUnchanged;
-        bool aec_on = false, agc_on = false, noise_on = false;
-
-        branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on);
-        branch->GetIntPref("media.peerconnection.aec", &aec);
-        branch->GetBoolPref("media.peerconnection.agc_enabled", &agc_on);
-        branch->GetIntPref("media.peerconnection.agc", &agc);
-        branch->GetBoolPref("media.peerconnection.noise_enabled", &noise_on);
-        branch->GetIntPref("media.peerconnection.noise", &noise);
-
-        mListener->AudioConfig(aec_on, (uint32_t) aec,
-                               agc_on, (uint32_t) agc,
-                               noise_on, (uint32_t) noise);
-      }
-    }
-
     // We're in the main thread, so no worries here either.
     nsCOMPtr success(mSuccess);
     nsCOMPtr error(mError);
diff --git a/dom/media/MediaManager.h b/dom/media/MediaManager.h
index d6ffb474f001..fc1263d8a250 100644
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -21,8 +21,6 @@
 #include "mozilla/StaticPtr.h"
 #include "prlog.h"
 
-#include "mtransport/runnable_utils.h"
-
 namespace mozilla {
 
 #ifdef PR_LOGGING
@@ -130,20 +128,6 @@ public:
   // Can be invoked from EITHER MainThread or MSG thread
   void
   Invalidate();
-  void
-  AudioConfig(bool aEchoOn, uint32_t aEcho,
-              bool aAgcOn, uint32_t aAGC,
-              bool aNoiseOn, uint32_t aNoise)
-  {
-    if (mAudioSource) {
-      RUN_ON_THREAD(mMediaThread,
-                    WrapRunnable(nsRefPtr<MediaEngineSource>(mAudioSource), // threadsafe
-                                 &MediaEngineSource::Config,
-                                 aEchoOn, aEcho, aAgcOn, aAGC, aNoiseOn, aNoise),
-                    NS_DISPATCH_NORMAL);
-    }
-  }
-
   void
   Remove()
   {
diff --git a/media/mtransport/build/Makefile.in b/media/mtransport/build/Makefile.in
index ca9d1ad28e70..7eb850b58943 100644
--- a/media/mtransport/build/Makefile.in
+++ b/media/mtransport/build/Makefile.in
@@ -37,7 +37,6 @@ EXPORTS_mtransport = \
   ../transportlayerprsock.h \
   ../m_cpp_utils.h \
   ../runnable_utils.h \
-  ../runnable_utils_generated.h \
   ../sigslot.h \
   $(NULL)
 
diff --git a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
index f1a20e60dcc7..8434363e12fb 100644
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -3,13 +3,6 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioConduit.h"
-#include "nsCOMPtr.h"
-#include "mozilla/Services.h"
-#include "nsServiceManagerUtils.h"
-#include "nsIPrefService.h"
-#include "nsIPrefBranch.h"
-#include "nsThreadUtils.h"
-
 #include "CSFLog.h"
 
 #include "voice_engine/include/voe_errors.h"
@@ -24,13 +17,11 @@ const unsigned int WebrtcAudioConduit::CODEC_PLNAME_SIZE = 32;
 /**
  * Factory Method for AudioConduit
  */
-mozilla::RefPtr<AudioSessionConduit> AudioSessionConduit::Create(AudioSessionConduit *aOther)
+mozilla::RefPtr<AudioSessionConduit> AudioSessionConduit::Create()
 {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
-  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
-
   WebrtcAudioConduit* obj = new WebrtcAudioConduit();
-  if(obj->Init(static_cast<WebrtcAudioConduit*>(aOther)) != kMediaConduitNoError)
+  if(obj->Init() != kMediaConduitNoError)
   {
     CSFLogError(logTag,  "%s AudioConduit Init Failed ", __FUNCTION__);
     delete obj;
@@ -45,8 +36,6 @@ mozilla::RefPtr AudioSessionConduit::Create(AudioSessionCon
  */
 WebrtcAudioConduit::~WebrtcAudioConduit()
 {
-  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
-
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
   for(std::vector<AudioCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
   {
@@ -55,27 +44,17 @@ WebrtcAudioConduit::~WebrtcAudioConduit()
 
   delete mCurSendCodecConfig;
 
-  // The first one of a pair to be deleted shuts down media for both
   if(mPtrVoEXmedia)
   {
-    if (!mShutDown) {
-      mPtrVoEXmedia->SetExternalRecordingStatus(false);
-      mPtrVoEXmedia->SetExternalPlayoutStatus(false);
-    }
+    mPtrVoEXmedia->SetExternalRecordingStatus(false);
+    mPtrVoEXmedia->SetExternalPlayoutStatus(false);
     mPtrVoEXmedia->Release();
   }
 
-  if(mPtrVoEProcessing)
-  {
-    mPtrVoEProcessing->Release();
-  }
-
   //Deal with the transport
   if(mPtrVoENetwork)
   {
-    if (!mShutDown) {
-      mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
-    }
+    mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
     mPtrVoENetwork->Release();
   }
 
@@ -86,69 +65,47 @@
 
   if(mPtrVoEBase)
   {
-    if (!mShutDown) {
-      mPtrVoEBase->StopPlayout(mChannel);
-      mPtrVoEBase->StopSend(mChannel);
-      mPtrVoEBase->StopReceive(mChannel);
-      mPtrVoEBase->DeleteChannel(mChannel);
-      mPtrVoEBase->Terminate();
-    }
+    mPtrVoEBase->StopPlayout(mChannel);
+    mPtrVoEBase->StopSend(mChannel);
+    mPtrVoEBase->StopReceive(mChannel);
+    mPtrVoEBase->DeleteChannel(mChannel);
+    mPtrVoEBase->Terminate();
     mPtrVoEBase->Release();
   }
 
-  if (mOtherDirection)
+  if(mVoiceEngine)
   {
-    // mOtherDirection owns these now!
-    mOtherDirection->mOtherDirection = NULL;
-    // let other side we terminated the channel
-    mOtherDirection->mShutDown = true;
-    mVoiceEngine = nullptr;
-  } else {
-    // only one opener can call Delete.  Have it be the last to close.
-    if(mVoiceEngine)
-    {
-      webrtc::VoiceEngine::Delete(mVoiceEngine);
-    }
+    webrtc::VoiceEngine::Delete(mVoiceEngine);
   }
 }
 
 /*
  * WebRTCAudioConduit Implementation
  */
-MediaConduitErrorCode WebrtcAudioConduit::Init(WebrtcAudioConduit *other)
+MediaConduitErrorCode WebrtcAudioConduit::Init()
 {
-  CSFLogDebug(logTag,  "%s this=%p other=%p", __FUNCTION__, this, other);
+  CSFLogDebug(logTag,  "%s ", __FUNCTION__);
 
-  if (other) {
-    MOZ_ASSERT(!other->mOtherDirection);
-    other->mOtherDirection = this;
-    mOtherDirection = other;
+  //Per WebRTC APIs below function calls return NULL on failure
+  if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
+  {
+    CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
 
-    // only one can call ::Create()/GetVoiceEngine()
-    MOZ_ASSERT(other->mVoiceEngine);
-    mVoiceEngine = other->mVoiceEngine;
-  } else {
-    //Per WebRTC APIs below function calls return NULL on failure
-    if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
-    {
-      CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
-      return kMediaConduitSessionNotInited;
-    }
-
-    PRLogModuleInfo *logs = GetWebRTCLogInfo();
-    if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
-      // no need to a critical section or lock here
-      gWebrtcTraceLoggingOn = 1;
-
-      const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
-      if (!file) {
-        file = "WebRTC.log";
-      }
-      CSFLogDebug(logTag, "%s Logging webrtc to %s level %d", __FUNCTION__,
-                  file, logs->level);
-      mVoiceEngine->SetTraceFilter(logs->level);
-      mVoiceEngine->SetTraceFile(file);
+  PRLogModuleInfo *logs = GetWebRTCLogInfo();
+  if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
+    // no need to a critical section or lock here
+    gWebrtcTraceLoggingOn = 1;
+
+    const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
+    if (!file) {
+      file = "WebRTC.log";
     }
+    CSFLogDebug(logTag, "%s Logging webrtc to %s level %d", __FUNCTION__,
+                file, logs->level);
+    mVoiceEngine->SetTraceFilter(logs->level);
+    mVoiceEngine->SetTraceFile(file);
   }
 
   if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine)))
@@ -169,60 +126,51 @@ MediaConduitErrorCode WebrtcAudioConduit::Init(WebrtcAudioConduit *other)
     return kMediaConduitSessionNotInited;
   }
 
-  if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine)))
-  {
-    CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__);
-    return kMediaConduitSessionNotInited;
-  }
-
   if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine)))
   {
     CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-  if (other) {
-    mChannel = other->mChannel;
-  } else {
-    // init the engine with our audio device layer
-    if(mPtrVoEBase->Init() == -1)
-    {
-      CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
-      return kMediaConduitSessionNotInited;
-    }
-
-    if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
-    {
-      CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
-      return kMediaConduitChannelError;
-    }
-
-    CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);
-
-    if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
-    {
-      CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
-      return kMediaConduitTransportRegistrationFail;
-    }
-
-    if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
-    {
-      CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
-                  mPtrVoEBase->LastError());
-      return kMediaConduitExternalPlayoutError;
-    }
-
-    if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
-    {
-      CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
-                  mPtrVoEBase->LastError());
-      return kMediaConduitExternalRecordingError;
-    }
-    CSFLogDebug(logTag , "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
+  // init the engine with our audio device layer
+  if(mPtrVoEBase->Init() == -1)
+  {
+    CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
   }
+
+  if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
+  {
+    CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
+    return kMediaConduitChannelError;
+  }
+
+  CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);
+
+  if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
+  {
+    CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
+    return kMediaConduitTransportRegistrationFail;
+  }
+
+  if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
+  {
+    CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
+                mPtrVoEBase->LastError());
+    return kMediaConduitExternalPlayoutError;
+  }
+
+  if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
+  {
+    CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
+                mPtrVoEBase->LastError());
+    return kMediaConduitExternalRecordingError;
+  }
+  CSFLogDebug(logTag , "%s AudioSessionConduit Initialization Done",__FUNCTION__);
   return kMediaConduitNoError;
 }
+
 // AudioSessionConduit Implementation
 MediaConduitErrorCode
 WebrtcAudioConduit::AttachTransport(mozilla::RefPtr<TransportInterface> aTransport)
@@ -287,33 +235,6 @@ WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig)
     return kMediaConduitUnknownError;
   }
 
-  // TEMPORARY - see bug 694814 comment 2
-  nsresult rv;
-  nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
-  if (NS_SUCCEEDED(rv)) {
-    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
-
-    if (branch) {
-      int32_t aec = 0; // 0 == unchanged
-      bool aec_on = false;
-
-      branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on);
-      branch->GetIntPref("media.peerconnection.aec", &aec);
-
-      CSFLogDebug(logTag,"Audio config: aec: %d", aec_on ? aec : -1);
-      mEchoOn = aec_on;
-      if (static_cast<webrtc::EcModes>(aec) != webrtc::kEcUnchanged)
-        mEchoCancel = static_cast<webrtc::EcModes>(aec);
-
-      branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay);
-    }
-  }
-
-  if (0 != (error = mPtrVoEProcessing->SetEcStatus(mEchoOn, mEchoCancel))) {
-    CSFLogError(logTag,"%s Error setting EVStatus: %d ",__FUNCTION__, error);
-    return kMediaConduitUnknownError;
-  }
-
   //Let's Send Transport State-machine on the Engine
   if(mPtrVoEBase->StartSend(mChannel) == -1)
   {
@@ -483,7 +404,7 @@ WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
     return kMediaConduitSessionNotInited;
   }
 
-  capture_delay = mCaptureDelay;
+
   //Insert the samples
   if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data,
                                                 lengthSamples,
@@ -623,53 +544,34 @@ WebrtcAudioConduit::ReceivedRTCPPacket(const void *data, int len)
 int WebrtcAudioConduit::SendPacket(int channel, const void* data, int len)
 {
-  CSFLogDebug(logTag,  "%s : channel %d %s",__FUNCTION__,channel,
-              (mEngineReceiving && mOtherDirection) ? "(using mOtherDirection)" : "");
+  CSFLogDebug(logTag,  "%s : channel %d",__FUNCTION__,channel);
 
-  if (mEngineReceiving)
-  {
-    if (mOtherDirection)
-    {
-      return mOtherDirection->SendPacket(channel, data, len);
-    }
-    CSFLogDebug(logTag,  "%s : Asked to send RTP without an RTP sender",
-                __FUNCTION__, channel);
-    return -1;
-  } else {
-    if(mTransport && (mTransport->SendRtpPacket(data, len) == NS_OK))
-    {
+  if(mTransport && (mTransport->SendRtpPacket(data, len) == NS_OK))
+  {
       CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__);
       return len;
-    } else {
-      CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
-      return -1;
-    }
-  }
+  } else {
+    CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
+    return -1;
+  }
+
 }
 
 int WebrtcAudioConduit::SendRTCPPacket(int channel, const void* data, int len)
 {
   CSFLogDebug(logTag,  "%s : channel %d", __FUNCTION__, channel);
-  if (mEngineTransmitting)
+  // can't enable this assertion, because we do.  Suppress it
+  // NS_ASSERTION(mEngineReceiving,"We shouldn't send RTCP on the receiver side");
+  if(mEngineReceiving && mTransport && mTransport->SendRtcpPacket(data, len) == NS_OK)
   {
-    if (mOtherDirection)
-    {
-      return mOtherDirection->SendRTCPPacket(channel, data, len);
-    }
-    CSFLogDebug(logTag,  "%s : Asked to send RTCP without an RTP receiver",
-                __FUNCTION__, channel);
-    return -1;
+    CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__);
+    return len;
   } else {
-    if(mTransport && mTransport->SendRtcpPacket(data, len) == NS_OK)
-    {
-      CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__);
-      return len;
-    } else {
-      CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__);
-      return -1;
-    }
+    CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__);
+    return -1;
   }
+
 }
 
 /**
@@ -846,3 +748,4 @@ WebrtcAudioConduit::DumpCodecDB() const
   }
 }
 }// end namespace
+
diff --git a/media/webrtc/signaling/src/media-conduit/AudioConduit.h b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
index 7869b6fddfb1..9b484f4b9a20 100755
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -18,14 +18,12 @@
 #include "voice_engine/include/voe_file.h"
 #include "voice_engine/include/voe_network.h"
 #include "voice_engine/include/voe_external_media.h"
-#include "voice_engine/include/voe_audio_processing.h"
 
 //Some WebRTC types for short notations
 using webrtc::VoEBase;
 using webrtc::VoENetwork;
 using webrtc::VoECodec;
 using webrtc::VoEExternalMedia;
-using webrtc::VoEAudioProcessing;
 
 /** This file hosts several structures identifying different aspects
  * of a RTP Session.
@@ -143,23 +141,18 @@ public:
 
   WebrtcAudioConduit():
-                      mOtherDirection(NULL),
-                      mShutDown(false),
                       mVoiceEngine(NULL),
                       mTransport(NULL),
                       mEngineTransmitting(false),
                       mEngineReceiving(false),
                       mChannel(-1),
-                      mCurSendCodecConfig(NULL),
-                      mCaptureDelay(150),
-                      mEchoOn(true),
-                      mEchoCancel(webrtc::kEcAec)
+                      mCurSendCodecConfig(NULL)
   {
   }
 
   virtual ~WebrtcAudioConduit();
 
-  MediaConduitErrorCode Init(WebrtcAudioConduit *other);
+  MediaConduitErrorCode Init();
 
 private:
   WebrtcAudioConduit(const WebrtcAudioConduit& other) MOZ_DELETE;
@@ -192,19 +185,12 @@ private:
   //Utility function to dump recv codec database
   void DumpCodecDB() const;
 
-  WebrtcAudioConduit* mOtherDirection;
-  // Other side has shut down our channel and related items already
-  bool mShutDown;
-
-  // These are shared by both directions.  They're released by the last
-  // conduit to die
   webrtc::VoiceEngine* mVoiceEngine;
   mozilla::RefPtr<TransportInterface> mTransport;
   webrtc::VoENetwork* mPtrVoENetwork;
   webrtc::VoEBase* mPtrVoEBase;
   webrtc::VoECodec* mPtrVoECodec;
   webrtc::VoEExternalMedia* mPtrVoEXmedia;
-  webrtc::VoEAudioProcessing* mPtrVoEProcessing;
 
   //engine states of our interets
   bool mEngineTransmitting; // If true => VoiceEngine Send-subsystem is up
@@ -214,12 +200,6 @@ private:
   int mChannel;
   RecvCodecList mRecvCodecList;
   AudioCodecConfig* mCurSendCodecConfig;
-
-  // Current "capture" delay (really output plus input delay)
-  int32_t mCaptureDelay;
-
-  bool mEchoOn;
-  webrtc::EcModes mEchoCancel;
 };
 
 } // end namespace
diff --git a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
index 0f1437b577e4..7988f6b15dc4 100755
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -226,7 +226,7 @@ public:
    * return: Concrete VideoSessionConduitObject or NULL in the case
    *         of failure
    */
-  static mozilla::RefPtr<AudioSessionConduit> Create(AudioSessionConduit *aOther);
+  static mozilla::RefPtr<AudioSessionConduit> Create();
 
   virtual ~AudioSessionConduit() {}
 
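
With the conduit pairing gone, AudioSessionConduit::Create() takes no arguments and each direction simply builds its own conduit over its own VoiceEngine. A minimal usage sketch of the new signature, mirroring what the updated call sites and unit tests below now do (variable names are illustrative, not part of this patch):

    #include "MediaConduitInterface.h"

    // Illustrative sketch: each direction now creates a standalone audio conduit.
    mozilla::RefPtr<mozilla::AudioSessionConduit> conduit =
      mozilla::AudioSessionConduit::Create();
    if (!conduit) {
      // Create() returns NULL when WebrtcAudioConduit::Init() fails;
      // callers such as vcmRxStartICE_m bail out with VCM_ERROR here.
    }
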
diff --git a/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp b/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
index 2aaf579728c2..aa1bcdbbe62f 100644
--- a/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
+++ b/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
@@ -1303,18 +1303,12 @@ static int vcmRxStartICE_m(cc_mcapid_t mcap_id,
   if (CC_IS_AUDIO(mcap_id)) {
     std::vector configs;
-
     // Instantiate an appropriate conduit
-    mozilla::RefPtr<mozilla::AudioSessionConduit> tx_conduit =
-      pc.impl()->media()->GetConduit(level, false);
-
     mozilla::RefPtr<mozilla::AudioSessionConduit> conduit =
-      mozilla::AudioSessionConduit::Create(tx_conduit);
+      mozilla::AudioSessionConduit::Create();
 
     if(!conduit)
       return VCM_ERROR;
 
-    pc.impl()->media()->AddConduit(level, true, conduit);
-
     mozilla::AudioCodecConfig *config_raw;
     for(int i=0; i config(config_raw);
 
     // Instantiate an appropriate conduit
-    mozilla::RefPtr<mozilla::AudioSessionConduit> rx_conduit =
-      pc.impl()->media()->GetConduit(level, true);
-
     mozilla::RefPtr<mozilla::AudioSessionConduit> conduit =
-      mozilla::AudioSessionConduit::Create(rx_conduit);
+      mozilla::AudioSessionConduit::Create();
 
     if (!conduit || conduit->ConfigureSendMediaCodec(config))
       return VCM_ERROR;
 
-    pc.impl()->media()->AddConduit(level, false, conduit);
-
     mozilla::RefPtr pipeline =
       new mozilla::MediaPipelineTransmit(
         pc.impl()->GetHandle(),
diff --git a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
index 26cd23ecbb71..cc5f776ebd0a 100644
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
@@ -293,28 +293,9 @@ class PeerConnectionMedia : public sigslot::has_slots<> {
                         mozilla::RefPtr<TransportFlow> aFlow) {
     int index_inner = aIndex * 2 + (aRtcp ? 1 : 0);
 
-    MOZ_ASSERT(!mTransportFlows[index_inner]);
     mTransportFlows[index_inner] = aFlow;
   }
 
-  mozilla::RefPtr<mozilla::AudioSessionConduit> GetConduit(int aStreamIndex, bool aReceive) {
-    int index_inner = aStreamIndex * 2 + (aReceive ? 0 : 1);
-
-    if (mAudioConduits.find(index_inner) == mAudioConduits.end())
-      return NULL;
-
-    return mAudioConduits[index_inner];
-  }
-
-  // Add a conduit
-  void AddConduit(int aIndex, bool aReceive,
-                  const mozilla::RefPtr<mozilla::AudioSessionConduit> &aConduit) {
-    int index_inner = aIndex * 2 + (aReceive ? 0 : 1);
-
-    MOZ_ASSERT(!mAudioConduits[index_inner]);
-    mAudioConduits[index_inner] = aConduit;
-  }
-
   // ICE state signals
   sigslot::signal1 SignalIceGatheringCompleted;  // Done gathering
   sigslot::signal1 SignalIceCompleted;  // Done handshaking
@@ -350,10 +331,6 @@ class PeerConnectionMedia : public sigslot::has_slots<> {
   // Transport flows: even is RTP, odd is RTCP
   std::map<int, mozilla::RefPtr<TransportFlow> > mTransportFlows;
 
-  // Conduits: even is receive, odd is transmit (for easier correlation with
-  // flows)
-  std::map<int, mozilla::RefPtr<mozilla::AudioSessionConduit> > mAudioConduits;
-
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PeerConnectionMedia)
 };
 
diff --git a/media/webrtc/signaling/test/mediaconduit_unittests.cpp b/media/webrtc/signaling/test/mediaconduit_unittests.cpp
index afde93e53252..318a0959a065 100644
--- a/media/webrtc/signaling/test/mediaconduit_unittests.cpp
+++ b/media/webrtc/signaling/test/mediaconduit_unittests.cpp
@@ -490,11 +490,11 @@ class TransportConduitTest : public ::testing::Test
     //get pointer to AudioSessionConduit
     int err=0;
-    mAudioSession = mozilla::AudioSessionConduit::Create(NULL);
+    mAudioSession = mozilla::AudioSessionConduit::Create();
    if( !mAudioSession )
       ASSERT_NE(mAudioSession, (void*)NULL);
 
-    mAudioSession2 = mozilla::AudioSessionConduit::Create(NULL);
+    mAudioSession2 = mozilla::AudioSessionConduit::Create();
     if( !mAudioSession2 )
       ASSERT_NE(mAudioSession2, (void*)NULL);
 
diff --git a/media/webrtc/signaling/test/mediapipeline_unittest.cpp b/media/webrtc/signaling/test/mediapipeline_unittest.cpp
index 27dccf05ac87..128423dd11d4 100644
--- a/media/webrtc/signaling/test/mediapipeline_unittest.cpp
+++ b/media/webrtc/signaling/test/mediapipeline_unittest.cpp
@@ -48,7 +48,7 @@ class TestAgent {
       audio_prsock_(new TransportLayerPrsock()),
       audio_dtls_(new TransportLayerDtls()),
       audio_config_(109, "opus", 48000, 480, 1, 64000),
-      audio_conduit_(mozilla::AudioSessionConduit::Create(NULL)),
+      audio_conduit_(mozilla::AudioSessionConduit::Create()),
       audio_(),
      audio_pipeline_(),
       video_flow_(new TransportFlow()),
diff --git a/media/webrtc/webrtc_config.gypi b/media/webrtc/webrtc_config.gypi
index c7195fcfc020..b504d3d8c56c 100644
--- a/media/webrtc/webrtc_config.gypi
+++ b/media/webrtc/webrtc_config.gypi
@@ -3,9 +3,6 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # definitions to control what gets built in webrtc
-# NOTE!!! if you change something here, due to .gyp files not
-# being reprocessed on .gypi changes, run this before building:
-# "find . -name '*.gyp' | xargs touch"
 {
   'variables': {
     # basic stuff for everything
@@ -16,8 +13,6 @@
     'include_tests': 0,
     'use_system_libjpeg': 1,
     'use_system_libvpx': 1,
-# Creates AEC internal sample dump files in current directory
-#    'aec_debug_dump': 1,
 
     # codec enable/disables:
     # Note: if you change one here, you must modify shared_libs.mk!
diff --git a/modules/libpref/src/init/all.js b/modules/libpref/src/init/all.js
index 69705b5634a6..897300bcc08c 100644
--- a/modules/libpref/src/init/all.js
+++ b/modules/libpref/src/init/all.js
@@ -178,15 +178,6 @@ pref("media.gstreamer.enabled", true);
 pref("media.navigator.enabled", true);
 pref("media.peerconnection.enabled", false);
 pref("media.navigator.permission.disabled", false);
-// These values (aec, agc, and noice) are from media/webrtc/trunk/webrtc/common_types.h
-// kXxxUnchanged = 0, kXxxDefault = 1, and higher values are specific to each
-// setting (for Xxx = Ec, Agc, or Ns).  Defaults are all set to kXxxDefault here.
-pref("media.peerconnection.aec_enabled", true); -pref("media.peerconnection.aec", 1); -pref("media.peerconnection.agc_enabled", false); -pref("media.peerconnection.agc", 1); -pref("media.peerconnection.noise_enabled", false); -pref("media.peerconnection.noise", 1); #else #ifdef ANDROID pref("media.navigator.enabled", true);