Bug 1221587: change audio listeners for full-duplex audio r=padenot

--HG--
extra : commitid : HGZSv3IY3OF
Randell Jesup 2016-01-21 11:51:36 -05:00
Parent 439679b676
Commit be7d8f1d36
9 changed files with 193 additions and 70 deletions

View file

@@ -48,6 +48,7 @@ GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
mIterationEnd(0),
mGraphImpl(aGraphImpl),
mWaitState(WAITSTATE_RUNNING),
mAudioInput(nullptr),
mCurrentTimeStamp(TimeStamp::Now()),
mPreviousDriver(nullptr),
mNextDriver(nullptr)
@@ -539,6 +540,7 @@ AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl)
, mSampleRate(0)
, mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS)
, mStarted(false)
, mAudioInput(nullptr)
, mAudioChannel(aGraphImpl->AudioChannel())
, mInCallback(false)
, mMicrophoneActive(false)
@@ -900,8 +902,8 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aInputBuffer,
// data off separate cubeb callbacks. Take care with how stuff is
// removed/added to this list and TSAN issues, but input and output will
// use separate callback methods.
mGraphImpl->NotifySpeakerData(aOutputBuffer, static_cast<size_t>(aFrames),
ChannelCount);
mGraphImpl->NotifyOutputData(aOutputBuffer, static_cast<size_t>(aFrames),
ChannelCount);
// Process mic data if any/needed -- after inserting far-end data for AEC!
if (aInputBuffer) {

View file

@@ -192,13 +192,12 @@ public:
virtual bool OnThread() = 0;
// XXX Thread-safety! Do these via commands to avoid TSAN issues
// and crashes!!!
virtual void SetInputListener(MediaStreamListener *aListener) {
// These are invoked on the MSG thread (or MainThread in shutdown)
virtual void SetInputListener(AudioDataListener *aListener) {
mAudioInput = aListener;
}
// XXX do we need the param? probably no
virtual void RemoveInputListener(MediaStreamListener *aListener) {
virtual void RemoveInputListener(AudioDataListener *aListener) {
mAudioInput = nullptr;
}
@@ -233,7 +232,7 @@ protected:
WaitState mWaitState;
// Callback for mic data, if any
RefPtr<MediaStreamListener> mAudioInput;
AudioDataListener *mAudioInput;
// This is used on the main thread (during initialization), and the graph
// thread. No monitor needed because we know the graph thread does not run
@@ -498,7 +497,7 @@ private:
* */
bool mStarted;
/* Listener for mic input, if any. */
RefPtr<MediaStreamListener> mAudioInput;
RefPtr<AudioDataListener> mAudioInput;
struct AutoInCallback
{

View file

@@ -924,7 +924,7 @@ MediaStreamGraphImpl::PlayVideo(MediaStream* aStream)
}
void
MediaStreamGraphImpl::OpenAudioInputImpl(char *aName, MediaStreamListener *aListener)
MediaStreamGraphImpl::OpenAudioInputImpl(char *aName, AudioDataListener *aListener)
{
if (CurrentDriver()->AsAudioCallbackDriver()) {
CurrentDriver()->SetInputListener(aListener);
@@ -935,7 +935,7 @@ MediaStreamGraphImpl::OpenAudioInputImpl(char *aName, MediaStreamListener *aList
}
nsresult
MediaStreamGraphImpl::OpenAudioInput(char *aName, MediaStreamListener *aListener)
MediaStreamGraphImpl::OpenAudioInput(char *aName, AudioDataListener *aListener)
{
// XXX So, so, so annoying. Can't AppendMessage except on Mainthread
if (!NS_IsMainThread()) {
@@ -946,7 +946,7 @@ MediaStreamGraphImpl::OpenAudioInput(char *aName, MediaStreamListener *aListener
}
class Message : public ControlMessage {
public:
Message(MediaStreamGraphImpl *aGraph, char *aName, MediaStreamListener *aListener) :
Message(MediaStreamGraphImpl *aGraph, char *aName, AudioDataListener *aListener) :
ControlMessage(nullptr), mGraph(aGraph), mName(aName), mListener(aListener) {}
virtual void Run()
{
@@ -954,21 +954,21 @@ MediaStreamGraphImpl::OpenAudioInput(char *aName, MediaStreamListener *aListener
}
MediaStreamGraphImpl *mGraph;
char *mName; // XXX needs to copy
MediaStreamListener *mListener;
RefPtr<AudioDataListener> mListener;
};
this->AppendMessage(new Message(this, aName, aListener));
return NS_OK;
}
void
MediaStreamGraphImpl::CloseAudioInputImpl(MediaStreamListener *aListener)
MediaStreamGraphImpl::CloseAudioInputImpl(AudioDataListener *aListener)
{
CurrentDriver()->RemoveInputListener(aListener);
mAudioInputs.RemoveElement(aListener);
}
void
MediaStreamGraphImpl::CloseAudioInput(MediaStreamListener *aListener)
MediaStreamGraphImpl::CloseAudioInput(AudioDataListener *aListener)
{
// XXX So, so, so annoying. Can't AppendMessage except on Mainthread
if (!NS_IsMainThread()) {
@@ -979,14 +979,14 @@ MediaStreamGraphImpl::CloseAudioInput(MediaStreamListener *aListener)
}
class Message : public ControlMessage {
public:
Message(MediaStreamGraphImpl *aGraph, MediaStreamListener *aListener) :
Message(MediaStreamGraphImpl *aGraph, AudioDataListener *aListener) :
ControlMessage(nullptr), mGraph(aGraph), mListener(aListener) {}
virtual void Run()
{
mGraph->CloseAudioInputImpl(mListener);
}
MediaStreamGraphImpl *mGraph;
MediaStreamListener *mListener;
RefPtr<AudioDataListener> mListener;
};
this->AppendMessage(new Message(this, aListener));
}
@@ -994,11 +994,11 @@ MediaStreamGraphImpl::CloseAudioInput(MediaStreamListener *aListener)
// All AudioInput listeners get the same speaker data (at least for now).
void
MediaStreamGraph::NotifySpeakerData(AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels)
MediaStreamGraph::NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels)
{
for (auto& listener : mAudioInputs) {
listener->NotifySpeakerData(this, aBuffer, aFrames, aChannels);
listener->NotifyOutputData(this, aBuffer, aFrames, aChannels);
}
}
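The OpenAudioInput/CloseAudioInput paths above follow the graph's usual control-message pattern: a caller that is not on the main thread is first bounced there (the NS_IsMainThread() checks above; the dispatch itself is elided in these hunks), the main thread appends a ControlMessage, and the listener list is only touched when the graph thread runs that message — the same thread that fans output data out to mAudioInputs in NotifyOutputData. The standalone sketch below models that hand-off in plain C++; GraphLike, AudioDataListenerLike and OneIteration are illustrative names, not Mozilla API.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <vector>

// Standalone analogue of the control-message pattern: listener changes are
// queued from the "main thread" and only applied on the "graph thread".
struct AudioDataListenerLike {
  virtual ~AudioDataListenerLike() = default;
  virtual void NotifyOutputData(const float* aBuffer, size_t aFrames,
                                uint32_t aChannels) = 0;
};

class GraphLike {
public:
  // Called from the "main thread": queue a message instead of touching the
  // listener list directly (mirrors OpenAudioInput/CloseAudioInput).
  void OpenAudioInput(std::shared_ptr<AudioDataListenerLike> aListener) {
    AppendMessage([this, aListener] { mAudioInputs.push_back(aListener); });
  }
  void CloseAudioInput(std::shared_ptr<AudioDataListenerLike> aListener) {
    AppendMessage([this, aListener] {
      mAudioInputs.erase(
        std::remove(mAudioInputs.begin(), mAudioInputs.end(), aListener),
        mAudioInputs.end());
    });
  }

  // Runs on the "graph thread": drain queued messages, then fan the output
  // buffer out to every registered listener, like NotifyOutputData above.
  void OneIteration(const float* aOutput, size_t aFrames, uint32_t aChannels) {
    std::queue<std::function<void()>> pending;
    {
      std::lock_guard<std::mutex> lock(mMutex);
      std::swap(pending, mMessages);
    }
    while (!pending.empty()) {
      pending.front()();
      pending.pop();
    }
    for (auto& listener : mAudioInputs) {
      listener->NotifyOutputData(aOutput, aFrames, aChannels);
    }
  }

private:
  void AppendMessage(std::function<void()> aMessage) {
    std::lock_guard<std::mutex> lock(mMutex);
    mMessages.push(std::move(aMessage));
  }

  std::mutex mMutex;
  std::queue<std::function<void()>> mMessages;  // pending control messages
  // Graph-thread-only, like mAudioInputs in the real graph implementation.
  std::vector<std::shared_ptr<AudioDataListenerLike>> mAudioInputs;
};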

View file

@@ -181,23 +181,39 @@ public:
* are also notified of atomically to MediaStreamListeners.
*/
virtual void NotifyFinishedTrackCreation(MediaStreamGraph* aGraph) {}
};
class AudioDataListenerInterface {
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~AudioDataListenerInterface() {}
public:
/* These are for cubeb audio input & output streams: */
/**
* Output data to speakers, for use as the "far-end" data for echo
* cancellation. This is not guaranteed to be in any particular size
* chunks.
*/
virtual void NotifySpeakerData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) {}
virtual void NotifyOutputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) = 0;
/**
* Input data from a microphone (or other audio source). This is not
* guaranteed to be in any particular size chunks.
*/
virtual void NotifyInputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) {}
uint32_t aChannels) = 0;
};
class AudioDataListener : public AudioDataListenerInterface {
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~AudioDataListener() {}
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioDataListener)
};
/**
@@ -1192,10 +1208,10 @@ public:
// Idempotent
static void DestroyNonRealtimeInstance(MediaStreamGraph* aGraph);
virtual nsresult OpenAudioInput(char *aName, MediaStreamListener *aListener) {
virtual nsresult OpenAudioInput(char *aName, AudioDataListener *aListener) {
return NS_ERROR_FAILURE;
}
virtual void CloseAudioInput(MediaStreamListener *aListener) {}
virtual void CloseAudioInput(AudioDataListener *aListener) {}
// Control API.
/**
@@ -1280,8 +1296,8 @@ public:
* Data going to the speakers from the GraphDriver's DataCallback
* to notify any listeners (for echo cancellation).
*/
void NotifySpeakerData(AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels);
void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels);
protected:
explicit MediaStreamGraph(TrackRate aSampleRate)
@@ -1304,7 +1320,11 @@ protected:
*/
TrackRate mSampleRate;
nsTArray<RefPtr<MediaStreamListener>> mAudioInputs;
/**
* Lifetime is controlled by OpenAudioInput/CloseAudioInput. Destroying the listener
* without removing it is an error; callers should assert on that.
*/
nsTArray<AudioDataListener *> mAudioInputs;
};
} // namespace mozilla
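One consequence of the header changes above: NotifyOutputData and NotifyInputData are now pure virtual, so every AudioDataListener must handle both directions of a full-duplex stream, and neither callback promises fixed-size chunks. The standalone sketch below shows what an implementation has to cope with; EchoAwareListener, the const float samples and the buffer bookkeeping are hypothetical, only the two-callback shape comes from the interface above.

#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

// Hypothetical listener shaped like AudioDataListenerInterface: both
// callbacks must be implemented, and chunk sizes vary from call to call.
class EchoAwareListener {
public:
  // Far-end data (what the speakers play): kept as the AEC reference.
  void NotifyOutputData(const float* aBuffer, size_t aFrames, uint32_t aChannels) {
    mFarEnd.insert(mFarEnd.end(), aBuffer, aBuffer + aFrames * aChannels);
    // Bound the reference so it only covers recent playback (~1 s at 48 kHz).
    const size_t maxSamples = 48000 * aChannels;
    while (mFarEnd.size() > maxSamples) {
      mFarEnd.pop_front();
    }
  }

  // Near-end data (the microphone): processed against the stored reference.
  void NotifyInputData(const float* aBuffer, size_t aFrames, uint32_t aChannels) {
    mNearEnd.assign(aBuffer, aBuffer + aFrames * aChannels);
    // A real listener would run AEC/NS/AGC here with mFarEnd as the far-end
    // signal; the sketch only shows that both sides arrive independently.
  }

private:
  std::deque<float> mFarEnd;    // interleaved far-end samples
  std::vector<float> mNearEnd;  // most recent near-end chunk
};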

View file

@@ -350,10 +350,10 @@ public:
* at the current buffer end point. The StreamBuffer's tracks must be
* explicitly set to finished by the caller.
*/
void OpenAudioInputImpl(char *aName, MediaStreamListener *aListener);
virtual nsresult OpenAudioInput(char *aName, MediaStreamListener *aListener) override;
void CloseAudioInputImpl(MediaStreamListener *aListener);
virtual void CloseAudioInput(MediaStreamListener *aListener) override;
void OpenAudioInputImpl(char *aName, AudioDataListener *aListener);
virtual nsresult OpenAudioInput(char *aName, AudioDataListener *aListener) override;
void CloseAudioInputImpl(AudioDataListener *aListener);
virtual void CloseAudioInput(AudioDataListener *aListener) override;
void FinishStream(MediaStream* aStream);
/**

View file

@@ -274,7 +274,8 @@ protected:
/**
* Audio source and friends.
*/
class MediaEngineAudioSource : public MediaEngineSource
class MediaEngineAudioSource : public MediaEngineSource,
public AudioDataListenerInterface
{
public:
virtual ~MediaEngineAudioSource() {}

View file

@@ -144,6 +144,14 @@ public:
#endif
}
void NotifyOutputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override
{}
void NotifyInputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override
{}
bool IsFake() override {
return true;
}

View file

@@ -29,6 +29,7 @@
#include "MediaStreamGraph.h"
#include "cubeb/cubeb.h"
#include "CubebUtils.h"
#include "AudioPacketizer.h"
#include "MediaEngineWrapper.h"
#include "mozilla/dom/MediaStreamTrackBinding.h"
@@ -98,6 +99,14 @@ public:
{
return NS_OK;
}
void NotifyOutputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override
{}
void NotifyInputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override
{}
void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
TrackID aID, StreamTime aDesiredTime) override
{}
@@ -127,37 +136,32 @@ class AudioInput
{
public:
AudioInput(webrtc::VoiceEngine* aVoiceEngine) : mVoiceEngine(aVoiceEngine) {};
virtual ~AudioInput() {}
NS_INLINE_DECL_REFCOUNTING(AudioInput)
// Threadsafe because it's referenced from a MicrophoneSource, which can
// have references to it on other threads.
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
char aStrGuidUTF8[128]) = 0;
virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
virtual void StartRecording(MediaStreamGraph *aGraph) = 0;
virtual void StopRecording(MediaStreamGraph *aGraph) = 0;
virtual void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
virtual void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
virtual int SetRecordingDevice(int aIndex) = 0;
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~AudioInput() {}
webrtc::VoiceEngine* mVoiceEngine;
};
class AudioInputCubeb : public AudioInput,
public MediaStreamListener
class AudioInputCubeb final : public AudioInput
{
public:
AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine) :
explicit AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine) :
AudioInput(aVoiceEngine), mDevices(nullptr) {}
virtual ~AudioInputCubeb()
{
if (mDevices) {
cubeb_device_collection_destroy(mDevices);
mDevices = nullptr;
}
}
virtual int GetNumOfRecordingDevices(int& aDevices)
int GetNumOfRecordingDevices(int& aDevices)
{
// devices = cubeb_get_num_devices(...)
if (CUBEB_OK != cubeb_enumerate_devices(CubebUtils::GetCubebContext(),
@@ -177,8 +181,8 @@ public:
return 0;
}
virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
char aStrGuidUTF8[128])
int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
char aStrGuidUTF8[128])
{
if (!mDevices) {
return 1;
@@ -190,45 +194,53 @@ public:
return 0;
}
virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
int GetRecordingDeviceStatus(bool& aIsAvailable)
{
// With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT
aIsAvailable = true;
return 0;
}
virtual void StartRecording(MediaStreamGraph *aGraph)
void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
{
ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
if (ptrVoERender) {
ptrVoERender->SetExternalRecordingStatus(true);
}
aGraph->OpenAudioInput(nullptr, this);
aGraph->OpenAudioInput(nullptr, aListener);
}
virtual void StopRecording(MediaStreamGraph *aGraph)
void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
{
aGraph->CloseAudioInput(this);
aGraph->CloseAudioInput(aListener);
}
virtual int SetRecordingDevice(int aIndex)
int SetRecordingDevice(int aIndex)
{
// Not relevant to cubeb
// Relevant with devid support
return 1;
}
protected:
~AudioInputCubeb()
{
if (mDevices) {
cubeb_device_collection_destroy(mDevices);
mDevices = nullptr;
}
}
private:
cubeb_device_collection* mDevices;
};
class AudioInputWebRTC : public AudioInput
class AudioInputWebRTC final : public AudioInput
{
public:
AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
virtual ~AudioInputWebRTC() {}
explicit AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
virtual int GetNumOfRecordingDevices(int& aDevices)
int GetNumOfRecordingDevices(int& aDevices)
{
ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
@@ -238,8 +250,8 @@ public:
return ptrVoEHw->GetNumOfRecordingDevices(aDevices);
}
virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
char aStrGuidUTF8[128])
int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
char aStrGuidUTF8[128])
{
ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
@@ -250,7 +262,7 @@ public:
aStrGuidUTF8);
}
virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
int GetRecordingDeviceStatus(bool& aIsAvailable)
{
ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
@@ -261,10 +273,10 @@ public:
return 0;
}
virtual void StartRecording(MediaStreamGraph *aGraph) {}
virtual void StopRecording(MediaStreamGraph *aGraph) {}
void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
virtual int SetRecordingDevice(int aIndex)
int SetRecordingDevice(int aIndex)
{
ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
@@ -273,6 +285,39 @@ public:
}
return ptrVoEHw->SetRecordingDevice(aIndex);
}
protected:
// Protected destructor, to discourage deletion outside of Release():
~AudioInputWebRTC() {}
};
class WebRTCAudioDataListener : public AudioDataListener
{
protected:
// Protected destructor, to discourage deletion outside of Release():
virtual ~WebRTCAudioDataListener() {}
public:
explicit WebRTCAudioDataListener(MediaEngineAudioSource* aAudioSource) :
mAudioSource(aAudioSource)
{}
// AudioDataListenerInterface methods
virtual void NotifyOutputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override
{
mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aChannels);
}
virtual void NotifyInputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override
{
mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aChannels);
}
private:
RefPtr<MediaEngineAudioSource> mAudioSource;
};
class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
@@ -307,6 +352,7 @@ public:
MOZ_ASSERT(aAudioInput);
mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
mDeviceUUID.Assign(uuid);
mListener = new mozilla::WebRTCAudioDataListener(this);
Init();
}
@@ -333,6 +379,14 @@ public:
TrackID aId,
StreamTime aDesiredTime) override;
// AudioDataListenerInterface methods
void NotifyOutputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override;
void NotifyInputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
uint32_t aChannels) override;
bool IsFake() override {
return false;
}
@@ -367,12 +421,15 @@ private:
webrtc::VoiceEngine* mVoiceEngine;
RefPtr<mozilla::AudioInput> mAudioInput;
RefPtr<WebRTCAudioDataListener> mListener;
ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
// mMonitor protects mSources[] access/changes, and transitions of mState
// from kStarted to kStopped (which are combined with EndTrack()).
// mSources[] is accessed from webrtc threads.
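A note on ownership in this header: WebRTCAudioDataListener holds a RefPtr to the MediaEngineAudioSource it forwards to, and the microphone source in turn holds RefPtr<WebRTCAudioDataListener> mListener, so the two form a reference cycle that Shutdown() breaks explicitly (see the mListener = nullptr line in the Shutdown() hunk further down). Below is a standalone analogue with std::shared_ptr in place of RefPtr; Source, ForwardingListener and Start()/Shutdown() are illustrative names, not the real classes.

#include <cstddef>
#include <cstdint>
#include <memory>

class Source;  // forward declaration

// Analogue of the forwarding listener: a separately refcounted object the
// graph can hold, which just forwards callbacks to the source that owns it.
class ForwardingListener {
public:
  explicit ForwardingListener(std::shared_ptr<Source> aSource)
    : mSource(std::move(aSource)) {}
  void NotifyInputData(const float* aBuffer, size_t aFrames, uint32_t aChannels);

private:
  std::shared_ptr<Source> mSource;  // strong reference back to the source
};

class Source : public std::enable_shared_from_this<Source> {
public:
  // Source must itself be owned by a std::shared_ptr for shared_from_this().
  void Start() {
    // The source keeps a strong reference to the listener it registers...
    mListener = std::make_shared<ForwardingListener>(shared_from_this());
  }
  void Shutdown() {
    // ...so the cycle Source -> listener -> Source has to be broken here,
    // just as the microphone source nulls mListener in its Shutdown().
    mListener.reset();
  }
  void NotifyInputData(const float*, size_t, uint32_t) { /* process mic data */ }

private:
  std::shared_ptr<ForwardingListener> mListener;
};

void ForwardingListener::NotifyInputData(const float* aBuffer, size_t aFrames,
                                         uint32_t aChannels) {
  mSource->NotifyInputData(aBuffer, aFrames, aChannels);  // plain forwarding
}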

View file

@@ -381,7 +381,7 @@ MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
// Attach external media processor, so this::Process will be called.
mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);
mAudioInput->StartRecording(aStream->Graph());
mAudioInput->StartRecording(aStream->Graph(), mListener);
return NS_OK;
}
@@ -413,7 +413,7 @@ MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
mState = kStopped;
}
mAudioInput->StopRecording(aSource->Graph());
mAudioInput->StopRecording(aSource->Graph(), mListener);
mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);
@@ -444,6 +444,39 @@ MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
}
void
MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer,
size_t aFrames,
uint32_t aChannels)
{
}
// Called back on GraphDriver thread
void
MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraph* aGraph,
AudioDataValue* aBuffer,
size_t aFrames,
uint32_t aChannels)
{
// This will call Process() with data coming out of the AEC/NS/AGC/etc chain
if (!mPacketizer ||
mPacketizer->PacketSize() != mSampleFrequency/100 ||
mPacketizer->Channels() != aChannels) {
// It's ok to drop the audio still in the packetizer here.
mPacketizer = new AudioPacketizer<AudioDataValue, int16_t>(mSampleFrequency/100, aChannels);
}
mPacketizer->Input(aBuffer, static_cast<uint32_t>(aFrames));
while (mPacketizer->PacketsAvailable()) {
uint32_t samplesPerPacket = mPacketizer->PacketSize() *
mPacketizer->Channels();
int16_t* packet = mPacketizer->Output();
mVoERender->ExternalRecordingInsertData(packet, samplesPerPacket, mSampleFrequency, 0);
}
}
void
MediaEngineWebRTCMicrophoneSource::Init()
{
@@ -561,6 +594,9 @@ MediaEngineWebRTCMicrophoneSource::Shutdown()
mVoERender = nullptr;
mVoEBase = nullptr;
mAudioInput = nullptr;
mListener = nullptr; // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
mState = kReleased;
mInitDone = false;
}
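The key step on the input side added above is repacketizing: cubeb delivers whatever frame count it produced, and NotifyInputData regroups it into 10 ms packets (mSampleFrequency/100 frames) before ExternalRecordingInsertData hands them to the voice engine. The standalone sketch below shows that regrouping with a trivial stand-in for AudioPacketizer; it assumes interleaved int16_t samples and a fixed channel count, and TenMsPacketizer is an illustrative name, not the real class.

#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal stand-in for the repacketizing role AudioPacketizer plays above:
// accept arbitrary-sized interleaved chunks, emit fixed 10 ms packets.
class TenMsPacketizer {
public:
  TenMsPacketizer(uint32_t aSampleRate, uint32_t aChannels)
    : mPacketFrames(aSampleRate / 100),  // 10 ms worth of frames
      mChannels(aChannels) {}

  void Input(const int16_t* aSamples, uint32_t aFrames) {
    mBuffer.insert(mBuffer.end(), aSamples, aSamples + aFrames * mChannels);
  }

  bool PacketsAvailable() const {
    return mBuffer.size() >= mPacketFrames * mChannels;
  }

  // Pops one 10 ms packet; the caller consumes it (the real code hands the
  // packet to the voice engine via ExternalRecordingInsertData).
  std::vector<int16_t> Output() {
    const size_t samplesPerPacket = mPacketFrames * mChannels;
    std::vector<int16_t> packet(mBuffer.begin(), mBuffer.begin() + samplesPerPacket);
    mBuffer.erase(mBuffer.begin(), mBuffer.begin() + samplesPerPacket);
    return packet;
  }

private:
  size_t mPacketFrames;
  uint32_t mChannels;
  std::vector<int16_t> mBuffer;  // interleaved pending samples
};

The real AudioPacketizer additionally converts from AudioDataValue to int16_t and reuses its output buffer; only the frame-count bookkeeping is modeled here.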