Bug 1525323 - Remove SourceTrackListener from MediaManager. r=padenot

This moves the responsibility for forwarding NotifyPull() from the graph thread
to MediaEngineSources out of MediaManager and into the sources themselves. This
is better aligned with how the sources work, since not all sources need pulling.
It also clarifies the lifetime management of these listeners relative to when
pulling is enabled for a track, since the sources already handle enabling
pulling themselves.
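
In outline, the resulting pattern looks roughly like the minimal sketch below
(simplified stand-in types, with std::shared_ptr in place of RefPtr; the names
here are illustrative placeholders, not the actual patch):

// Minimal sketch of the ownership pattern, with simplified stand-in types
// (StreamTime, TrackListener and Source are hypothetical placeholders for
// the real MediaStreamGraph classes).
#include <cstdint>
#include <memory>

using StreamTime = int64_t;

struct TrackListener {
  virtual ~TrackListener() = default;
  // Invoked on the graph thread when the graph wants more data for a track.
  virtual void NotifyPull(StreamTime aEndOfAppendedData,
                          StreamTime aDesiredTime) = 0;
};

class Source;

// A thin forwarder is still needed because the listener base class and the
// source's own base class each bring their own refcounting, so a source
// cannot simply inherit from both. The source owns the forwarder for exactly
// as long as pulling is enabled, instead of MediaManager tracking it.
class SourcePullListener final : public TrackListener {
 public:
  explicit SourcePullListener(std::shared_ptr<Source> aSource)
      : mSource(std::move(aSource)) {}
  void NotifyPull(StreamTime aEndOfAppendedData,
                  StreamTime aDesiredTime) override;

 private:
  const std::shared_ptr<Source> mSource;
};

class Source : public std::enable_shared_from_this<Source> {
 public:
  void Start() {
    // Create the listener lazily; its lifetime now matches the span in
    // which pulling is enabled for the track.
    if (!mPullListener) {
      mPullListener = std::make_shared<SourcePullListener>(shared_from_this());
    }
    // ... here: AddTrackListener(mPullListener), SetPullingEnabled(true) ...
  }
  void Stop() {
    // ... here: RemoveTrackListener(mPullListener), SetPullingEnabled(false)
    mPullListener.reset();  // also breaks the listener->source reference cycle
  }
  void AppendData(StreamTime aEndOfAppendedData, StreamTime aDesiredTime) {
    // A real source would append audio/video frames up to aDesiredTime here.
  }

 private:
  std::shared_ptr<SourcePullListener> mPullListener;
};

void SourcePullListener::NotifyPull(StreamTime aEndOfAppendedData,
                                    StreamTime aDesiredTime) {
  mSource->AppendData(aEndOfAppendedData, aDesiredTime);
}

The listener classes this patch introduces (AudioSourcePullListener and
AudioInputProcessingPullListener, in the diff below) follow this shape.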

Differential Revision: https://phabricator.services.mozilla.com/D24896

--HG--
extra : moz-landing-system : lando
Andreas Pehrson 2019-03-27 18:05:56 +00:00
Parent 3e8325f6fb
Commit 5797084794
10 changed files with 158 additions and 312 deletions

View file

@@ -221,14 +221,9 @@ using media::Refcountable;
static Atomic<bool> sHasShutdown;
class SourceTrackListener;
struct DeviceState {
DeviceState(const RefPtr<MediaDevice>& aDevice, bool aOffWhileDisabled,
RefPtr<SourceTrackListener> aListener)
: mOffWhileDisabled(aOffWhileDisabled),
mDevice(aDevice),
mListener(std::move(aListener)) {
DeviceState(const RefPtr<MediaDevice>& aDevice, bool aOffWhileDisabled)
: mOffWhileDisabled(aOffWhileDisabled), mDevice(aDevice) {
MOZ_ASSERT(mDevice);
}
@@ -267,10 +262,6 @@ struct DeviceState {
// The underlying device we keep state for. Always non-null.
// Threadsafe access, but see method declarations for individual constraints.
const RefPtr<MediaDevice> mDevice;
// The track listener for the track hooked up to mDevice.
// Main thread only.
RefPtr<SourceTrackListener> mListener;
};
/**
@@ -316,10 +307,6 @@ void MediaManager::CallOnSuccess(GetUserMediaSuccessCallback& aCallback,
* MSG threads. But it has a non-threadsafe SupportsWeakPtr for WeakPtr usage
* only from main thread, to ensure that garbage- and cycle-collected objects
* don't hold a reference to it during late shutdown.
*
* There's also a hard reference to the SourceListener through its
* SourceStreamListener and the MediaStreamGraph. MediaStreamGraph
* clears this on XPCOM_WILL_SHUTDOWN, before MediaManager enters shutdown.
*/
class SourceListener : public SupportsWeakPtr<SourceListener> {
public:
@@ -415,13 +402,6 @@ class SourceListener : public SupportsWeakPtr<SourceListener> {
return mVideoDeviceState ? mVideoDeviceState->mDevice.get() : nullptr;
}
/**
* Called on MediaStreamGraph thread when MSG asks us for more data from
* input devices.
*/
void Pull(TrackID aTrackID, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime);
/**
* Called on main thread after MediaStreamGraph notifies us that one of our
* track listeners was removed as listener from its track in the graph.
@@ -469,9 +449,6 @@ class SourceListener : public SupportsWeakPtr<SourceListener> {
// never ever indirect off this; just for assertions
PRThread* mMainThreadCheck;
// For access to mMainThreadCheck
friend class SourceTrackListener;
// Set in Register() on main thread, then read from any thread.
PrincipalHandle mPrincipalHandle;
@@ -485,45 +462,6 @@ class SourceListener : public SupportsWeakPtr<SourceListener> {
RefPtr<SourceMediaStream> mStream; // threadsafe refcnt
};
/**
* Wrapper class for the MediaStreamTrackListener part of SourceListener.
*
* This is required since MediaStreamTrackListener and SupportsWeakPtr
* both implement refcounting.
*/
class SourceTrackListener : public MediaStreamTrackListener {
public:
SourceTrackListener(SourceListener* aSourceListener, TrackID aTrackID)
: mSourceListener(aSourceListener), mTrackID(aTrackID) {}
void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime) override {
mSourceListener->Pull(mTrackID, aEndOfAppendedData, aDesiredTime);
}
void NotifyEnded() override { NotifyRemoved(); }
void NotifyRemoved() override {
nsCOMPtr<nsIEventTarget> target = GetMainThreadEventTarget();
if (NS_WARN_IF(!target)) {
NS_ASSERTION(false,
"Mainthread not available; running on current thread");
// Ensure this really *was* MainThread (NS_GetCurrentThread won't work)
MOZ_RELEASE_ASSERT(mSourceListener->mMainThreadCheck ==
GetCurrentVirtualThread());
mSourceListener->NotifyRemoved(mTrackID);
return;
}
target->Dispatch(NewRunnableMethod<TrackID>(
"SourceListener::NotifyRemoved", mSourceListener,
&SourceListener::NotifyRemoved, mTrackID));
}
private:
const RefPtr<SourceListener> mSourceListener;
const TrackID mTrackID;
};
/**
* This class represents a WindowID and handles all MediaStreamTrackListeners
* (here subclassed as SourceListeners) used to feed GetUserMedia source
@@ -1022,18 +960,6 @@ nsresult MediaDevice::Deallocate() {
return mSource->Deallocate(mAllocationHandle);
}
void MediaDevice::Pull(const RefPtr<SourceMediaStream>& aStream,
TrackID aTrackID, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime,
const PrincipalHandle& aPrincipal) {
// This is on the graph thread, but mAllocationHandle is safe since we never
// change it after it's been set, which is guaranteed to happen before
// registering the listener for pulls.
MOZ_ASSERT(mSource);
mSource->Pull(mAllocationHandle, aStream, aTrackID, aEndOfAppendedData,
aDesiredTime, aPrincipal);
}
dom::MediaSourceEnum MediaDevice::GetMediaSource() const {
// Threadsafe because mSource is const. GetMediaSource() might have other
// requirements.
@@ -4205,9 +4131,7 @@ void SourceListener::Activate(SourceMediaStream* aStream,
aAudioDevice->GetMediaSource() == dom::MediaSourceEnum::Microphone &&
Preferences::GetBool(
"media.getusermedia.microphone.off_while_disabled.enabled",
true),
MakeRefPtr<SourceTrackListener>(this, kAudioTrack));
mStream->AddTrackListener(mAudioDeviceState->mListener, kAudioTrack);
true));
}
if (aVideoDevice) {
@@ -4215,9 +4139,7 @@ void SourceListener::Activate(SourceMediaStream* aStream,
aVideoDevice,
aVideoDevice->GetMediaSource() == dom::MediaSourceEnum::Camera &&
Preferences::GetBool(
"media.getusermedia.camera.off_while_disabled.enabled", true),
MakeRefPtr<SourceTrackListener>(this, kVideoTrack));
mStream->AddTrackListener(mVideoDeviceState->mListener, kVideoTrack);
"media.getusermedia.camera.off_while_disabled.enabled", true));
}
}
@@ -4374,28 +4296,6 @@ void SourceListener::Remove() {
LOG("SourceListener %p removed on purpose", this);
mRemoved = true; // RemoveListener is async, avoid races
mWindowListener = nullptr;
// If it's destroyed, don't call - listener will be removed and we'll be
// notified!
if (!mStream->IsDestroyed()) {
// We disable pulling before removing so we don't risk having live tracks
// without a listener attached - that wouldn't produce data and would be
// illegal to the graph.
if (mAudioDeviceState) {
mStream->SetPullingEnabled(kAudioTrack, false);
mStream->RemoveTrackListener(mAudioDeviceState->mListener, kAudioTrack);
}
if (mVideoDeviceState) {
mStream->RemoveTrackListener(mVideoDeviceState->mListener, kVideoTrack);
}
}
if (mAudioDeviceState) {
mAudioDeviceState->mListener = nullptr;
}
if (mVideoDeviceState) {
mVideoDeviceState->mListener = nullptr;
}
}
void SourceListener::StopTrack(TrackID aTrackID) {
@@ -4648,14 +4548,6 @@ SourceMediaStream* SourceListener::GetSourceStream() {
return mStream;
}
// Proxy Pull() to the right source
void SourceListener::Pull(TrackID aTrackID, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime) {
DeviceState& state = GetDeviceStateFor(aTrackID);
state.mDevice->Pull(mStream, aTrackID, aEndOfAppendedData, aDesiredTime,
mPrincipalHandle);
}
void SourceListener::NotifyRemoved(TrackID aTrackID) {
MOZ_ASSERT(NS_IsMainThread());
LOG("Track %d for SourceListener %p removed", aTrackID, this);
@@ -4675,8 +4567,6 @@ void SourceListener::NotifyRemoved(TrackID aTrackID) {
mWindowListener->Remove(this);
MOZ_ASSERT(!mWindowListener);
MOZ_ASSERT_IF(mAudioDeviceState, !mAudioDeviceState->mListener);
MOZ_ASSERT_IF(mVideoDeviceState, !mVideoDeviceState->mListener);
}
bool SourceListener::CapturingVideo() const {

View file

@@ -8,6 +8,7 @@
#include "ImageTypes.h"
#include "Layers.h"
#include "MediaStreamGraph.h"
#include "MediaStreamListener.h"
#include "MediaTrackConstraints.h"
#include "mozilla/dom/File.h"
#include "mozilla/UniquePtr.h"
@@ -17,6 +18,7 @@
#include "nsIFilePicker.h"
#include "nsIPrefBranch.h"
#include "nsIPrefService.h"
#include "SineWaveGenerator.h"
#include "Tracing.h"
#ifdef MOZ_WIDGET_ANDROID
@@ -326,22 +328,40 @@ void MediaEngineDefaultVideoSource::GenerateFrame() {
segment.AppendFrame(ycbcr_image.forget(),
gfx::IntSize(mOpts.mWidth, mOpts.mHeight),
mPrincipalHandle);
mStream->AppendToTrack(mTrackID, &segment);
}
void MediaEngineDefaultVideoSource::Pull(
const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {}
// This class is created on the media thread, as part of Start(), then entirely
// self-sustained until destruction, just forwarding calls to Pull().
class AudioSourcePullListener : public MediaStreamTrackListener {
public:
AudioSourcePullListener(RefPtr<SourceMediaStream> aStream, TrackID aTrackID,
const PrincipalHandle& aPrincipalHandle,
uint32_t aFrequency)
: mStream(std::move(aStream)),
mTrackID(aTrackID),
mPrincipalHandle(aPrincipalHandle),
mSineGenerator(
MakeUnique<SineWaveGenerator>(mStream->GraphRate(), aFrequency)) {
MOZ_COUNT_CTOR(AudioSourcePullListener);
}
~AudioSourcePullListener() { MOZ_COUNT_DTOR(AudioSourcePullListener); }
void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime) override;
const RefPtr<SourceMediaStream> mStream;
const TrackID mTrackID;
const PrincipalHandle mPrincipalHandle;
const UniquePtr<SineWaveGenerator> mSineGenerator;
};
/**
* Default audio source.
*/
MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
: mMutex("MediaEngineDefaultAudioSource::mMutex") {}
MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource() = default;
MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource() = default;
@@ -393,10 +413,9 @@ nsresult MediaEngineDefaultAudioSource::Allocate(
return NS_ERROR_FAILURE;
}
mFreq = aPrefs.mFreq ? aPrefs.mFreq : 1000;
mFrequency = aPrefs.mFreq ? aPrefs.mFreq : 1000;
*aOutHandle = nullptr;
MutexAutoLock lock(mMutex);
mState = kAllocated;
return NS_OK;
}
@@ -408,11 +427,11 @@ nsresult MediaEngineDefaultAudioSource::Deallocate(
MOZ_ASSERT(!aHandle);
MOZ_ASSERT(mState == kStopped || mState == kAllocated);
MutexAutoLock lock(mMutex);
if (mStream && IsTrackIDExplicit(mTrackID)) {
mStream->EndTrack(mTrackID);
mStream = nullptr;
mTrackID = TRACK_NONE;
mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
}
mState = kReleased;
return NS_OK;
@@ -431,6 +450,7 @@ void MediaEngineDefaultAudioSource::SetTrack(
// AddAudioTrack will take ownership of segment
mStream = aStream;
mTrackID = aTrackID;
mPrincipalHandle = aPrincipal;
aStream->AddAudioTrack(aTrackID, aStream->GraphRate(), new AudioSegment(),
SourceMediaStream::ADDTRACK_QUEUED);
}
@@ -444,21 +464,20 @@ nsresult MediaEngineDefaultAudioSource::Start(
MOZ_ASSERT(IsTrackIDExplicit(mTrackID),
"SetTrack() must happen before Start()");
if (!mSineGenerator) {
// generate sine wave (default 1KHz)
mSineGenerator = new SineWaveGenerator(mStream->GraphRate(), mFreq);
if (!mPullListener) {
mPullListener = MakeAndAddRef<AudioSourcePullListener>(
mStream, mTrackID, mPrincipalHandle, mFrequency);
}
{
MutexAutoLock lock(mMutex);
mState = kStarted;
}
mState = kStarted;
NS_DispatchToMainThread(
NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__,
[stream = mStream, track = mTrackID, listener = mPullListener]() {
if (stream->IsDestroyed()) {
return;
}
stream->AddTrackListener(listener, track);
stream->SetPullingEnabled(track, true);
}));
@@ -472,19 +491,16 @@ nsresult MediaEngineDefaultAudioSource::Stop(
if (mState == kStopped || mState == kAllocated) {
return NS_OK;
}
MOZ_ASSERT(mState == kStarted);
{
MutexAutoLock lock(mMutex);
mState = kStopped;
}
mState = kStopped;
NS_DispatchToMainThread(
NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID,
listener = std::move(mPullListener)]() {
if (stream->IsDestroyed()) {
return;
}
stream->RemoveTrackListener(listener, track);
stream->SetPullingEnabled(track, false);
}));
return NS_OK;
@@ -498,34 +514,20 @@ nsresult MediaEngineDefaultAudioSource::Reconfigure(
return NS_OK;
}
void MediaEngineDefaultAudioSource::AppendToSegment(
AudioSegment& aSegment, TrackTicks aSamples,
const PrincipalHandle& aPrincipalHandle) {
RefPtr<SharedBuffer> buffer =
SharedBuffer::Create(aSamples * sizeof(int16_t));
void AudioSourcePullListener::NotifyPull(MediaStreamGraph* aGraph,
StreamTime aEndOfAppendedData,
StreamTime aDesiredTime) {
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", mStream.get(),
mTrackID);
AudioSegment segment;
TrackTicks delta = aDesiredTime - aEndOfAppendedData;
RefPtr<SharedBuffer> buffer = SharedBuffer::Create(delta * sizeof(int16_t));
int16_t* dest = static_cast<int16_t*>(buffer->Data());
mSineGenerator->generate(dest, aSamples);
mSineGenerator->generate(dest, delta);
AutoTArray<const int16_t*, 1> channels;
channels.AppendElement(dest);
aSegment.AppendFrames(buffer.forget(), channels, aSamples, aPrincipalHandle);
}
void MediaEngineDefaultAudioSource::Pull(
const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),
aTrackID);
AudioSegment segment;
// avoid accumulating rounding errors
TrackTicks desired =
aStream->TimeToTicksRoundUp(aStream->GraphRate(), aDesiredTime);
TrackTicks delta = desired - mLastNotify;
mLastNotify += delta;
AppendToSegment(segment, delta, aPrincipalHandle);
aStream->AppendToTrack(aTrackID, &segment);
segment.AppendFrames(buffer.forget(), channels, delta, mPrincipalHandle);
mStream->AppendToTrack(mTrackID, &segment);
}
void MediaEngineDefault::EnumerateDevices(

View file

@@ -22,7 +22,6 @@
#include "StreamTracks.h"
#include "MediaEngineSource.h"
#include "MediaStreamGraph.h"
#include "SineWaveGenerator.h"
namespace mozilla {
@@ -59,10 +58,6 @@ class MediaEngineDefaultVideoSource : public MediaEngineSource {
const char** aOutBadConstraint) override;
nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
nsresult Deallocate(const RefPtr<const AllocationHandle>& aHandle) override;
void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) override;
uint32_t GetBestFitnessDistance(
const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
@@ -101,7 +96,7 @@ class MediaEngineDefaultVideoSource : public MediaEngineSource {
const nsString mName;
};
class SineWaveGenerator;
class AudioSourcePullListener;
class MediaEngineDefaultAudioSource : public MediaEngineSource {
public:
@@ -127,12 +122,6 @@ class MediaEngineDefaultAudioSource : public MediaEngineSource {
const char** aOutBadConstraint) override;
nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
nsresult Deallocate(const RefPtr<const AllocationHandle>& aHandle) override;
void inline AppendToSegment(AudioSegment& aSegment, TrackTicks aSamples,
const PrincipalHandle& aPrincipalHandle);
void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) override;
bool IsFake() const override { return true; }
@@ -149,21 +138,13 @@ class MediaEngineDefaultAudioSource : public MediaEngineSource {
protected:
~MediaEngineDefaultAudioSource();
// mMutex protects mState, mStream, mTrackID
Mutex mMutex;
// Current state of this source.
// Set under mMutex on the owning thread. Accessed under one of the two.
MediaEngineSourceState mState = kReleased;
RefPtr<SourceMediaStream> mStream;
TrackID mTrackID = TRACK_NONE;
// Accessed in Pull (from MSG thread)
TrackTicks mLastNotify = 0;
uint32_t mFreq = 1000; // ditto
// Created on Start, then accessed from Pull (MSG thread)
nsAutoPtr<SineWaveGenerator> mSineGenerator;
PrincipalHandle mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
uint32_t mFrequency = 1000;
RefPtr<AudioSourcePullListener> mPullListener;
};
class MediaEngineDefault : public MediaEngine {

View file

@@ -483,12 +483,6 @@ webrtc::CaptureCapability MediaEngineRemoteVideoSource::GetCapability(
return result;
}
void MediaEngineRemoteVideoSource::Pull(
const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {}
int MediaEngineRemoteVideoSource::DeliverFrame(
uint8_t* aBuffer, const camera::VideoFrameProperties& aProps) {
// Cameras IPC thread - take great care with accessing members!

View file

@@ -135,10 +135,6 @@ class MediaEngineRemoteVideoSource : public MediaEngineSource,
nsresult FocusOnSelectedSource(
const RefPtr<const AllocationHandle>& aHandle) override;
nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) override;
void GetSettings(dom::MediaTrackSettings& aOutSettings) const override;

View file

@@ -261,16 +261,6 @@ class MediaEngineSourceInterface {
* device settings as seen by js.
*/
virtual void GetSettings(dom::MediaTrackSettings& aOutSettings) const = 0;
/**
* Pulls data from the MediaEngineSource into the track.
*
* Driven by MediaStreamTrackListener::NotifyPull.
*/
virtual void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) = 0;
};
/**

View file

@@ -264,12 +264,6 @@ nsresult MediaEngineTabVideoSource::Start(
return NS_OK;
}
void MediaEngineTabVideoSource::Pull(
const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {}
void MediaEngineTabVideoSource::Draw() {
MOZ_ASSERT(NS_IsMainThread());

View file

@@ -48,11 +48,6 @@ class MediaEngineTabVideoSource : public MediaEngineSource {
const RefPtr<const AllocationHandle>& aHandle) override;
nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) override;
uint32_t GetBestFitnessDistance(
const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
const nsString& aDeviceId) const override {

View file

@@ -189,18 +189,6 @@ nsresult MediaEngineWebRTCMicrophoneSource::Reconfigure(
return NS_OK;
}
void MediaEngineWebRTCMicrophoneSource::Pull(
const RefPtr<const AllocationHandle>&,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {
// If pull is enabled, it means that the audio input is not open, and we
// should fill it out with silence. This is the only method called on the
// MSG thread.
mInputProcessing->Pull(aStream, aTrackID, aEndOfAppendedData, aDesiredTime,
aPrincipalHandle);
}
void MediaEngineWebRTCMicrophoneSource::UpdateAECSettings(
bool aEnable, bool aUseAecMobile,
EchoCancellation::SuppressionLevel aLevel) {
@@ -208,8 +196,9 @@ void MediaEngineWebRTCMicrophoneSource::UpdateAECSettings(
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, graph = std::move(gripGraph), aEnable, aUseAecMobile, aLevel]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__,
[that, graph = std::move(gripGraph), aEnable, aUseAecMobile, aLevel] {
class Message : public ControlMessage {
public:
Message(AudioInputProcessing* aInputProcessing, bool aEnable,
@@ -235,8 +224,6 @@ void MediaEngineWebRTCMicrophoneSource::UpdateAECSettings(
graph->AppendMessage(MakeUnique<Message>(
that->mInputProcessing, aEnable, aUseAecMobile, aLevel));
}
return NS_OK;
}));
}
@@ -246,8 +233,8 @@ void MediaEngineWebRTCMicrophoneSource::UpdateAGCSettings(
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, graph = std::move(gripGraph), aEnable, aMode]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__, [that, graph = std::move(gripGraph), aEnable, aMode] {
class Message : public ControlMessage {
public:
Message(AudioInputProcessing* aInputProcessing, bool aEnable,
@@ -271,8 +258,6 @@ void MediaEngineWebRTCMicrophoneSource::UpdateAGCSettings(
graph->AppendMessage(
MakeUnique<Message>(that->mInputProcessing, aEnable, aMode));
}
return NS_OK;
}));
}
@@ -282,8 +267,8 @@ void MediaEngineWebRTCMicrophoneSource::UpdateNSSettings(
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, graph = std::move(gripGraph), aEnable, aLevel]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__, [that, graph = std::move(gripGraph), aEnable, aLevel] {
class Message : public ControlMessage {
public:
Message(AudioInputProcessing* aInputProcessing, bool aEnable,
@@ -307,8 +292,6 @@ void MediaEngineWebRTCMicrophoneSource::UpdateNSSettings(
graph->AppendMessage(
MakeUnique<Message>(that->mInputProcessing, aEnable, aLevel));
}
return NS_OK;
}));
}
@@ -318,8 +301,9 @@ void MediaEngineWebRTCMicrophoneSource::UpdateAPMExtraOptions(
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, graph = std::move(gripGraph), aExtendedFilter, aDelayAgnostic]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__,
[that, graph = std::move(gripGraph), aExtendedFilter, aDelayAgnostic] {
class Message : public ControlMessage {
public:
Message(AudioInputProcessing* aInputProcessing, bool aExtendedFilter,
@@ -344,8 +328,6 @@ void MediaEngineWebRTCMicrophoneSource::UpdateAPMExtraOptions(
graph->AppendMessage(MakeUnique<Message>(
that->mInputProcessing, aExtendedFilter, aDelayAgnostic));
}
return NS_OK;
}));
}
@@ -372,8 +354,8 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
RefPtr<MediaStreamGraphImpl> graphImpl = mStream->GraphImpl();
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, graph = std::move(graphImpl), prefs = aPrefs]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__, [that, graph = std::move(graphImpl), prefs = aPrefs] {
that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
@@ -405,8 +387,6 @@ void MediaEngineWebRTCMicrophoneSource::ApplySettings(
graph->AppendMessage(MakeUnique<Message>(
that->mInputProcessing, passThrough, prefs.mChannels));
}
return NS_OK;
}));
}
@@ -431,13 +411,13 @@ nsresult MediaEngineWebRTCMicrophoneSource::Allocate(
}
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
NS_DispatchToMainThread(media::NewRunnableFrom([that, prefs = outputPrefs]() {
that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
that->mSettings->mChannelCount.Value() = prefs.mChannels;
return NS_OK;
}));
NS_DispatchToMainThread(
NS_NewRunnableFunction(__func__, [that, prefs = outputPrefs] {
that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
that->mSettings->mChannelCount.Value() = prefs.mChannels;
}));
mCurrentPrefs = outputPrefs;
@@ -472,19 +452,18 @@ nsresult MediaEngineWebRTCMicrophoneSource::Deallocate(
if (mStream && IsTrackIDExplicit(mTrackID)) {
RefPtr<MediaStream> sourceStream = mStream;
RefPtr<AudioInputProcessing> inputProcessing = mInputProcessing;
NS_DispatchToMainThread(media::NewRunnableFrom(
[stream = std::move(sourceStream),
audioInputProcessing = std::move(inputProcessing),
trackID = mTrackID]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__, [stream = std::move(sourceStream),
audioInputProcessing = std::move(inputProcessing),
trackID = mTrackID] {
if (stream->IsDestroyed()) {
// This stream has already been destroyed on main thread by its
// DOMMediaStream. No cleanup left to do.
return NS_OK;
return;
}
MOZ_ASSERT(stream->GraphImpl());
stream->GraphImpl()->AppendMessage(MakeUnique<EndTrackMessage>(
stream, audioInputProcessing, trackID));
return NS_OK;
}));
}
@@ -522,12 +501,25 @@ void MediaEngineWebRTCMicrophoneSource::SetTrack(
AudioSegment* segment = new AudioSegment();
aStream->AddAudioTrack(aTrackID, aStream->GraphRate(), segment,
mStream->AddAudioTrack(mTrackID, mStream->GraphRate(), segment,
SourceMediaStream::ADDTRACK_QUEUED);
mInputProcessing = new AudioInputProcessing(mDeviceMaxChannelCount, mStream,
mTrackID, mPrincipal);
// We only add the listener once -- AudioInputProcessing wants pull
// notifications also when stopped for appending silence.
mPullListener = new AudioInputProcessingPullListener(mInputProcessing);
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__, [self = RefPtr<MediaEngineWebRTCMicrophoneSource>(this),
stream = mStream, track = mTrackID, listener = mPullListener] {
if (stream->IsDestroyed()) {
return;
}
stream->AddTrackListener(listener, track);
}));
LOG("Stream %p registered for microphone capture", aStream.get());
}
@@ -575,18 +567,16 @@ nsresult MediaEngineWebRTCMicrophoneSource::Start(
}
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, deviceID, stream = mStream, track = mTrackID]() {
NS_DispatchToMainThread(NS_NewRunnableFunction(
__func__, [that, deviceID, stream = mStream, track = mTrackID] {
if (stream->IsDestroyed()) {
return NS_OK;
return;
}
stream->GraphImpl()->AppendMessage(MakeUnique<StartStopMessage>(
that->mInputProcessing, StartStopMessage::Start));
stream->SetPullingEnabled(track, true);
stream->OpenAudioInput(deviceID, that->mInputProcessing);
return NS_OK;
}));
ApplySettings(mCurrentPrefs);
@@ -610,19 +600,18 @@ nsresult MediaEngineWebRTCMicrophoneSource::Stop(
}
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
NS_DispatchToMainThread(media::NewRunnableFrom([that, stream = mStream]() {
if (stream->IsDestroyed()) {
return NS_OK;
}
NS_DispatchToMainThread(
NS_NewRunnableFunction(__func__, [that, stream = mStream] {
if (stream->IsDestroyed()) {
return;
}
stream->GraphImpl()->AppendMessage(MakeUnique<StartStopMessage>(
that->mInputProcessing, StartStopMessage::Stop));
CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
stream->CloseAudioInput(id, that->mInputProcessing);
return NS_OK;
}));
stream->GraphImpl()->AppendMessage(MakeUnique<StartStopMessage>(
that->mInputProcessing, StartStopMessage::Stop));
CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
stream->CloseAudioInput(id, that->mInputProcessing);
}));
MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
mState = kStopped;
@@ -791,12 +780,10 @@ void AudioInputProcessing::Start() {
void AudioInputProcessing::Stop() { mEnabled = false; }
void AudioInputProcessing::Pull(const RefPtr<SourceMediaStream>& aStream,
TrackID aTrackID, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),
aTrackID);
void AudioInputProcessing::Pull(StreamTime aEndOfAppendedData,
StreamTime aDesiredTime) {
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", mStream.get(),
mTrackID);
if (mEnded) {
return;
@@ -818,10 +805,10 @@ void AudioInputProcessing::Pull(const RefPtr<SourceMediaStream>& aStream,
// If there were live frames appended and we haven't appended the
// right amount of silence, we'll have to append silence once more,
// failing the other assert below.
MOZ_ASSERT_IF(!PassThrough(aStream->GraphImpl()) && !mPacketizerInput,
MOZ_ASSERT_IF(!PassThrough(mStream->GraphImpl()) && !mPacketizerInput,
!mLiveFramesAppended);
if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) {
if (!PassThrough(mStream->GraphImpl()) && mPacketizerInput) {
// Processing is active and is processed in chunks of 10ms through the
// input packetizer. We allow for 10ms of silence on the track to
// accommodate the buffering worst-case.
@@ -841,7 +828,7 @@ void AudioInputProcessing::Pull(const RefPtr<SourceMediaStream>& aStream,
// before the first graph iteration.
// And other combinations of order of audio sample sources.
MOZ_ASSERT_IF(mEnabled && mLiveFramesAppended && mLiveSilenceAppended,
aStream->GraphImpl()->IterationEnd() > mLastCallbackAppendTime);
mStream->GraphImpl()->IterationEnd() > mLastCallbackAppendTime);
if (mLiveFramesAppended) {
mLiveSilenceAppended = true;
@@ -849,7 +836,7 @@ void AudioInputProcessing::Pull(const RefPtr<SourceMediaStream>& aStream,
AudioSegment audio;
audio.AppendNullData(delta);
aStream->AppendToTrack(aTrackID, &audio);
mStream->AppendToTrack(mTrackID, &audio);
}
void AudioInputProcessing::NotifyOutputData(MediaStreamGraphImpl* aGraph,

View file

@@ -6,15 +6,17 @@
#ifndef MediaEngineWebRTCAudio_h
#define MediaEngineWebRTCAudio_h
#include "MediaEngineWebRTC.h"
#include "AudioPacketizer.h"
#include "AudioSegment.h"
#include "AudioDeviceInfo.h"
#include "MediaEngineWebRTC.h"
#include "MediaStreamListener.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
namespace mozilla {
class AudioInputProcessing;
class AudioInputProcessingPullListener;
// This class is created and used exclusively on the Media Manager thread, with
// exactly two exceptions:
@@ -57,11 +59,6 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
const nsString& aDeviceId,
const char** aOutBadConstraint) override;
void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) override;
/**
* Assigns the current settings of the capture to aOutSettings.
* Main thread only.
@@ -142,6 +139,13 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
// See note at the top of this class.
RefPtr<AudioInputProcessing> mInputProcessing;
// The class receiving NotifyPull() from the MediaStreamGraph, and forwarding
// them on the graph thread. This is separated from AudioInputProcessing since
// both AudioDataListener (base class of AudioInputProcessing) and
// MediaStreamTrackListener (base class of AudioInputProcessingPullListener)
// implement refcounting.
RefPtr<AudioInputProcessingPullListener> mPullListener;
};
// This class is created on the MediaManager thread, and then exclusively used
@@ -153,9 +157,7 @@ class AudioInputProcessing : public AudioDataListener {
RefPtr<SourceMediaStream> aStream, TrackID aTrackID,
const PrincipalHandle& aPrincipalHandle);
void Pull(const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle);
void Pull(StreamTime aEndOfAppendedData, StreamTime aDesiredTime);
void NotifyOutputData(MediaStreamGraphImpl* aGraph, AudioDataValue* aBuffer,
size_t aFrames, TrackRate aRate,
@@ -201,7 +203,7 @@ class AudioInputProcessing : public AudioDataListener {
private:
~AudioInputProcessing() = default;
RefPtr<SourceMediaStream> mStream;
const RefPtr<SourceMediaStream> mStream;
// This implements the processing algorithm to apply to the input (e.g. a
// microphone). If all algorithms are disabled, this class is not used. This
// class only accepts audio chunks of 10ms. It has two inputs and one output:
@@ -243,9 +245,9 @@ class AudioInputProcessing : public AudioDataListener {
// silence *after* the first audio callback has appended real frames.
bool mLiveSilenceAppended;
// Track ID on which the data is to be appended after processing
TrackID mTrackID;
const TrackID mTrackID;
// Principal for the data that flows through this class.
PrincipalHandle mPrincipal;
const PrincipalHandle mPrincipal;
// Whether or not this MediaEngine is enabled. If it's not enabled, it
// operates in "pull" mode, and we append silence only, releasing the audio
// input stream.
@@ -254,6 +256,28 @@ class AudioInputProcessing : public AudioDataListener {
bool mEnded;
};
// This class is created on the media thread, as part of SetTrack(), then
// entirely self-sustained until destruction, just forwarding calls to Pull().
class AudioInputProcessingPullListener : public MediaStreamTrackListener {
public:
explicit AudioInputProcessingPullListener(
RefPtr<AudioInputProcessing> aInputProcessing)
: mInputProcessing(std::move(aInputProcessing)) {
MOZ_COUNT_CTOR(AudioInputProcessingPullListener);
}
~AudioInputProcessingPullListener() {
MOZ_COUNT_DTOR(AudioInputProcessingPullListener);
}
void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime) override {
mInputProcessing->Pull(aEndOfAppendedData, aDesiredTime);
}
const RefPtr<AudioInputProcessing> mInputProcessing;
};
class MediaEngineWebRTCAudioCaptureSource : public MediaEngineSource {
public:
explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid) {}
@@ -285,13 +309,6 @@ class MediaEngineWebRTCAudioCaptureSource : public MediaEngineSource {
const nsString& aDeviceId,
const char** aOutBadConstraint) override;
void Pull(const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) override {
MOZ_ASSERT_UNREACHABLE("Should never have to append silence");
}
dom::MediaSourceEnum GetMediaSource() const override {
return dom::MediaSourceEnum::AudioCapture;
}