Bug 1513973 - Enable pulling at the same time as starting audio sources. r=jib

Otherwise we risk building up a buffer in the microphone source from when
Start() is run until pulling is enabled. This manifests itself as input latency
to the user.

Differential Revision: https://phabricator.services.mozilla.com/D15194

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Andreas Pehrson 2019-01-04 06:39:43 +00:00
Родитель d8bf65f800
Коммит 997855e877
4 изменённых файла: 45 добавлений и 22 удаления

Просмотреть файл

@ -4123,6 +4123,17 @@ SourceListener::InitializeAsync() {
MozPromiseHolder<SourceListenerPromise>& aHolder) {
if (audioDevice) {
audioDevice->SetTrack(stream, kAudioTrack, principal);
}
if (videoDevice) {
videoDevice->SetTrack(stream, kVideoTrack, principal);
}
// SetTrack() queued the tracks. We add them synchronously here
// to avoid races.
stream->FinishAddTracks();
if (audioDevice) {
nsresult rv = audioDevice->Start();
if (NS_FAILED(rv)) {
nsString log;
@ -4143,7 +4154,6 @@ SourceListener::InitializeAsync() {
}
if (videoDevice) {
videoDevice->SetTrack(stream, kVideoTrack, principal);
nsresult rv = videoDevice->Start();
if (NS_FAILED(rv)) {
if (audioDevice) {
@ -4160,9 +4170,6 @@ SourceListener::InitializeAsync() {
}
}
// Start() queued the tracks to be added synchronously to avoid
// races
stream->FinishAddTracks();
LOG("started all sources");
aHolder.Resolve(true, __func__);
})
@ -4186,15 +4193,8 @@ SourceListener::InitializeAsync() {
state->mTrackEnabled = true;
state->mTrackEnabledTime = TimeStamp::Now();
if (state->mDevice->GetMediaSource() !=
MediaSourceEnum::AudioCapture) {
// For AudioCapture mStream is a dummy stream, so we don't
// try to enable pulling - there won't be a track to enable
// it for.
mStream->SetPullingEnabled(state == mAudioDeviceState.get()
? kAudioTrack
: kVideoTrack,
true);
if (state == mVideoDeviceState.get()) {
mStream->SetPullingEnabled(kVideoTrack, true);
}
}
return SourceListenerPromise::CreateAndResolve(true, __func__);

Просмотреть файл

@ -454,8 +454,16 @@ nsresult MediaEngineDefaultAudioSource::Start(
mSineGenerator = new SineWaveGenerator(mStream->GraphRate(), mFreq);
}
MutexAutoLock lock(mMutex);
mState = kStarted;
{
MutexAutoLock lock(mMutex);
mState = kStarted;
}
NS_DispatchToMainThread(
NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
stream->SetPullingEnabled(track, true);
}));
return NS_OK;
}
@ -469,8 +477,15 @@ nsresult MediaEngineDefaultAudioSource::Stop(
MOZ_ASSERT(mState == kStarted);
MutexAutoLock lock(mMutex);
mState = kStopped;
{
MutexAutoLock lock(mMutex);
mState = kStopped;
}
NS_DispatchToMainThread(
NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
stream->SetPullingEnabled(track, false);
}));
return NS_OK;
}

Просмотреть файл

@ -156,6 +156,8 @@ class MediaEngineSourceInterface {
*
* If this is the first AllocationHandle to start, the underlying device
* will be started.
*
* NB: Audio sources handle the enabling of pulling themselves.
*/
virtual nsresult Start(const RefPtr<const AllocationHandle>& aHandle) = 0;
@ -204,6 +206,8 @@ class MediaEngineSourceInterface {
*
* Double-stopping a given allocation handle is allowed and will return NS_OK.
* This is necessary sometimes during shutdown.
*
* NB: Audio sources handle the disabling of pulling themselves.
*/
virtual nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) = 0;

Просмотреть файл

@ -574,14 +574,16 @@ nsresult MediaEngineWebRTCMicrophoneSource::Start(
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, graph = std::move(gripGraph), deviceID]() mutable {
NS_DispatchToMainThread(
media::NewRunnableFrom([that, graph = std::move(gripGraph), deviceID,
stream = mStream, track = mTrackID]() mutable {
if (graph) {
graph->AppendMessage(MakeUnique<StartStopMessage>(
that->mInputProcessing, StartStopMessage::Start));
}
that->mStream->OpenAudioInput(deviceID, that->mInputProcessing);
stream->OpenAudioInput(deviceID, that->mInputProcessing);
stream->SetPullingEnabled(track, true);
return NS_OK;
}));
@ -608,8 +610,9 @@ nsresult MediaEngineWebRTCMicrophoneSource::Stop(
RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
NS_DispatchToMainThread(media::NewRunnableFrom(
[that, graph = std::move(gripGraph), stream = mStream]() mutable {
NS_DispatchToMainThread(
media::NewRunnableFrom([that, graph = std::move(gripGraph),
stream = mStream, track = mTrackID]() mutable {
if (graph) {
graph->AppendMessage(MakeUnique<StartStopMessage>(
that->mInputProcessing, StartStopMessage::Stop));
@ -618,6 +621,7 @@ nsresult MediaEngineWebRTCMicrophoneSource::Stop(
CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
stream->CloseAudioInput(id, that->mInputProcessing);
stream->SetPullingEnabled(track, false);
return NS_OK;
}));