Bug 1172394 - Perform some forgotten Stream -> Track renaming in DecodedStream. r=padenot

Differential Revision: https://phabricator.services.mozilla.com/D52036

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Andreas Pehrson 2019-11-13 22:39:48 +00:00
Parent 55b1cc6058
Commit 8537128686
1 changed file with 65 additions and 69 deletions

View file

@@ -54,34 +54,34 @@ class DecodedStreamGraphListener {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
public:
DecodedStreamGraphListener(
SourceMediaTrack* aAudioStream,
SourceMediaTrack* aAudioTrack,
MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
SourceMediaTrack* aVideoStream,
SourceMediaTrack* aVideoTrack,
MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
AbstractThread* aMainThread)
: mAudioTrackListener(
aAudioStream
? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioStream)
aAudioTrack
? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioTrack)
: nullptr),
mAudioEndedHolder(std::move(aAudioEndedHolder)),
mVideoTrackListener(
aVideoStream
? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoStream)
aVideoTrack
? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoTrack)
: nullptr),
mVideoEndedHolder(std::move(aVideoEndedHolder)),
mAudioStream(aAudioStream),
mVideoStream(aVideoStream),
mAudioTrack(aAudioTrack),
mVideoTrack(aVideoTrack),
mAbstractMainThread(aMainThread) {
MOZ_ASSERT(NS_IsMainThread());
if (mAudioTrackListener) {
mAudioStream->AddListener(mAudioTrackListener);
mAudioTrack->AddListener(mAudioTrackListener);
} else {
mAudioEnded = true;
mAudioEndedHolder.ResolveIfExists(true, __func__);
}
if (mVideoTrackListener) {
mVideoStream->AddListener(mVideoTrackListener);
mVideoTrack->AddListener(mVideoTrackListener);
} else {
mVideoEnded = true;
mVideoEndedHolder.ResolveIfExists(true, __func__);
@@ -89,30 +89,30 @@ class DecodedStreamGraphListener {
}
void NotifyOutput(SourceMediaTrack* aTrack, TrackTime aCurrentTrackTime) {
if (aTrack == mAudioStream) {
if (aTrack == mAudioTrack) {
if (aCurrentTrackTime >= mAudioEnd) {
mAudioStream->End();
mAudioTrack->End();
}
} else if (aTrack == mVideoStream) {
} else if (aTrack == mVideoTrack) {
if (aCurrentTrackTime >= mVideoEnd) {
mVideoStream->End();
mVideoTrack->End();
}
} else {
MOZ_CRASH("Unexpected source track");
}
if (aTrack != mAudioStream && mAudioStream && !mAudioEnded) {
if (aTrack != mAudioTrack && mAudioTrack && !mAudioEnded) {
// Only audio playout drives the clock forward, if present and live.
return;
}
MOZ_ASSERT_IF(aTrack == mAudioStream, !mAudioEnded);
MOZ_ASSERT_IF(aTrack == mVideoStream, !mVideoEnded);
MOZ_ASSERT_IF(aTrack == mAudioTrack, !mAudioEnded);
MOZ_ASSERT_IF(aTrack == mVideoTrack, !mVideoEnded);
mOnOutput.Notify(aTrack->TrackTimeToMicroseconds(aCurrentTrackTime));
}
void NotifyEnded(SourceMediaTrack* aTrack) {
if (aTrack == mAudioStream) {
if (aTrack == mAudioTrack) {
mAudioEnded = true;
} else if (aTrack == mVideoStream) {
} else if (aTrack == mVideoTrack) {
mVideoEnded = true;
} else {
MOZ_CRASH("Unexpected source track");
@@ -145,9 +145,9 @@ class DecodedStreamGraphListener {
* Callable from any thread.
*/
void EndTrackAt(SourceMediaTrack* aTrack, TrackTime aEnd) {
if (aTrack == mAudioStream) {
if (aTrack == mAudioTrack) {
mAudioEnd = aEnd;
} else if (aTrack == mVideoStream) {
} else if (aTrack == mVideoTrack) {
mVideoEnd = aEnd;
} else {
MOZ_CRASH("Unexpected source track");
@@ -156,9 +156,9 @@ class DecodedStreamGraphListener {
void DoNotifyTrackEnded(SourceMediaTrack* aTrack) {
MOZ_ASSERT(NS_IsMainThread());
if (aTrack == mAudioStream) {
if (aTrack == mAudioTrack) {
mAudioEndedHolder.ResolveIfExists(true, __func__);
} else if (aTrack == mVideoStream) {
} else if (aTrack == mVideoTrack) {
mVideoEndedHolder.ResolveIfExists(true, __func__);
} else {
MOZ_CRASH("Unexpected source track");
@@ -168,16 +168,16 @@ class DecodedStreamGraphListener {
void Forget() {
MOZ_ASSERT(NS_IsMainThread());
if (mAudioTrackListener && !mAudioStream->IsDestroyed()) {
mAudioStream->End();
mAudioStream->RemoveListener(mAudioTrackListener);
if (mAudioTrackListener && !mAudioTrack->IsDestroyed()) {
mAudioTrack->End();
mAudioTrack->RemoveListener(mAudioTrackListener);
}
mAudioTrackListener = nullptr;
mAudioEndedHolder.ResolveIfExists(false, __func__);
if (mVideoTrackListener && !mVideoStream->IsDestroyed()) {
mVideoStream->End();
mVideoStream->RemoveListener(mVideoTrackListener);
if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
mVideoTrack->End();
mVideoTrack->RemoveListener(mVideoTrackListener);
}
mVideoTrackListener = nullptr;
mVideoEndedHolder.ResolveIfExists(false, __func__);
@@ -204,8 +204,8 @@ class DecodedStreamGraphListener {
bool mVideoEnded = false;
// Any thread.
const RefPtr<SourceMediaTrack> mAudioStream;
const RefPtr<SourceMediaTrack> mVideoStream;
const RefPtr<SourceMediaTrack> mAudioTrack;
const RefPtr<SourceMediaTrack> mVideoTrack;
Atomic<TrackTime> mAudioEnd{TRACK_TIME_MAX};
Atomic<TrackTime> mVideoEnd{TRACK_TIME_MAX};
const RefPtr<AbstractThread> mAbstractMainThread;
@@ -236,8 +236,8 @@ class DecodedStreamData final {
public:
DecodedStreamData(
OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
RefPtr<SourceMediaTrack> aAudioStream,
RefPtr<SourceMediaTrack> aVideoStream,
RefPtr<SourceMediaTrack> aAudioTrack,
RefPtr<SourceMediaTrack> aVideoTrack,
MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
AbstractThread* aMainThread);
@@ -258,9 +258,9 @@ class DecodedStreamData final {
// Count of audio frames written to the track
int64_t mAudioFramesWritten;
// Count of video frames written to the track in the track's rate
TrackTime mVideoStreamWritten;
TrackTime mVideoTrackWritten;
// Count of audio frames written to the track in the track's rate
TrackTime mAudioStreamWritten;
TrackTime mAudioTrackWritten;
// mNextAudioTime is the end timestamp for the last packet sent to the track.
// Therefore audio packets starting at or after this time need to be copied
// to the output track.
@@ -283,8 +283,8 @@ class DecodedStreamData final {
bool mHaveSentFinishAudio;
bool mHaveSentFinishVideo;
const RefPtr<SourceMediaTrack> mAudioStream;
const RefPtr<SourceMediaTrack> mVideoStream;
const RefPtr<SourceMediaTrack> mAudioTrack;
const RefPtr<SourceMediaTrack> mVideoTrack;
const RefPtr<DecodedStreamGraphListener> mListener;
const RefPtr<OutputStreamManager> mOutputStreamManager;
@@ -293,28 +293,27 @@ class DecodedStreamData final {
DecodedStreamData::DecodedStreamData(
OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
RefPtr<SourceMediaTrack> aAudioStream,
RefPtr<SourceMediaTrack> aVideoStream,
RefPtr<SourceMediaTrack> aAudioTrack, RefPtr<SourceMediaTrack> aVideoTrack,
MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
AbstractThread* aMainThread)
: mAudioFramesWritten(0),
mVideoStreamWritten(0),
mAudioStreamWritten(0),
mVideoTrackWritten(0),
mAudioTrackWritten(0),
mNextAudioTime(aInit.mStartTime),
mHaveSentFinishAudio(false),
mHaveSentFinishVideo(false),
mAudioStream(std::move(aAudioStream)),
mVideoStream(std::move(aVideoStream)),
mAudioTrack(std::move(aAudioTrack)),
mVideoTrack(std::move(aVideoTrack)),
// DecodedStreamGraphListener will resolve these promises.
mListener(MakeRefPtr<DecodedStreamGraphListener>(
mAudioStream, std::move(aAudioEndedPromise), mVideoStream,
mAudioTrack, std::move(aAudioEndedPromise), mVideoTrack,
std::move(aVideoEndedPromise), aMainThread)),
mOutputStreamManager(aOutputStreamManager),
mAbstractMainThread(aMainThread) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(
mOutputStreamManager->HasTracks(mAudioStream, mVideoStream),
mOutputStreamManager->HasTracks(mAudioTrack, mVideoTrack),
"Tracks must be pre-created on main thread");
}
@@ -329,7 +328,7 @@ void DecodedStreamData::Forget() { mListener->Forget(); }
void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
aInfo.mInstance = NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
aInfo.mAudioFramesWritten = mAudioFramesWritten;
aInfo.mStreamAudioWritten = mAudioStreamWritten;
aInfo.mStreamAudioWritten = mAudioTrackWritten;
aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
aInfo.mLastVideoStartTime =
mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
@@ -424,26 +423,26 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime,
mVideoEndedPromise.Resolve(true, __func__);
return NS_OK;
}
RefPtr<SourceMediaTrack> audioStream =
RefPtr<SourceMediaTrack> audioTrack =
mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::AUDIO);
if (mInit.mInfo.HasAudio() && !audioStream) {
if (mInit.mInfo.HasAudio() && !audioTrack) {
MOZ_DIAGNOSTIC_ASSERT(
!mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
audioStream = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
audioTrack = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
}
if (audioStream) {
audioStream->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
if (audioTrack) {
audioTrack->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
}
RefPtr<SourceMediaTrack> videoStream =
RefPtr<SourceMediaTrack> videoTrack =
mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::VIDEO);
if (mInit.mInfo.HasVideo() && !videoStream) {
if (mInit.mInfo.HasVideo() && !videoTrack) {
MOZ_DIAGNOSTIC_ASSERT(
!mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
videoStream = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
videoTrack = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
}
mData = MakeUnique<DecodedStreamData>(
mOutputStreamManager, std::move(mInit), std::move(audioStream),
std::move(videoStream), std::move(mAudioEndedPromise),
mOutputStreamManager, std::move(mInit), std::move(audioTrack),
std::move(videoTrack), std::move(mAudioEndedPromise),
std::move(mVideoEndedPromise), mAbstractMainThread);
return NS_OK;
}
@@ -628,12 +627,11 @@ void DecodedStream::SendAudio(double aVolume,
// |mNextAudioTime| is updated as we process each audio sample in
// SendStreamAudio().
if (output.GetDuration() > 0) {
mData->mAudioStreamWritten += mData->mAudioStream->AppendData(&output);
mData->mAudioTrackWritten += mData->mAudioTrack->AppendData(&output);
}
if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
mData->mListener->EndTrackAt(mData->mAudioStream,
mData->mAudioStreamWritten);
mData->mListener->EndTrackAt(mData->mAudioTrack, mData->mAudioTrackWritten);
mData->mHaveSentFinishAudio = true;
}
}
@@ -644,9 +642,9 @@ void DecodedStreamData::WriteVideoToSegment(
VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle) {
RefPtr<layers::Image> image = aImage;
auto end =
mVideoStream->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
mVideoTrack->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
auto start =
mVideoStream->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
mVideoTrack->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
aTimeStamp);
// Extend this so we get accurate durations for all frames.
@@ -692,7 +690,7 @@ void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
// for video tracks as part of bug 1493618.
resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
aPrincipalHandle, false, currentTime);
mData->mVideoStream->AppendData(&resetter);
mData->mVideoTrack->AppendData(&resetter);
// Consumer buffers have been reset. We now set the next time to the start
// time of the current frame, so that it can be displayed again on resuming.
@@ -772,7 +770,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
TimeUnit end = std::max(
v->GetEndTime(),
lastEnd + TimeUnit::FromMicroseconds(
mData->mVideoStream->TrackTimeToMicroseconds(1) + 1));
mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
mData->mLastVideoImage = v->mImage;
mData->mLastVideoImageDisplaySize = v->mDisplay;
mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
@@ -788,7 +786,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
}
if (output.GetDuration() > 0) {
mData->mVideoStreamWritten += mData->mVideoStream->AppendData(&output);
mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
}
if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
@@ -810,7 +808,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
// We round the nr of microseconds up, because WriteVideoToSegment
// will round the conversion from microseconds to TrackTime down.
auto deviation = TimeUnit::FromMicroseconds(
mData->mVideoStream->TrackTimeToMicroseconds(1) + 1);
mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1);
auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
mData->WriteVideoToSegment(
mData->mLastVideoImage, start, start + deviation,
@@ -821,11 +819,9 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
if (forceBlack) {
endSegment.ReplaceWithDisabled();
}
mData->mVideoStreamWritten +=
mData->mVideoStream->AppendData(&endSegment);
mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
}
mData->mListener->EndTrackAt(mData->mVideoStream,
mData->mVideoStreamWritten);
mData->mListener->EndTrackAt(mData->mVideoTrack, mData->mVideoTrackWritten);
mData->mHaveSentFinishVideo = true;
}
}