Bug 1322799 part 7 - move AccurateSeekTask::Drop{Audio,Video}UpToSeekTarget(); r=jwwang

MozReview-Commit-ID: 8i3yLFqXfJL

--HG--
extra : rebase_source : 64f73096a7ed9ceda0fbf835eee9a370660545b0
Kaku Kuo 2016-12-09 13:20:10 -10:00
Parent af9ba50992
Commit b7ca8de04f
3 changed files with 114 additions and 124 deletions

View file

@@ -132,124 +132,6 @@ AccurateSeekTask::Seek(const media::TimeUnit& aDuration)
return mSeekTaskPromise.Ensure(__func__);
}
nsresult
AccurateSeekTask::DropAudioUpToSeekTarget(MediaData* aSample)
{
AssertOwnerThread();
RefPtr<AudioData> audio(aSample->As<AudioData>());
MOZ_ASSERT(audio && mTarget.IsAccurate());
CheckedInt64 sampleDuration = FramesToUsecs(audio->mFrames, mAudioRate);
if (!sampleDuration.isValid()) {
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
if (audio->mTime + sampleDuration.value() <= mTarget.GetTime().ToMicroseconds()) {
// Our seek target lies after the frames in this AudioData. Don't
// push it onto the audio queue, and keep decoding forwards.
return NS_OK;
}
if (audio->mTime > mTarget.GetTime().ToMicroseconds()) {
// The seek target doesn't lie in the audio block just after the last
// audio frames we've seen which were before the seek target. This
// could have been the first audio data we've seen after seek, i.e. the
// seek terminated after the seek target in the audio stream. Just
// abort the audio decode-to-target, the state machine will play
// silence to cover the gap. Typically this happens in poorly muxed
// files.
DECODER_WARN("Audio not synced after seek, maybe a poorly muxed file?");
mSeekedAudioData = audio;
mDoneAudioSeeking = true;
return NS_OK;
}
// The seek target lies somewhere in this AudioData's frames, strip off
// any frames which lie before the seek target, so we'll begin playback
// exactly at the seek target.
NS_ASSERTION(mTarget.GetTime().ToMicroseconds() >= audio->mTime,
"Target must at or be after data start.");
NS_ASSERTION(mTarget.GetTime().ToMicroseconds() < audio->mTime + sampleDuration.value(),
"Data must end after target.");
CheckedInt64 framesToPrune =
UsecsToFrames(mTarget.GetTime().ToMicroseconds() - audio->mTime, mAudioRate);
if (!framesToPrune.isValid()) {
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
if (framesToPrune.value() > audio->mFrames) {
// We've messed up somehow. Don't try to trim frames, the |frames|
// variable below will overflow.
DECODER_WARN("Can't prune more frames that we have!");
return NS_ERROR_FAILURE;
}
uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value());
uint32_t channels = audio->mChannels;
AlignedAudioBuffer audioData(frames * channels);
if (!audioData) {
return NS_ERROR_OUT_OF_MEMORY;
}
memcpy(audioData.get(),
audio->mAudioData.get() + (framesToPrune.value() * channels),
frames * channels * sizeof(AudioDataValue));
CheckedInt64 duration = FramesToUsecs(frames, mAudioRate);
if (!duration.isValid()) {
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
RefPtr<AudioData> data(new AudioData(audio->mOffset,
mTarget.GetTime().ToMicroseconds(),
duration.value(),
frames,
Move(audioData),
channels,
audio->mRate));
MOZ_ASSERT(!mSeekedAudioData, "Should be the 1st sample after seeking");
mSeekedAudioData = data;
mDoneAudioSeeking = true;
return NS_OK;
}
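For reference, the pruning in DropAudioUpToSeekTarget() is plain frame/microsecond conversion at the audio sample rate. A minimal standalone sketch of the same arithmetic with made-up numbers and bare int64_t math (the real code goes through the CheckedInt64-based FramesToUsecs/UsecsToFrames helpers):

#include <cstdint>
#include <cstdio>

// Bare-integer stand-ins for FramesToUsecs/UsecsToFrames (no overflow checks).
static int64_t FramesToUsecsApprox(int64_t aFrames, int64_t aRate) {
  return aFrames * 1000000 / aRate;
}
static int64_t UsecsToFramesApprox(int64_t aUsecs, int64_t aRate) {
  return aUsecs * aRate / 1000000;
}

int main() {
  const int64_t rate = 44100;         // Hz (hypothetical)
  const int64_t audioTime = 1000000;  // AudioData start time in us
  const int64_t frames = 4410;        // 100 ms worth of frames
  const int64_t target = 1050000;     // seek target in us, inside this block

  const int64_t framesToPrune = UsecsToFramesApprox(target - audioTime, rate);  // 2205
  const int64_t framesKept = frames - framesToPrune;                            // 2205
  const int64_t newDuration = FramesToUsecsApprox(framesKept, rate);            // 50000 us

  // The rebuilt AudioData starts exactly at the seek target.
  printf("prune %lld frames, keep %lld, new range [%lld, %lld] us\n",
         (long long)framesToPrune, (long long)framesKept,
         (long long)target, (long long)(target + newDuration));
  return 0;
}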
nsresult
AccurateSeekTask::DropVideoUpToSeekTarget(MediaData* aSample)
{
AssertOwnerThread();
RefPtr<VideoData> video(aSample->As<VideoData>());
MOZ_ASSERT(video);
DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld]",
video->mTime, video->GetEndTime());
const int64_t target = mTarget.GetTime().ToMicroseconds();
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (target >= video->GetEndTime()) {
DECODER_LOG("DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
video->mTime, video->GetEndTime(), target);
mFirstVideoFrameAfterSeek = video;
} else {
if (target >= video->mTime && video->GetEndTime() >= target) {
// The seek target lies inside this frame's time slice. Adjust the frame's
// start time to match the seek target. We do this by replacing the
// first frame with a shallow copy which has the new timestamp.
RefPtr<VideoData> temp = VideoData::ShallowCopyUpdateTimestamp(video.get(), target);
video = temp;
}
mFirstVideoFrameAfterSeek = nullptr;
DECODER_LOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
video->mTime, video->GetEndTime(), target);
MOZ_ASSERT(!mSeekedVideoData, "Should be the 1st sample after seeking");
mSeekedVideoData = video;
mDoneVideoSeeking = true;
}
return NS_OK;
}
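DropVideoUpToSeekTarget() boils down to a three-way decision per decoded frame: drop it (but remember it as mFirstVideoFrameAfterSeek) when it ends at or before the target, shallow-copy it with its start time moved to the target when the target lies inside it, and keep it untouched when it starts after the target. A hedged, self-contained sketch of that decision with hypothetical names:

#include <cassert>
#include <cstdint>

// Hypothetical classification mirroring the branches above; not a Mozilla API.
enum class SeekFrameAction {
  Drop,          // frame ends at/before target: keep decoding forwards
  TrimToTarget,  // target lies inside the frame: shallow copy with new start time
  KeepAsIs       // frame starts after the target: first frame after seek
};

static SeekFrameAction ClassifyVideoFrame(int64_t aStartUs, int64_t aEndUs,
                                          int64_t aTargetUs) {
  if (aTargetUs >= aEndUs) {
    return SeekFrameAction::Drop;
  }
  if (aTargetUs >= aStartUs) {
    return SeekFrameAction::TrimToTarget;
  }
  return SeekFrameAction::KeepAsIs;
}

int main() {
  // Frame [40000, 80000) us against a 60000 us target: trim to the target.
  assert(ClassifyVideoFrame(40000, 80000, 60000) == SeekFrameAction::TrimToTarget);
  // Frame that ends before the target: drop it and keep decoding.
  assert(ClassifyVideoFrame(0, 40000, 60000) == SeekFrameAction::Drop);
  // Frame that starts after the target: use it unmodified.
  assert(ClassifyVideoFrame(80000, 120000, 60000) == SeekFrameAction::KeepAsIs);
  return 0;
}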
void
AccurateSeekTask::MaybeFinishSeek()
{

View file

@@ -43,10 +43,6 @@ public:
~AccurateSeekTask();
nsresult DropAudioUpToSeekTarget(MediaData* aSample);
nsresult DropVideoUpToSeekTarget(MediaData* aSample);
void MaybeFinishSeek();
/*

View file

@@ -917,7 +917,7 @@ public:
mTask->mSeekedAudioData = audio;
mTask->mDoneAudioSeeking = true;
} else {
nsresult rv = mTask->DropAudioUpToSeekTarget(audio);
nsresult rv = DropAudioUpToSeekTarget(audio);
if (NS_FAILED(rv)) {
mTask->RejectIfExist(rv, __func__);
return;
@@ -950,7 +950,7 @@ public:
mTask->mSeekedVideoData = video;
mTask->mDoneVideoSeeking = true;
} else {
nsresult rv = mTask->DropVideoUpToSeekTarget(video.get());
nsresult rv = DropVideoUpToSeekTarget(video.get());
if (NS_FAILED(rv)) {
mTask->RejectIfExist(rv, __func__);
return;
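The call sites above change from delegating through the task (mTask->Drop...UpToSeekTarget(...)) to calling the state object's own private copy, which still reaches task-owned members via mTask. A heavily simplified sketch of that move with hypothetical class names (the real types are AccurateSeekTask and the seeking state, which holds a RefPtr to the task rather than a raw pointer):

// Hypothetical, simplified illustration of the refactoring; not the real classes.
class Task {
public:
  int mAudioRate = 44100;  // task-owned data the helper still needs
};

class SeekingState {
public:
  explicit SeekingState(Task* aTask) : mTask(aTask) {}

  void HandleAudioDecoded() {
    // Before this patch the state delegated: mTask->DropAudioUpToSeekTarget(...).
    // After it, the helper is the state's own private member:
    DropAudioUpToSeekTarget();
  }

private:
  void DropAudioUpToSeekTarget() {
    // Task-owned fields are still reached through mTask.
    (void)mTask->mAudioRate;
  }

  Task* mTask;  // the real code keeps a strong reference
};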
@@ -1133,6 +1133,118 @@ private:
}
}
nsresult DropAudioUpToSeekTarget(MediaData* aSample)
{
RefPtr<AudioData> audio(aSample->As<AudioData>());
MOZ_ASSERT(audio && mTask->mTarget.IsAccurate());
CheckedInt64 sampleDuration = FramesToUsecs(audio->mFrames, mTask->mAudioRate);
if (!sampleDuration.isValid()) {
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
if (audio->mTime + sampleDuration.value() <= mTask->mTarget.GetTime().ToMicroseconds()) {
// Our seek target lies after the frames in this AudioData. Don't
// push it onto the audio queue, and keep decoding forwards.
return NS_OK;
}
if (audio->mTime > mTask->mTarget.GetTime().ToMicroseconds()) {
// The seek target doesn't lie in the audio block just after the last
// audio frames we've seen which were before the seek target. This
// could have been the first audio data we've seen after seek, i.e. the
// seek terminated after the seek target in the audio stream. Just
// abort the audio decode-to-target, the state machine will play
// silence to cover the gap. Typically this happens in poorly muxed
// files.
SWARN("Audio not synced after seek, maybe a poorly muxed file?");
mTask->mSeekedAudioData = audio;
mTask->mDoneAudioSeeking = true;
return NS_OK;
}
// The seek target lies somewhere in this AudioData's frames, strip off
// any frames which lie before the seek target, so we'll begin playback
// exactly at the seek target.
NS_ASSERTION(mTask->mTarget.GetTime().ToMicroseconds() >= audio->mTime,
"Target must at or be after data start.");
NS_ASSERTION(mTask->mTarget.GetTime().ToMicroseconds() < audio->mTime + sampleDuration.value(),
"Data must end after target.");
CheckedInt64 framesToPrune =
UsecsToFrames(mTask->mTarget.GetTime().ToMicroseconds() - audio->mTime, mTask->mAudioRate);
if (!framesToPrune.isValid()) {
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
if (framesToPrune.value() > audio->mFrames) {
// We've messed up somehow. Don't try to trim frames, the |frames|
// variable below will overflow.
SWARN("Can't prune more frames that we have!");
return NS_ERROR_FAILURE;
}
uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value());
uint32_t channels = audio->mChannels;
AlignedAudioBuffer audioData(frames * channels);
if (!audioData) {
return NS_ERROR_OUT_OF_MEMORY;
}
memcpy(audioData.get(),
audio->mAudioData.get() + (framesToPrune.value() * channels),
frames * channels * sizeof(AudioDataValue));
CheckedInt64 duration = FramesToUsecs(frames, mTask->mAudioRate);
if (!duration.isValid()) {
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
RefPtr<AudioData> data(new AudioData(audio->mOffset,
mTask->mTarget.GetTime().ToMicroseconds(),
duration.value(),
frames,
Move(audioData),
channels,
audio->mRate));
MOZ_ASSERT(!mTask->mSeekedAudioData, "Should be the 1st sample after seeking");
mTask->mSeekedAudioData = data;
mTask->mDoneAudioSeeking = true;
return NS_OK;
}
nsresult DropVideoUpToSeekTarget(MediaData* aSample)
{
RefPtr<VideoData> video(aSample->As<VideoData>());
MOZ_ASSERT(video);
SLOG("DropVideoUpToSeekTarget() frame [%lld, %lld]",
video->mTime, video->GetEndTime());
const int64_t target = mTask->mTarget.GetTime().ToMicroseconds();
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (target >= video->GetEndTime()) {
SLOG("DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
video->mTime, video->GetEndTime(), target);
mTask->mFirstVideoFrameAfterSeek = video;
} else {
if (target >= video->mTime && video->GetEndTime() >= target) {
// The seek target lies inside this frame's time slice. Adjust the frame's
// start time to match the seek target. We do this by replacing the
// first frame with a shallow copy which has the new timestamp.
RefPtr<VideoData> temp = VideoData::ShallowCopyUpdateTimestamp(video.get(), target);
video = temp;
}
mTask->mFirstVideoFrameAfterSeek = nullptr;
SLOG("DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
video->mTime, video->GetEndTime(), target);
MOZ_ASSERT(!mTask->mSeekedVideoData, "Should be the 1st sample after seeking");
mTask->mSeekedVideoData = video;
mTask->mDoneVideoSeeking = true;
}
return NS_OK;
}
void OnSeekTaskResolved(const SeekTaskResolveValue& aValue)
{
mSeekTaskRequest.Complete();