Mirror of https://github.com/mozilla/gecko-dev.git
Bug 778077 - Implement HTMLMediaElement.fastSeek(time). r=cajbir
Implement HTMLMediaElement.fastSeek(), basically by changing all the MediaDecoderReader::Seek() overrides to not call MediaDecoderReader::DecodeToTarget(), and have MediaDecoderReader::DecodeSeek() call DecodeToTarget() if we're doing an accurate (non-fast) seek.

Update gizmo.mp4 to have a keyframe every second, instead of only 1 keyframe at the start of the stream. This makes the unit test I added more useful for mp4.

I pushed most of the seek target clamping logic in MediaDecoder up into HTMLMediaElement, so that we're clamping in fewer places. Note MediaDecoderStateMachine::Seek() still sanity checks the seek target.

We have to update the currentTime/MediaDecoder playback position after a seek completes now, rather than assuming the seek always got it exactly right.

Removed those assertions about the seek target lying in the first frame after seek, since sometimes the media doesn't have samples for all streams after a seek (either due to the media being encoded like that, or because of a bug in the platform's decoder, not entirely sure).

Green: https://tbpl.mozilla.org/?tree=Try&rev=b028258565e2

* * *

Bug 778077 - Fix up MediaOMXReader fastseek to ensure audio stream stays in sync with video stream. r=cajbir
This commit is contained in:
Parent: c1d30ea962
Commit: b12df215bb
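For reference, a minimal usage sketch (hypothetical page script, not part of this patch) of the API this commit adds. fastSeek() trades precision for speed by seeking to the preceding sync point, whereas assigning currentTime performs an accurate seek:

var video = document.querySelector("video");
video.addEventListener("seeked", function () {
  // After fastSeek(), playback may land on the nearest preceding keyframe,
  // so currentTime can end up at or before the requested time.
  console.log("landed at " + video.currentTime);
});
video.fastSeek(12.3);          // fast seek: nearest preceding sync point
// video.currentTime = 12.3;   // accurate seek: exact target time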
@@ -19,6 +19,7 @@
#include "mozilla/Attributes.h"
#include "mozilla/dom/AudioChannelBinding.h"
#include "mozilla/dom/TextTrackManager.h"
#include "MediaDecoder.h"

// Define to output information on decoding and painting framerate
/* #define DEBUG_FRAME_RATE 1 */

@@ -393,6 +394,8 @@ public:

void SetCurrentTime(double aCurrentTime, ErrorResult& aRv);

void FastSeek(double aTime, ErrorResult& aRv);

double Duration() const;

bool Paused() const

@@ -861,6 +864,12 @@ protected:
// This method does the check for muting/fading/unmuting the audio channel.
nsresult UpdateChannelMuteState(mozilla::dom::AudioChannelState aCanPlay);

// Seeks to aTime seconds. aSeekType can be Exact to seek to exactly the
// seek target, or PrevSyncPoint if a quicker but less precise seek is
// desired, and we'll seek to the sync point (keyframe and/or start of the
// next block of audio samples) preceeding seek target.
void Seek(double aTime, SeekTarget::Type aSeekType, ErrorResult& aRv);

// Update the audio channel playing state
virtual void UpdateAudioChannelPlayingState();

@@ -1323,10 +1323,63 @@ NS_IMETHODIMP HTMLMediaElement::GetCurrentTime(double* aCurrentTime)
return NS_OK;
}

void
HTMLMediaElement::FastSeek(double aTime, ErrorResult& aRv)
{
Seek(aTime, SeekTarget::PrevSyncPoint, aRv);
}

void
HTMLMediaElement::SetCurrentTime(double aCurrentTime, ErrorResult& aRv)
{
MOZ_ASSERT(aCurrentTime == aCurrentTime);
Seek(aCurrentTime, SeekTarget::Accurate, aRv);
}

/**
* Check if aValue is inside a range of aRanges, and if so sets aIsInRanges
* to true and put the range index in aIntervalIndex. If aValue is not
* inside a range, aIsInRanges is set to false, and aIntervalIndex
* is set to the index of the range which ends immediately before aValue
* (and can be -1 if aValue is before aRanges.Start(0)). Returns NS_OK
* on success, and NS_ERROR_FAILURE on failure.
*/
static nsresult
IsInRanges(dom::TimeRanges& aRanges,
double aValue,
bool& aIsInRanges,
int32_t& aIntervalIndex)
{
aIsInRanges = false;
uint32_t length;
nsresult rv = aRanges.GetLength(&length);
NS_ENSURE_SUCCESS(rv, rv);
for (uint32_t i = 0; i < length; i++) {
double start, end;
rv = aRanges.Start(i, &start);
NS_ENSURE_SUCCESS(rv, rv);
if (start > aValue) {
aIntervalIndex = i - 1;
return NS_OK;
}
rv = aRanges.End(i, &end);
NS_ENSURE_SUCCESS(rv, rv);
if (aValue <= end) {
aIntervalIndex = i;
aIsInRanges = true;
return NS_OK;
}
}
aIntervalIndex = length - 1;
return NS_OK;
}

void
HTMLMediaElement::Seek(double aTime,
SeekTarget::Type aSeekType,
ErrorResult& aRv)
{
// aTime should be non-NaN.
MOZ_ASSERT(aTime == aTime);

StopSuspendingAfterFirstFrame();

@@ -1350,34 +1403,98 @@ HTMLMediaElement::SetCurrentTime(double aCurrentTime, ErrorResult& aRv)
if (mCurrentPlayRangeStart != rangeEndTime) {
mPlayed->Add(mCurrentPlayRangeStart, rangeEndTime);
}
// Reset the current played range start time. We'll re-set it once
// the seek completes.
mCurrentPlayRangeStart = -1.0;
}

if (!mDecoder) {
LOG(PR_LOG_DEBUG, ("%p SetCurrentTime(%f) failed: no decoder", this, aCurrentTime));
LOG(PR_LOG_DEBUG, ("%p SetCurrentTime(%f) failed: no decoder", this, aTime));
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}

if (mReadyState == nsIDOMHTMLMediaElement::HAVE_NOTHING) {
LOG(PR_LOG_DEBUG, ("%p SetCurrentTime(%f) failed: no source", this, aCurrentTime));
LOG(PR_LOG_DEBUG, ("%p SetCurrentTime(%f) failed: no source", this, aTime));
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}

// Clamp the time to [0, duration] as required by the spec.
double clampedTime = std::max(0.0, aCurrentTime);
double duration = mDecoder->GetDuration();
if (duration >= 0) {
clampedTime = std::min(clampedTime, duration);
// Clamp the seek target to inside the seekable ranges.
dom::TimeRanges seekable;
if (NS_FAILED(mDecoder->GetSeekable(&seekable))) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}
uint32_t length = 0;
seekable.GetLength(&length);
if (!length) {
return;
}

// If the position we want to seek to is not in a seekable range, we seek
// to the closest position in the seekable ranges instead. If two positions
// are equally close, we seek to the closest position from the currentTime.
// See seeking spec, point 7 :
// http://www.whatwg.org/specs/web-apps/current-work/multipage/the-video-element.html#seeking
int32_t range = 0;
bool isInRange = false;
if (NS_FAILED(IsInRanges(seekable, aTime, isInRange, range))) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}
if (!isInRange) {
if (range != -1) {
// |range + 1| can't be negative, because the only possible negative value
// for |range| is -1.
if (uint32_t(range + 1) < length) {
double leftBound, rightBound;
if (NS_FAILED(seekable.End(range, &leftBound))) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}
if (NS_FAILED(seekable.Start(range + 1, &rightBound))) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}
double distanceLeft = Abs(leftBound - aTime);
double distanceRight = Abs(rightBound - aTime);
if (distanceLeft == distanceRight) {
double currentTime = CurrentTime();
distanceLeft = Abs(leftBound - currentTime);
distanceRight = Abs(rightBound - currentTime);
}
aTime = (distanceLeft < distanceRight) ? leftBound : rightBound;
} else {
// Seek target is after the end last range in seekable data.
// Clamp the seek target to the end of the last seekable range.
if (NS_FAILED(seekable.End(length - 1, &aTime))) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return;
}
}
} else {
// aTime is before the first range in |seekable|, the closest point we can
// seek to is the start of the first range.
seekable.Start(0, &aTime);
}
}

// TODO: The spec requires us to update the current time to reflect the
// actual seek target before beginning the synchronous section, but
// that requires changing all MediaDecoderReaders to support telling
// us the fastSeek target, and it's currently not possible to get
// this information as we don't yet control the demuxer for all
// MediaDecoderReaders.

mPlayingBeforeSeek = IsPotentiallyPlaying();
// The media backend is responsible for dispatching the timeupdate
// event if it changes the playback position as a result of the seek.
LOG(PR_LOG_DEBUG, ("%p SetCurrentTime(%f) starting seek", this, aCurrentTime));
aRv = mDecoder->Seek(clampedTime);
// Start a new range at position we seeked to.
mCurrentPlayRangeStart = mDecoder->GetCurrentTime();
LOG(PR_LOG_DEBUG, ("%p SetCurrentTime(%f) starting seek", this, aTime));
nsresult rv = mDecoder->Seek(aTime, aSeekType);
if (NS_FAILED(rv)) {
aRv.Throw(rv);
}

// We changed whether we're seeking so we need to AddRemoveSelfReference.
AddRemoveSelfReference();

@@ -3048,6 +3165,9 @@ void HTMLMediaElement::SeekCompleted()
if (mTextTrackManager) {
mTextTrackManager->DidSeek();
}
if (mCurrentPlayRangeStart == -1.0) {
mCurrentPlayRangeStart = CurrentTime();
}
}

void HTMLMediaElement::NotifySuspendedByCache(bool aIsSuspended)

@@ -133,6 +133,21 @@ VideoData* VideoData::ShallowCopyUpdateDuration(VideoData* aOther,
return v;
}

/* static */
VideoData* VideoData::ShallowCopyUpdateTimestamp(VideoData* aOther,
int64_t aTimestamp)
{
NS_ENSURE_TRUE(aOther, nullptr);
VideoData* v = new VideoData(aOther->mOffset,
aTimestamp,
aOther->GetEndTime() - aTimestamp,
aOther->mKeyframe,
aOther->mTimecode,
aOther->mDisplay);
v->mImage = aOther->mImage;
return v;
}

/* static */
void VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
VideoInfo& aInfo,

@@ -204,6 +204,12 @@ public:
static VideoData* ShallowCopyUpdateDuration(VideoData* aOther,
int64_t aDuration);

// Creates a new VideoData identical to aOther, but with a different
// specified timestamp. All data from aOther is copied into the new
// VideoData, as ShallowCopyUpdateDuration() does.
static VideoData* ShallowCopyUpdateTimestamp(VideoData* aOther,
int64_t aTimestamp);

// Initialize PlanarYCbCrImage. Only When aCopyData is true,
// video data is copied to PlanarYCbCrImage.
static void SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,

@@ -124,7 +124,7 @@ void MediaDecoder::SetDormantIfNecessary(bool aDormant)
DestroyDecodedStream();
mDecoderStateMachine->SetDormant(true);

mRequestedSeekTime = mCurrentTime;
mRequestedSeekTarget = SeekTarget(mCurrentTime, SeekTarget::Accurate);
if (mPlayState == PLAY_STATE_PLAYING){
mNextState = PLAY_STATE_PLAYING;
} else {

@@ -420,7 +420,6 @@ MediaDecoder::MediaDecoder() :
mIsExitingDormant(false),
mPlayState(PLAY_STATE_PAUSED),
mNextState(PLAY_STATE_PAUSED),
mRequestedSeekTime(-1.0),
mCalledResourceLoaded(false),
mIgnoreProgressData(false),
mInfiniteStream(false),

@@ -605,99 +604,27 @@ nsresult MediaDecoder::Play()
return NS_OK;
}
if (mPlayState == PLAY_STATE_ENDED)
return Seek(0);
return Seek(0, SeekTarget::PrevSyncPoint);

ChangeState(PLAY_STATE_PLAYING);
return NS_OK;
}

/**
* Returns true if aValue is inside a range of aRanges, and put the range
* index in aIntervalIndex if it is not null.
* If aValue is not inside a range, false is returned, and aIntervalIndex, if
* not null, is set to the index of the range which ends immediately before aValue
* (and can be -1 if aValue is before aRanges.Start(0)).
*/
static bool
IsInRanges(dom::TimeRanges& aRanges, double aValue, int32_t& aIntervalIndex)
{
uint32_t length;
aRanges.GetLength(&length);
for (uint32_t i = 0; i < length; i++) {
double start, end;
aRanges.Start(i, &start);
if (start > aValue) {
aIntervalIndex = i - 1;
return false;
}
aRanges.End(i, &end);
if (aValue <= end) {
aIntervalIndex = i;
return true;
}
}
aIntervalIndex = length - 1;
return false;
}

nsresult MediaDecoder::Seek(double aTime)
nsresult MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType)
{
MOZ_ASSERT(NS_IsMainThread());
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());

NS_ABORT_IF_FALSE(aTime >= 0.0, "Cannot seek to a negative value.");

dom::TimeRanges seekable;
nsresult res;
uint32_t length = 0;
res = GetSeekable(&seekable);
NS_ENSURE_SUCCESS(res, NS_OK);
int64_t timeUsecs = 0;
nsresult rv = SecondsToUsecs(aTime, timeUsecs);
NS_ENSURE_SUCCESS(rv, rv);

seekable.GetLength(&length);
if (!length) {
return NS_OK;
}

// If the position we want to seek to is not in a seekable range, we seek
// to the closest position in the seekable ranges instead. If two positions
// are equally close, we seek to the closest position from the currentTime.
// See seeking spec, point 7 :
// http://www.whatwg.org/specs/web-apps/current-work/multipage/the-video-element.html#seeking
int32_t range = 0;
if (!IsInRanges(seekable, aTime, range)) {
if (range != -1) {
// |range + 1| can't be negative, because the only possible negative value
// for |range| is -1.
if (uint32_t(range + 1) < length) {
double leftBound, rightBound;
res = seekable.End(range, &leftBound);
NS_ENSURE_SUCCESS(res, NS_OK);
res = seekable.Start(range + 1, &rightBound);
NS_ENSURE_SUCCESS(res, NS_OK);
double distanceLeft = Abs(leftBound - aTime);
double distanceRight = Abs(rightBound - aTime);
if (distanceLeft == distanceRight) {
distanceLeft = Abs(leftBound - mCurrentTime);
distanceRight = Abs(rightBound - mCurrentTime);
}
aTime = (distanceLeft < distanceRight) ? leftBound : rightBound;
} else {
// Seek target is after the end last range in seekable data.
// Clamp the seek target to the end of the last seekable range.
res = seekable.End(length - 1, &aTime);
NS_ENSURE_SUCCESS(res, NS_OK);
}
} else {
// aTime is before the first range in |seekable|, the closest point we can
// seek to is the start of the first range.
seekable.Start(0, &aTime);
}
}

mRequestedSeekTime = aTime;
mRequestedSeekTarget = SeekTarget(timeUsecs, aSeekType);
mCurrentTime = aTime;

// If we are already in the seeking state, then setting mRequestedSeekTime
// If we are already in the seeking state, then setting mRequestedSeekTarget
// above will result in the new seek occurring when the current seek
// completes.
if ((mPlayState != PLAY_STATE_LOADING || !mIsDormant) && mPlayState != PLAY_STATE_SEEKING) {

@@ -816,7 +743,7 @@ void MediaDecoder::MetadataLoaded(int aChannels, int aRate, bool aHasAudio, bool
// state if we're still set to the original
// loading state.
if (mPlayState == PLAY_STATE_LOADING) {
if (mRequestedSeekTime >= 0.0) {
if (mRequestedSeekTarget.IsValid()) {
ChangeState(PLAY_STATE_SEEKING);
}
else {

@@ -1143,7 +1070,7 @@ void MediaDecoder::SeekingStopped()

// An additional seek was requested while the current seek was
// in operation.
if (mRequestedSeekTime >= 0.0) {
if (mRequestedSeekTarget.IsValid()) {
ChangeState(PLAY_STATE_SEEKING);
seekWasAborted = true;
} else {

@@ -1152,6 +1079,8 @@ void MediaDecoder::SeekingStopped()
}
}

PlaybackPositionChanged();

if (mOwner) {
UpdateReadyStateForData();
if (!seekWasAborted) {

@@ -1176,7 +1105,7 @@ void MediaDecoder::SeekingStoppedAtEnd()

// An additional seek was requested while the current seek was
// in operation.
if (mRequestedSeekTime >= 0.0) {
if (mRequestedSeekTarget.IsValid()) {
ChangeState(PLAY_STATE_SEEKING);
seekWasAborted = true;
} else {

@@ -1186,6 +1115,8 @@ void MediaDecoder::SeekingStoppedAtEnd()
}
}

PlaybackPositionChanged();

if (mOwner) {
UpdateReadyStateForData();
if (!seekWasAborted) {

@@ -1254,8 +1185,8 @@ void MediaDecoder::ApplyStateToStateMachine(PlayState aState)
mDecoderStateMachine->Play();
break;
case PLAY_STATE_SEEKING:
mDecoderStateMachine->Seek(mRequestedSeekTime);
mRequestedSeekTime = -1.0;
mDecoderStateMachine->Seek(mRequestedSeekTarget);
mRequestedSeekTarget.Reset();
break;
default:
/* No action needed */

@@ -226,6 +226,39 @@ static const uint32_t FRAMEBUFFER_LENGTH_MAX = 16384;
#undef GetCurrentTime
#endif

// Stores the seek target; the time to seek to, and whether an Accurate,
// or "Fast" (nearest keyframe) seek was requested.
struct SeekTarget {
enum Type {
Invalid,
PrevSyncPoint,
Accurate
};
SeekTarget()
: mTime(-1.0)
, mType(SeekTarget::Invalid)
{
}
SeekTarget(int64_t aTimeUsecs, Type aType)
: mTime(aTimeUsecs)
, mType(aType)
{
}
bool IsValid() const {
return mType != SeekTarget::Invalid;
}
void Reset() {
mTime = -1;
mType = SeekTarget::Invalid;
}
// Seek target time in microseconds.
int64_t mTime;
// Whether we should seek "Fast", or "Accurate".
// "Fast" seeks to the seek point preceeding mTime, whereas
// "Accurate" seeks as close as possible to mTime.
Type mType;
};

class MediaDecoder : public nsIObserver,
public AbstractMediaDecoder
{

@@ -310,7 +343,9 @@ public:
virtual double GetCurrentTime();

// Seek to the time position in (seconds) from the start of the video.
virtual nsresult Seek(double aTime);
// If aDoFastSeek is true, we'll seek to the sync point/keyframe preceeding
// the seek target.
virtual nsresult Seek(double aTime, SeekTarget::Type aSeekType);

// Enables decoders to supply an enclosing byte range for a seek offset.
// E.g. used by ChannelMediaResource to download a whole cluster for

@@ -1104,9 +1139,9 @@ protected:
// This can only be changed on the main thread while holding the decoder
// monitor. Thus, it can be safely read while holding the decoder monitor
// OR on the main thread.
// If the value is negative then no seek has been requested. When a seek is
// started this is reset to negative.
double mRequestedSeekTime;
// If the SeekTarget's IsValid() accessor returns false, then no seek has
// been requested. When a seek is started this is reset to invalid.
SeekTarget mRequestedSeekTarget;

// True when we have fully loaded the resource and reported that
// to the element (i.e. reached NETWORK_LOADED state).

@@ -179,8 +179,10 @@ nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)

// Decode forward to the target frame. Start with video, if we have it.
if (HasVideo()) {
// Note: when decoding hits the end of stream we must keep the last frame
// in the video queue so that we'll have something to display after the
// seek completes. This makes our logic a bit messy.
bool eof = false;
int64_t startTime = -1;
nsAutoPtr<VideoData> video;
while (HasVideo() && !eof) {
while (VideoQueue().GetSize() == 0 && !eof) {

@@ -196,6 +198,9 @@ nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
if (eof) {
// Hit end of file, we want to display the last frame of the video.
if (video) {
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) repushing video frame [%lld, %lld] at EOF",
aTarget, video->mTime, video->GetEndTime()));
VideoQueue().PushFront(video.forget());
}
VideoQueue().Finish();

@@ -205,11 +210,25 @@ nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (video && video->GetEndTime() <= aTarget) {
if (startTime == -1) {
startTime = video->mTime;
}
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) pop video frame [%lld, %lld]",
aTarget, video->mTime, video->GetEndTime()));
VideoQueue().PopFront();
} else {
// Found a frame after or encompasing the seek target.
if (aTarget >= video->mTime && video->GetEndTime() >= aTarget) {
// The seek target lies inside this frame's time slice. Adjust the frame's
// start time to match the seek target. We do this by replacing the
// first frame with a shallow copy which has the new timestamp.
VideoQueue().PopFront();
VideoData* temp = VideoData::ShallowCopyUpdateTimestamp(video, aTarget);
video = temp;
VideoQueue().PushFront(video);
}
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) found target video frame [%lld,%lld]",
aTarget, video->mTime, video->GetEndTime()));

video.forget();
break;
}

@@ -220,7 +239,11 @@ nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
return NS_ERROR_FAILURE;
}
}
DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld", startTime));
#ifdef PR_LOGGING
const VideoData* front = VideoQueue().PeekFront();
DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld",
front ? front->mTime : -1));
#endif
}

if (HasAudio()) {

@@ -302,7 +325,13 @@ nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
}
}

DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) End", aTarget));
#ifdef PR_LOGGING
const VideoData* v = VideoQueue().PeekFront();
const AudioData* a = AudioQueue().PeekFront();
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) finished v=%lld a=%lld",
aTarget, v ? v->mTime : -1, a ? a->mTime : -1));
#endif

return NS_OK;
}

@@ -155,11 +155,13 @@ public:
AudioData* DecodeToFirstAudioData();
VideoData* DecodeToFirstVideoData();

protected:
// Pumps the decode until we reach frames required to play at time aTarget
// (usecs).
// Decodes samples until we reach frames required to play at time aTarget
// (usecs). This also trims the samples to start exactly at aTarget,
// by discarding audio samples and adjusting start times of video frames.
nsresult DecodeToTarget(int64_t aTarget);

protected:

// Reference to the owning decoder object.
AbstractMediaDecoder* mDecoder;

@@ -166,7 +166,6 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
mPlayDuration(0),
mStartTime(-1),
mEndTime(-1),
mSeekTime(0),
mFragmentEndTime(-1),
mReader(aReader),
mCurrentFrameTime(0),

@@ -1415,7 +1414,7 @@ void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer,
}
}

void MediaDecoderStateMachine::Seek(double aTime)
void MediaDecoderStateMachine::Seek(const SeekTarget& aTarget)
{
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());

@@ -1431,26 +1430,22 @@ void MediaDecoderStateMachine::Seek(double aTime)
"We shouldn't already be seeking");
NS_ASSERTION(mState >= DECODER_STATE_DECODING,
"We should have loaded metadata");
double t = aTime * static_cast<double>(USECS_PER_S);
if (t > INT64_MAX) {
// Prevent integer overflow.
return;
}

mSeekTime = static_cast<int64_t>(t) + mStartTime;
NS_ASSERTION(mSeekTime >= mStartTime && mSeekTime <= mEndTime,
"Can only seek in range [0,duration]");

// Bound the seek time to be inside the media range.
NS_ASSERTION(mStartTime != -1, "Should know start time by now");
NS_ASSERTION(mEndTime != -1, "Should know end time by now");
mSeekTime = std::min(mSeekTime, mEndTime);
mSeekTime = std::max(mStartTime, mSeekTime);
mBasePosition = mSeekTime - mStartTime;
DECODER_LOG(PR_LOG_DEBUG, ("%p Changed state to SEEKING (to %f)", mDecoder.get(), aTime));
int64_t seekTime = aTarget.mTime + mStartTime;
seekTime = std::min(seekTime, mEndTime);
seekTime = std::max(mStartTime, seekTime);
NS_ASSERTION(seekTime >= mStartTime && seekTime <= mEndTime,
"Can only seek in range [0,duration]");
mSeekTarget = SeekTarget(seekTime, aTarget.mType);

mBasePosition = seekTime - mStartTime;
DECODER_LOG(PR_LOG_DEBUG, ("%p Changed state to SEEKING (to %ld)", mDecoder.get(), mSeekTarget.mTime));
mState = DECODER_STATE_SEEKING;
if (mDecoder->GetDecodedStream()) {
mDecoder->RecreateDecodedStream(mSeekTime - mStartTime);
mDecoder->RecreateDecodedStream(seekTime - mStartTime);
}
ScheduleStateMachine();
}

@@ -1944,11 +1939,11 @@ void MediaDecoderStateMachine::DecodeSeek()
// the lock since it won't deadlock. We check the state when
// acquiring the lock again in case shutdown has occurred
// during the time when we didn't have the lock.
int64_t seekTime = mSeekTime;
int64_t seekTime = mSeekTarget.mTime;
mDecoder->StopProgressUpdates();

bool currentTimeChanged = false;
int64_t mediaTime = GetMediaTime();
const int64_t mediaTime = GetMediaTime();
if (mediaTime != seekTime) {
currentTimeChanged = true;
// Stop playback now to ensure that while we're outside the monitor

@@ -1968,6 +1963,7 @@ void MediaDecoderStateMachine::DecodeSeek()
NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC);
}

int64_t newCurrentTime = seekTime;
if (currentTimeChanged) {
// The seek target is different than the current playback position,
// we'll need to seek the playback position, so shutdown our decode

@@ -1983,23 +1979,33 @@ void MediaDecoderStateMachine::DecodeSeek()
mStartTime,
mEndTime,
mediaTime);

if (NS_SUCCEEDED(res) && mSeekTarget.mType == SeekTarget::Accurate) {
res = mReader->DecodeToTarget(seekTime);
}
}

if (NS_SUCCEEDED(res)) {
AudioData* audio = HasAudio() ? mReader->AudioQueue().PeekFront() : nullptr;
MOZ_ASSERT(!audio ||
(audio->mTime <= seekTime &&
seekTime <= audio->mTime + audio->mDuration) ||
mReader->AudioQueue().IsFinished(),
"Seek target should lie inside the first audio block after seek");
int64_t startTime = (audio && audio->mTime < seekTime) ? audio->mTime : seekTime;
mAudioStartTime = startTime;
mPlayDuration = startTime - mStartTime;
int64_t nextSampleStartTime = 0;
VideoData* video = nullptr;
{
ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
video = mReader->FindStartTime(nextSampleStartTime);
}

// Setup timestamp state.
if (seekTime == mEndTime) {
newCurrentTime = mAudioStartTime = seekTime;
} else if (HasAudio()) {
AudioData* audio = mReader->AudioQueue().PeekFront();
newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
} else {
newCurrentTime = video ? video->mTime : seekTime;
}
mPlayDuration = newCurrentTime - mStartTime;

if (HasVideo()) {
VideoData* video = mReader->VideoQueue().PeekFront();
if (video) {
MOZ_ASSERT((video->mTime <= seekTime && seekTime <= video->GetEndTime()) ||
mReader->VideoQueue().IsFinished(),
"Seek target should lie inside the first frame after seek, unless it's the last frame.");
{
ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
RenderVideoFrame(video, TimeStamp::Now());

@@ -2020,10 +2026,6 @@ void MediaDecoderStateMachine::DecodeSeek()
return;
}

// Try to decode another frame to detect if we're at the end...
DECODER_LOG(PR_LOG_DEBUG, ("%p Seek completed, mCurrentFrameTime=%lld\n",
mDecoder.get(), mCurrentFrameTime));

// Change state to DECODING or COMPLETED now. SeekingStopped will
// call MediaDecoderStateMachine::Seek to reset our state to SEEKING
// if we need to seek again.

@@ -2049,6 +2051,18 @@ void MediaDecoderStateMachine::DecodeSeek()
stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStopped);
StartDecoding();
}

if (newCurrentTime != mediaTime) {
UpdatePlaybackPositionInternal(newCurrentTime);
if (mDecoder->GetDecodedStream()) {
SetSyncPointForMediaStream();
}
}

// Try to decode another frame to detect if we're at the end...
DECODER_LOG(PR_LOG_DEBUG, ("%p Seek completed, mCurrentFrameTime=%lld\n",
mDecoder.get(), mCurrentFrameTime));

{
ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC);

@@ -2847,7 +2861,7 @@ void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate)
if (!HasAudio()) {
// mBasePosition is a position in the video stream, not an absolute time.
if (mState == DECODER_STATE_SEEKING) {
mBasePosition = mSeekTime - mStartTime;
mBasePosition = mSeekTarget.mTime - mStartTime;
} else {
mBasePosition = GetVideoStreamPosition();
}

@@ -190,8 +190,8 @@ public:
// that the state has changed.
void Play();

// Seeks to aTime in seconds.
void Seek(double aTime);
// Seeks to the decoder to aTarget asynchronously.
void Seek(const SeekTarget& aTarget);

// Returns the current playback position in seconds.
// Called from the main thread to get the current frame time. The decoder

@@ -725,7 +725,7 @@ private:
// Position to seek to in microseconds when the seek state transition occurs.
// The decoder monitor lock must be obtained before reading or writing
// this value. Accessed on main and decode thread.
int64_t mSeekTime;
SeekTarget mSeekTarget;

// Media Fragment end time in microseconds. Access controlled by decoder monitor.
int64_t mFragmentEndTime;

@@ -28,6 +28,14 @@ CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
return (CheckedInt64(aUsecs) * aRate) / USECS_PER_S;
}

nsresult SecondsToUsecs(double aSeconds, int64_t& aOutUsecs) {
if (aSeconds * double(USECS_PER_S) > INT64_MAX) {
return NS_ERROR_FAILURE;
}
aOutUsecs = int64_t(aSeconds * double(USECS_PER_S));
return NS_OK;
}

static int32_t ConditionDimension(float aValue)
{
// This will exclude NaNs and too-big values.

@@ -128,6 +128,10 @@ static const int64_t USECS_PER_MS = 1000;
// Converts seconds to milliseconds.
#define MS_TO_SECONDS(s) ((double)(s) / (PR_MSEC_PER_SEC))

// Converts from seconds to microseconds. Returns failure if the resulting
// integer is too big to fit in an int64_t.
nsresult SecondsToUsecs(double aSeconds, int64_t& aOutUsecs);

// The maximum height and width of the video. Used for
// sanitizing the memory allocation of the RGB buffer.
// The maximum resolution we anticipate encountering in the

@@ -390,7 +390,7 @@ DirectShowReader::Seek(int64_t aTargetUs,
hr = mControl->Run();
NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

return DecodeToTarget(aTargetUs);
return NS_OK;
}

void

@@ -758,8 +758,11 @@ nsresult GStreamerReader::Seek(int64_t aTarget,
LOG(PR_LOG_DEBUG, "%p About to seek to %" GST_TIME_FORMAT,
mDecoder, GST_TIME_ARGS(seekPos));

if (!gst_element_seek_simple(mPlayBin, GST_FORMAT_TIME,
static_cast<GstSeekFlags>(GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE), seekPos)) {
int flags = GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_KEY_UNIT;
if (!gst_element_seek_simple(mPlayBin,
GST_FORMAT_TIME,
static_cast<GstSeekFlags>(flags),
seekPos)) {
LOG(PR_LOG_ERROR, "seek failed");
return NS_ERROR_FAILURE;
}

@@ -769,7 +772,7 @@ nsresult GStreamerReader::Seek(int64_t aTarget,
gst_message_unref(message);
LOG(PR_LOG_DEBUG, "seek completed");

return DecodeToTarget(aTarget);
return NS_OK;
}

nsresult GStreamerReader::GetBuffered(dom::TimeRanges* aBuffered,

@@ -1315,9 +1315,9 @@ nsresult OggReader::SeekInUnbuffered(int64_t aTarget,
}

nsresult OggReader::Seek(int64_t aTarget,
int64_t aStartTime,
int64_t aEndTime,
int64_t aCurrentTime)
int64_t aStartTime,
int64_t aEndTime,
int64_t aCurrentTime)
{
NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
if (mIsChained)

@@ -1379,10 +1379,7 @@ nsresult OggReader::Seek(int64_t aTarget,
}
}

// The decode position must now be either close to the seek target, or
// we've seeked to before the keyframe before the seek target. Decode
// forward to the seek target frame.
return DecodeToTarget(aTarget);
return NS_OK;
}

// Reads a page from the media resource.

@@ -350,9 +350,23 @@ nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndT
container->GetImageContainer()->ClearAllImagesExceptFront();
}

mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;
if (mHasAudio && mHasVideo) {
// The OMXDecoder seeks/demuxes audio and video streams separately. So if
// we seek both audio and video to aTarget, the audio stream can typically
// seek closer to the seek target, since typically every audio block is
// a sync point, whereas for video there are only keyframes once every few
// seconds. So if we have both audio and video, we must seek the video
// stream to the preceeding keyframe first, get the stream time, and then
// seek the audio stream to match the video stream's time. Otherwise, the
// audio and video streams won't be in sync after the seek.
mVideoSeekTimeUs = aTarget;
const VideoData* v = DecodeToFirstVideoData();
mAudioSeekTimeUs = v ? v->mTime : aTarget;
} else {
mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;
}

return DecodeToTarget(aTarget);
return NS_OK;
}

static uint64_t BytesToTime(int64_t offset, uint64_t length, uint64_t durationUs) {

@@ -328,7 +328,7 @@ nsresult MediaPluginReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aE

mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;

return DecodeToTarget(aTarget);
return NS_OK;
}

MediaPluginReader::ImageBufferCallback::ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer) :

Binary data: content/media/test/gizmo.mp4 (binary file not shown)
@@ -352,6 +352,13 @@ var gSeekTests = [
{ name:"bogus.duh", type:"bogus/duh", duration:123 }
];

var gFastSeekTests = [
{ name:"gizmo.mp4", type:"video/mp4", keyframes:[0, 1.0, 2.0, 3.0, 4.0, 5.0 ] },
// Note: Not all keyframes in the file are actually referenced in the Cues in this file.
{ name:"seek.webm", type:"video/webm", keyframes:[0, 0.8, 1.6, 2.4, 3.2]},
// Note: omitting Ogg from this test, as I'm not sure our Ogg seek code is optimal/correct - cpearce
];

function IsWindows8OrLater() {
var re = /Windows NT (\d.\d)/;
var winver = navigator.userAgent.match(re);

@@ -346,6 +346,7 @@ skip-if = buildapp == 'b2g' || e10s
skip-if = buildapp == 'b2g' # b2g(6 failures) b2g-debug(6 failures) b2g-desktop(6 failures)
[test_error_on_404.html]
skip-if = buildapp == 'b2g' && (toolkit != 'gonk' || debug)) # b2g-debug(timed out) b2g-desktop(timed out)
[test_fastSeek.html]
[test_framebuffer.html]
skip-if = buildapp == 'b2g' # b2g(timed out) b2g-debug(timed out) b2g-desktop(timed out)
[test_info_leak.html]

@@ -14,14 +14,16 @@ function seekStarted() {
if (completed)
return;
//is(v.currentTime, v.duration, "seeking: currentTime must be duration");
ok(Math.abs(v.currentTime - v.duration) < 0.01, "seeking: currentTime must be duration");
ok(Math.abs(v.currentTime - v.duration) < 0.01,
"seeking: currentTime (" + v.currentTime + ") must be duration (" + v.duration + ")");
}

function seekEnded() {
if (completed)
return;
//is(v.currentTime, v.duration, "seeked: currentTime must be duration");
ok(Math.abs(v.currentTime - v.duration) < 0.01, "seeked: currentTime must be duration");
ok(Math.abs(v.currentTime - v.duration) < 0.01,
"seeked: currentTime (" + v.currentTime + ") must be duration (" + v.duration + ")");
is(v.seeking, false, "seeking flag on end should be false");
}

@@ -30,7 +32,8 @@ function playbackEnded() {
return;
completed = true;
//is(v.currentTime, v.duration, "ended: currentTime must be duration");
ok(Math.abs(v.currentTime - v.duration) < 0.01, "ended: currentTime must be duration");
ok(Math.abs(v.currentTime - v.duration) < 0.01,
"ended: currentTime (" + v.currentTime + ") must be duration (" + v.duration + ")");
is(v.seeking, false, "seeking flag on end should be false");
is(v.ended, true, "ended must be true");
finish();

@@ -0,0 +1,88 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=778077
-->
<head>
<meta charset="utf-8">
<title>Test for Bug 778077</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="text/javascript" src="manifest.js"></script>
<script type="application/javascript">

/** Test for Bug 778077 - HTMLMediaElement.fastSeek() **/
// Iterate through a list of keyframe timestamps, and seek to
// halfway between the keyframe and the keyframe after it.
var manager = new MediaTestManager;

function doSeek(v) {
// fastSeek to half way between this keyframe and the next, or if this is the last
// keyframe seek to halfway between this keyframe and the end of media.
var nextKeyFrame = (v.keyframeIndex + 1) < v.keyframes.length ? v.keyframes[v.keyframeIndex + 1] : v.duration;
v.target = (v.keyframes[v.keyframeIndex] + nextKeyFrame) / 2;
v.fastSeek(v.target);
ok(Math.abs(v.currentTime - v.target) < 0.01,
v.name + " seekTo=" + v.target + " currentTime (" + v.currentTime + ") should be close to seek target initially");
}

function onloadedmetadata(event) {
var v = event.target;
doSeek(v);
}

function onseeked(event) {
var v = event.target;
var keyframe = v.keyframes[v.keyframeIndex];

// Check that the current time ended up roughly after the keyframe.
// We must be a bit fuzzy here, as the decoder backend may actually
// seek to the audio sample prior to the keyframe.
ok(v.currentTime >= keyframe - 0.05,
v.name + " seekTo=" + v.target + " currentTime (" + v.currentTime +
") should be end up roughly after keyframe (" + keyframe + ") after fastSeek");

ok(v.currentTime <= v.target,
v.name + " seekTo=" + v.target + " currentTime (" + v.currentTime +
") should be end up less than target after fastSeek");

v.keyframeIndex++
if (v.keyframeIndex == v.keyframes.length) {
manager.finished(v.token);
v.src = "";
v.parentNode.removeChild(v);
} else {
doSeek(v);
}
}

function startTest(test, token) {
manager.started(token);
v = document.createElement("video");
v.src = test.name;
v.name = test.name;
v.preload = "metadata";
v.token = token;
v.target = 0;
v.keyframes = test.keyframes;
v.keyframeIndex = 0;
ok(v.keyframes.length > 0, v.name + " - video should have at least one sync point");
v.addEventListener("loadedmetadata", onloadedmetadata);
v.addEventListener("seeked", onseeked);
document.body.appendChild(v);
}

manager.runTests(gFastSeekTests, startTest);

</script>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=778077">Mozilla Bug 778077</a>
<p id="display"></p>
<div id="content" style="display: none">

</div>
<pre id="test">
</pre>
</body>
</html>

@@ -1008,7 +1008,7 @@ nsresult WebMReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime,
if (r != 0) {
return NS_ERROR_FAILURE;
}
return DecodeToTarget(aTarget);
return NS_OK;
}

nsresult WebMReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)

@@ -886,7 +886,7 @@ WMFReader::Seek(int64_t aTargetUs,
hr = mSourceReader->SetCurrentPosition(GUID_NULL, var);
NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

return DecodeToTarget(aTargetUs);
return NS_OK;
}

} // namespace mozilla

@@ -47,7 +47,8 @@ interface HTMLMediaElement : HTMLElement {
// playback state
[SetterThrows]
attribute double currentTime;
// TODO: Bug 847375 - void fastSeek(double time);
[Throws]
void fastSeek(double time);
readonly attribute unrestricted double duration;
// TODO: Bug 847376 - readonly attribute any startDate;
readonly attribute boolean paused;

@@ -795,7 +795,11 @@
seekToPosition : function(newPosition) {
newPosition /= 1000; // convert from ms
this.log("+++ seeking to " + newPosition);
this.video.currentTime = newPosition;
if (isTouchControl) {
this.video.fastSeek(newPosition);
} else {
this.video.currentTime = newPosition;
}
},

setVolume : function(newVolume) {