Bug 568431 - Remove the requirement for mCallbackPeriod from the generic media backend. r=doublec

This commit is contained in:
Matthew Gregan 2010-05-31 16:02:00 +12:00
Parent c157381ae5
Commit 8db1fc8f64
10 changed files with 83 additions and 85 deletions

View file

@ -89,6 +89,7 @@ VideoData* VideoData::Create(nsVideoInfo& aInfo,
ImageContainer* aContainer,
PRInt64 aOffset,
PRInt64 aTime,
PRInt64 aEndTime,
const YCbCrBuffer& aBuffer,
PRBool aKeyframe,
PRInt64 aTimecode)
@ -135,7 +136,7 @@ VideoData* VideoData::Create(nsVideoInfo& aInfo,
return nsnull;
}
nsAutoPtr<VideoData> v(new VideoData(aOffset, aTime, aKeyframe, aTimecode));
nsAutoPtr<VideoData> v(new VideoData(aOffset, aTime, aEndTime, aKeyframe, aTimecode));
// Currently our decoder only knows how to output to PLANAR_YCBCR
// format.
Image::Format format = Image::PLANAR_YCBCR;

View file

@ -55,9 +55,7 @@ class nsBuiltinDecoderStateMachine;
class nsVideoInfo {
public:
nsVideoInfo()
: mFramerate(0.0),
mPixelAspectRatio(1.0),
mCallbackPeriod(1),
: mPixelAspectRatio(1.0),
mAudioRate(0),
mAudioChannels(0),
mFrame(0,0),
@ -65,16 +63,9 @@ public:
mHasVideo(PR_FALSE)
{}
// Frames per second.
float mFramerate;
// Pixel aspect ratio, as stored in the metadata.
float mPixelAspectRatio;
// Length of a video frame in milliseconds, or the callback period if
// there's no audio.
PRUint32 mCallbackPeriod;
// Samples per second.
PRUint32 mAudioRate;
@ -183,6 +174,7 @@ public:
ImageContainer* aContainer,
PRInt64 aOffset,
PRInt64 aTime,
PRInt64 aEndTime,
const YCbCrBuffer &aBuffer,
PRBool aKeyframe,
PRInt64 aTimecode);
@ -192,9 +184,10 @@ public:
// frame is played; this frame is identical to the previous.
static VideoData* CreateDuplicate(PRInt64 aOffset,
PRInt64 aTime,
PRInt64 aEndTime,
PRInt64 aTimecode)
{
return new VideoData(aOffset, aTime, aTimecode);
return new VideoData(aOffset, aTime, aEndTime, aTimecode);
}
~VideoData()
@ -208,6 +201,9 @@ public:
// Start time of frame in milliseconds.
PRInt64 mTime;
// End time of frame in milliseconds.
PRInt64 mEndTime;
// Codec specific internal time code. For Ogg based codecs this is the
// granulepos.
PRInt64 mTimecode;
@ -221,27 +217,32 @@ public:
PRPackedBool mKeyframe;
public:
VideoData(PRInt64 aOffset, PRInt64 aTime, PRInt64 aTimecode)
VideoData(PRInt64 aOffset, PRInt64 aTime, PRInt64 aEndTime, PRInt64 aTimecode)
: mOffset(aOffset),
mTime(aTime),
mEndTime(aEndTime),
mTimecode(aTimecode),
mDuplicate(PR_TRUE),
mKeyframe(PR_FALSE)
{
MOZ_COUNT_CTOR(VideoData);
NS_ASSERTION(aEndTime > aTime, "Frame must start before it ends.");
}
VideoData(PRInt64 aOffset,
PRInt64 aTime,
PRInt64 aEndTime,
PRBool aKeyframe,
PRInt64 aTimecode)
: mOffset(aOffset),
mTime(aTime),
mEndTime(aEndTime),
mTimecode(aTimecode),
mDuplicate(PR_FALSE),
mKeyframe(aKeyframe)
{
MOZ_COUNT_CTOR(VideoData);
NS_ASSERTION(aEndTime > aTime, "Frame must start before it ends.");
}
};

View file

@ -92,6 +92,9 @@ const unsigned AMPLE_AUDIO_MS = 2000;
// less than LOW_VIDEO_FRAMES frames.
static const PRUint32 LOW_VIDEO_FRAMES = 1;
// Arbitrary "frame duration" when playing only audio.
static const int AUDIO_DURATION_MS = 40;
nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDecoder,
nsBuiltinDecoderReader* aReader) :
mDecoder(aDecoder),
@ -107,7 +110,7 @@ nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDe
mCurrentFrameTime(0),
mAudioStartTime(-1),
mAudioEndTime(-1),
mVideoFrameTime(-1),
mVideoFrameEndTime(-1),
mVolume(1.0),
mSeekable(PR_TRUE),
mPositionChangeQueued(PR_FALSE),
@ -636,7 +639,7 @@ void nsBuiltinDecoderStateMachine::ResetPlayback()
{
NS_ASSERTION(IsCurrentThread(mDecoder->mStateMachineThread),
"Should be on state machine thread.");
mVideoFrameTime = -1;
mVideoFrameEndTime = -1;
mAudioStartTime = -1;
mAudioEndTime = -1;
mAudioCompleted = PR_FALSE;
@ -894,7 +897,7 @@ nsresult nsBuiltinDecoderStateMachine::Run()
RenderVideoFrame(video);
if (!audio) {
NS_ASSERTION(video->mTime <= seekTime &&
seekTime <= video->mTime + mReader->GetInfo().mCallbackPeriod,
seekTime <= video->mEndTime,
"Seek target should lie inside the first frame after seek");
mPlayDuration = TimeDuration::FromMilliseconds(seekTime);
}
@ -990,7 +993,7 @@ nsresult nsBuiltinDecoderStateMachine::Run()
(HasAudio() && !mAudioCompleted)));
if (mAudioStream) {
// Close the audop stream so that next time audio is used a new stream
// Close the audio stream so that next time audio is used a new stream
// is created. The StopPlayback call also resets the IsPlaying() state
// so audio is restarted correctly.
StopPlayback(AUDIO_SHUTDOWN);
@ -1003,7 +1006,7 @@ nsresult nsBuiltinDecoderStateMachine::Run()
StopDecodeThreads();
if (mDecoder->GetState() == nsBuiltinDecoder::PLAY_STATE_PLAYING) {
PRInt64 videoTime = HasVideo() ? (mVideoFrameTime + mReader->GetInfo().mCallbackPeriod) : 0;
PRInt64 videoTime = HasVideo() ? mVideoFrameEndTime : 0;
PRInt64 clockTime = NS_MAX(mEndTime, NS_MAX(videoTime, GetAudioClock()));
UpdatePlaybackPosition(clockTime);
{
@ -1063,11 +1066,12 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
}
if (HasAudio() && mAudioStartTime == -1 && !mAudioCompleted) {
// We've got audio (so we should sync off the audio clock), but we've
// not played a sample on the audio thread, so we can't get a time
// from the audio clock. Just wait and then return, to give the audio
// clock time to tick.
Wait(mReader->GetInfo().mCallbackPeriod);
// We've got audio (so we should sync off the audio clock), but we've not
// played a sample on the audio thread, so we can't get a time from the
// audio clock. Just wait and then return, to give the audio clock time
// to tick. This should really wait for a specific signal from the audio
// thread rather than polling after a sleep. See bug 568431 comment 4.
Wait(AUDIO_DURATION_MS);
return;
}
@ -1095,7 +1099,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
if (mReader->mVideoQueue.GetSize() > 0) {
VideoData* data = mReader->mVideoQueue.PeekFront();
while (clock_time >= data->mTime) {
mVideoFrameTime = data->mTime;
mVideoFrameEndTime = data->mEndTime;
videoData = data;
mReader->mVideoQueue.PopFront();
mDecoder->UpdatePlaybackOffset(data->mOffset);
@ -1105,6 +1109,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
}
}
PRInt64 frameDuration = AUDIO_DURATION_MS;
if (videoData) {
// Decode one frame and display it
NS_ASSERTION(videoData->mTime >= mStartTime, "Should have positive frame time");
@ -1115,15 +1120,16 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
RenderVideoFrame(videoData);
}
mDecoder->GetMonitor().NotifyAll();
frameDuration = videoData->mEndTime - videoData->mTime;
videoData = nsnull;
}
// Cap the current time to the larger of the audio and video end time.
// This ensures that if we're running off the system clock, we don't
// advance the clock to after the media end time.
if (mVideoFrameTime != -1 || mAudioEndTime != -1) {
if (mVideoFrameEndTime != -1 || mAudioEndTime != -1) {
// These will not be -1 if we've displayed a video frame or played an audio sample.
clock_time = NS_MIN(clock_time, NS_MAX(mVideoFrameTime, mAudioEndTime));
clock_time = NS_MIN(clock_time, NS_MAX(mVideoFrameEndTime, mAudioEndTime));
if (clock_time - mStartTime > mCurrentFrameTime) {
// Only update the playback position if the clock time is greater
// than the previous playback position. The audio clock can
@ -1139,7 +1145,7 @@ void nsBuiltinDecoderStateMachine::AdvanceFrame()
// ready state. Post an update to do so.
UpdateReadyState();
Wait(mReader->GetInfo().mCallbackPeriod);
Wait(frameDuration);
} else {
if (IsPlaying()) {
StopPlayback(AUDIO_PAUSE);
@ -1265,6 +1271,4 @@ void nsBuiltinDecoderStateMachine::LoadMetadata()
NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
return;
}
LOG(PR_LOG_DEBUG, ("%p Callback Period: %u", mDecoder, info.mCallbackPeriod));
}

View file

@ -396,9 +396,9 @@ protected:
// unless another sample is pushed to the hardware.
PRInt64 mAudioEndTime;
// The presentation time of the last video frame which has been displayed.
// The presentation end time of the last video frame which has been displayed.
// Accessed from the state machine thread.
PRInt64 mVideoFrameTime;
PRInt64 mVideoFrameEndTime;
// Volume of playback. 0.0 = muted. 1.0 = full volume. Read/Written
// from the state machine and main threads. Synchronised via decoder

View file

@ -278,10 +278,6 @@ protected:
// in the midst of being changed.
PRLock* mVideoUpdateLock;
// Framerate of video being displayed in the element
// expressed in numbers of frames per second.
float mFramerate;
// Pixel aspect ratio (ratio of the pixel width to pixel height)
float mPixelAspectRatio;

View file

@ -148,7 +148,6 @@ nsTheoraState::nsTheoraState(ogg_page* aBosPage) :
mSetup(0),
mCtx(0),
mFrameDuration(0),
mFrameRate(0),
mPixelAspectRatio(0)
{
MOZ_COUNT_CTOR(nsTheoraState);
@ -175,9 +174,6 @@ PRBool nsTheoraState::Init() {
PRInt64 n = mInfo.fps_numerator;
PRInt64 d = mInfo.fps_denominator;
mFrameRate = (n == 0 || d == 0) ?
0.0f : static_cast<float>(n) / static_cast<float>(d);
PRInt64 f;
if (!MulOverflow(1000, d, f)) {
return mActive = PR_FALSE;

View file

@ -65,16 +65,11 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
// is about 4300 bytes, so we read the file in chunks larger than that.
static const int PAGE_STEP = 8192;
// The frame rate to use if there is no video data in the resource to
// be played.
#define AUDIO_FRAME_RATE 25.0
nsOggReader::nsOggReader(nsBuiltinDecoder* aDecoder)
: nsBuiltinDecoderReader(aDecoder),
mTheoraState(nsnull),
mVorbisState(nsnull),
mPageOffset(0),
mCallbackPeriod(0),
mTheoraGranulepos(-1),
mVorbisGranulepos(-1)
{
@ -251,10 +246,8 @@ nsresult nsOggReader::ReadMetadata()
// Theora spec these can be considered the 'primary' bitstreams for playback.
// Extract the metadata needed from these streams.
// Set a default callback period in case we have no video data
mCallbackPeriod = 1000 / AUDIO_FRAME_RATE;
if (mTheoraState) {
if (mTheoraState->Init()) {
mCallbackPeriod = mTheoraState->mFrameDuration;
gfxIntSize sz(mTheoraState->mInfo.pic_width,
mTheoraState->mInfo.pic_height);
mDecoder->SetVideoData(sz, mTheoraState->mPixelAspectRatio, nsnull);
@ -268,13 +261,11 @@ nsresult nsOggReader::ReadMetadata()
mInfo.mHasAudio = HasAudio();
mInfo.mHasVideo = HasVideo();
mInfo.mCallbackPeriod = mCallbackPeriod;
if (HasAudio()) {
mInfo.mAudioRate = mVorbisState->mInfo.rate;
mInfo.mAudioChannels = mVorbisState->mInfo.channels;
}
if (HasVideo()) {
mInfo.mFramerate = mTheoraState->mFrameRate;
mInfo.mPixelAspectRatio = mTheoraState->mPixelAspectRatio;
mInfo.mPicture.width = mTheoraState->mInfo.pic_width;
mInfo.mPicture.height = mTheoraState->mInfo.pic_height;
@ -482,6 +473,7 @@ nsresult nsOggReader::DecodeTheora(nsTArray<VideoData*>& aFrames,
if (ret == TH_DUPFRAME) {
aFrames.AppendElement(VideoData::CreateDuplicate(mPageOffset,
time,
time + mTheoraState->mFrameDuration,
aPacket->granulepos));
} else if (ret == 0) {
th_ycbcr_buffer buffer;
@ -499,6 +491,7 @@ nsresult nsOggReader::DecodeTheora(nsTArray<VideoData*>& aFrames,
mDecoder->GetImageContainer(),
mPageOffset,
time,
time + mTheoraState->mFrameDuration,
b,
isKeyframe,
aPacket->granulepos);
@ -1045,10 +1038,8 @@ nsresult nsOggReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTime
// Decode forward to the seek target frame. Start with video, if we have it.
// We should pass a keyframe while doing this.
if (HasVideo()) {
nsAutoPtr<VideoData> video;
PRBool eof = PR_FALSE;
PRInt64 startTime = -1;
video = nsnull;
while (HasVideo() && !eof) {
while (mVideoQueue.GetSize() == 0 && !eof) {
PRBool skip = PR_FALSE;
@ -1064,10 +1055,10 @@ nsresult nsOggReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTime
if (mVideoQueue.GetSize() == 0) {
break;
}
video = mVideoQueue.PeekFront();
nsAutoPtr<VideoData> video(mVideoQueue.PeekFront());
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (video && video->mTime + mCallbackPeriod < aTarget) {
if (video && video->mEndTime < aTarget) {
if (startTime == -1) {
startTime = video->mTime;
}

View file

@ -126,9 +126,6 @@ private:
// the page we're about to read.
PRInt64 mPageOffset;
// Number of milliseconds of video/audio data held in a frame.
PRUint32 mCallbackPeriod;
// The granulepos of the last decoded Theora frame.
PRInt64 mTheoraGranulepos;

View file

@ -61,14 +61,7 @@ extern PRLogModuleInfo* gBuiltinDecoderLog;
#define SEEK_LOG(type, msg)
#endif
// Nestegg doesn't expose the framerate and the framerate is optional
// anyway. We use a default value - the backend playback code
// only uses it for a 'maximum wait time' not actual frame display time
// so an estimate is fine. A value higher than a standard framerate is
// used to ensure that backend Wait's don't take longer than frame
// display. Bug 568431 should remove the need for 'faking' a framerate in
// the future.
#define DEFAULT_FRAMERATE 32.0
static const unsigned NS_PER_MS = 1000000;
// Functions for reading and seeking using nsMediaStream required for
// nestegg_io. The 'user data' passed to these functions is the
@ -205,7 +198,7 @@ nsresult nsWebMReader::ReadMetadata()
if (r == 0) {
MonitorAutoExit exitReaderMon(mMonitor);
MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
mDecoder->GetStateMachine()->SetDuration(duration / 1000000);
mDecoder->GetStateMachine()->SetDuration(duration / NS_PER_MS);
}
unsigned int ntracks = 0;
@ -256,9 +249,6 @@ nsresult nsWebMReader::ReadMetadata()
// See bug 566779 for a suggestion to refactor
// and remove it.
mInfo.mDataOffset = -1;
mInfo.mFramerate = DEFAULT_FRAMERATE;
mInfo.mCallbackPeriod = 1000 / mInfo.mFramerate;
}
else if (!mHasAudio && type == NESTEGG_TRACK_AUDIO) {
nestegg_audio_params params;
@ -272,10 +262,6 @@ nsresult nsWebMReader::ReadMetadata()
mHasAudio = PR_TRUE;
mInfo.mHasAudio = PR_TRUE;
if (!mInfo.mHasVideo) {
mInfo.mCallbackPeriod = 1000 / DEFAULT_FRAMERATE;
}
// Get the Vorbis header data
unsigned int nheaders = 0;
r = nestegg_track_codec_data_count(mContext, track, &nheaders);
@ -360,7 +346,7 @@ PRBool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket)
return PR_FALSE;
}
PRUint64 tstamp_ms = tstamp / 1000000;
PRUint64 tstamp_ms = tstamp / NS_PER_MS;
for (PRUint32 i = 0; i < count; ++i) {
unsigned char* data;
size_t length;
@ -497,7 +483,7 @@ PRBool nsWebMReader::DecodeAudioData()
}
PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
PRInt64 aTimeThreshold)
PRInt64 aTimeThreshold)
{
MonitorAutoEnter mon(mMonitor);
NS_ASSERTION(mDecoder->OnStateMachineThread() || mDecoder->OnDecodeThread(),
@ -531,7 +517,30 @@ PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
return PR_FALSE;
}
PRInt64 tstamp_ms = tstamp / 1000000;
// The end time of this frame is the start time of the next frame. Fetch
// the timestamp of the next packet for this track. If we've reached the
// end of the stream, use the file's duration as the end time of this
// video frame.
uint64_t next_tstamp = 0;
{
nestegg_packet* next_packet = NextPacket(VIDEO);
if (next_packet) {
r = nestegg_packet_tstamp(next_packet, &next_tstamp);
if (r == -1) {
nestegg_free_packet(next_packet);
return PR_FALSE;
}
} else {
r = nestegg_duration(mContext, &next_tstamp);
if (r == -1) {
nestegg_free_packet(next_packet);
return PR_FALSE;
}
}
mVideoPackets.PushFront(next_packet);
}
PRInt64 tstamp_ms = tstamp / NS_PER_MS;
for (PRUint32 i = 0; i < count; ++i) {
unsigned char* data;
size_t length;
@ -571,9 +580,9 @@ PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
while((img = vpx_codec_get_frame(&mVP8, &iter))) {
NS_ASSERTION(mInfo.mPicture.width == static_cast<PRInt32>(img->d_w),
"WebM picture width from header does not match decoded frame");
"WebM picture width from header does not match decoded frame");
NS_ASSERTION(mInfo.mPicture.height == static_cast<PRInt32>(img->d_h),
"WebM picture height from header does not match decoded frame");
"WebM picture height from header does not match decoded frame");
NS_ASSERTION(img->fmt == IMG_FMT_I420, "WebM image format is not I420");
// Chroma shifts are rounded down as per the decoding examples in the VP8 SDK
@ -597,6 +606,7 @@ PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
mDecoder->GetImageContainer(),
-1,
tstamp_ms,
next_tstamp / NS_PER_MS,
b,
si.is_kf,
-1);
@ -621,7 +631,7 @@ nsresult nsWebMReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTim
if (NS_FAILED(ResetDecode())) {
return NS_ERROR_FAILURE;
}
int r = nestegg_track_seek(mContext, 0, aTarget * 1000000);
int r = nestegg_track_seek(mContext, 0, aTarget * NS_PER_MS);
if (r != 0) {
return NS_ERROR_FAILURE;
}
@ -629,7 +639,7 @@ nsresult nsWebMReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTim
PRBool eof = PR_FALSE;
PRInt64 startTime = -1;
while (HasVideo() && !eof) {
while (mVideoQueue.GetSize() < 2 && !eof) {
while (mVideoQueue.GetSize() == 0 && !eof) {
PRBool skip = PR_FALSE;
eof = !DecodeVideoFrame(skip, 0);
MonitorAutoExit exitReaderMon(mMonitor);
@ -638,22 +648,20 @@ nsresult nsWebMReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTim
return NS_ERROR_FAILURE;
}
}
if (mVideoQueue.GetSize() < 2) {
if (mVideoQueue.GetSize() == 0) {
break;
}
nsAutoPtr<VideoData> video(mVideoQueue.PopFront());
nsAutoPtr<VideoData> videoNext(mVideoQueue.PeekFront());
nsAutoPtr<VideoData> video(mVideoQueue.PeekFront());
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (video && videoNext && videoNext->mTime < aTarget) {
if (video && video->mEndTime < aTarget) {
if (startTime == -1) {
startTime = video->mTime;
}
mVideoQueue.PopFront();
video = nsnull;
videoNext.forget();
} else {
videoNext.forget();
mVideoQueue.PushFront(video.forget());
video.forget();
break;
}
}

View file

@ -77,6 +77,10 @@ class PacketQueue : private nsDeque {
nsDeque::Push(aItem);
}
inline void PushFront(nestegg_packet* aItem) {
nsDeque::PushFront(aItem);
}
inline nestegg_packet* PopFront() {
return static_cast<nestegg_packet*>(nsDeque::PopFront());
}